| Column | Type | Range / Notes |
|:--|:--|:--|
| modelId | string | length 4–81 |
| tags | list | |
| pipeline_tag | string | 17 classes |
| config | dict | |
| downloads | int64 | 0–59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | length 51–438k |
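As a minimal sketch of how rows with the schema above could be inspected (assuming a JSON Lines export of this dump is available; the file name `model_metadata.jsonl` below is a placeholder, not an actual distribution path), pandas can load and summarize the columns:

```python
# Minimal sketch: load a JSON Lines export of this metadata dump with pandas.
# "model_metadata.jsonl" is a placeholder path, not an actual distribution file.
import pandas as pd

df = pd.read_json("model_metadata.jsonl", lines=True)

# Parse the UTC timestamps and check the column types against the schema above.
df["first_commit"] = pd.to_datetime(df["first_commit"], utc=True)
print(df.dtypes)

# Quick summaries mirroring the schema ranges: download counts and pipeline tags.
print(df["downloads"].describe())
print(df["pipeline_tag"].value_counts(dropna=False))
```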
BenDavis71/GPT-2-Finetuning-AIRaid
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2022-12-05T03:54:04Z
--- license: mit tags: - generated_from_trainer model-index: - name: CRS results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CRS This model is a fine-tuned version of [xlnet-base-cased](https://huggingface.co/xlnet-base-cased) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
BenQLange/HF_bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-05T04:01:03Z
--- license: mit tags: - generated_from_trainer model-index: - name: ORS results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ORS This model is a fine-tuned version of [xlnet-base-cased](https://huggingface.co/xlnet-base-cased) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
Bharathdamu/wav2vec2-model-hindibhasha
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-05T04:49:02Z
--- tags: - xsum_v1_last - generated_from_trainer datasets: - xsum metrics: - rouge model-index: - name: pegasus-large-finetune-xsum results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: xsum type: xsum args: default metrics: - name: Rouge1 type: rouge value: 5.0462 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-large-finetune-xsum This model is a fine-tuned version of [google/pegasus-large](https://huggingface.co/google/pegasus-large) on the xsum dataset. It achieves the following results on the evaluation set: - Loss: 10.0826 - Rouge1: 5.0462 - Rouge2: 0.6914 - Rougel: 3.5071 - Rougelsum: 3.9548 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | 11.4044 | 1.0 | 13 | 10.7501 | 5.5154 | 0.5561 | 3.8425 | 4.2435 | | 10.5741 | 2.0 | 26 | 10.2309 | 5.4282 | 0.7228 | 3.5759 | 4.0538 | | 10.0146 | 3.0 | 39 | 10.0826 | 5.0462 | 0.6914 | 3.5071 | 3.9548 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.13.0 - Datasets 2.6.1 - Tokenizers 0.11.0
Bhumika/roberta-base-finetuned-sst2
[ "pytorch", "tensorboard", "roberta", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "model-index" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
85
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - top_v2 model-index: - name: byt5-base-top_v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # byt5-base-top_v2 This model is a fine-tuned version of [google/byt5-base](https://huggingface.co/google/byt5-base) on the top_v2 dataset. It achieves the following results on the evaluation set: - Loss: 0.0154 - Exact Match: 0.8533 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 64 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 3000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Exact Match | |:-------------:|:-----:|:----:|:---------------:|:-----------:| | 0.3866 | 0.82 | 200 | 0.0434 | 0.0278 | | 0.0335 | 1.65 | 400 | 0.0224 | 0.0396 | | 0.0206 | 2.47 | 600 | 0.0184 | 0.0403 | | 0.0162 | 3.29 | 800 | 0.0189 | 0.0409 | | 0.0165 | 4.12 | 1000 | 0.0167 | 0.0410 | | 0.0116 | 4.94 | 1200 | 0.0154 | 0.0413 | | 0.0093 | 5.76 | 1400 | 0.0154 | 0.0415 | | 0.008 | 6.58 | 1600 | 0.0161 | 0.0413 | | 0.0071 | 7.41 | 1800 | 0.0167 | 0.0414 | | 0.0064 | 8.23 | 2000 | 0.0171 | 0.0414 | | 0.0055 | 9.05 | 2200 | 0.0168 | 0.0415 | | 0.0046 | 9.88 | 2400 | 0.0177 | 0.0415 | | 0.0039 | 10.7 | 2600 | 0.0183 | 0.0416 | | 0.0034 | 11.52 | 2800 | 0.0190 | 0.0414 | | 0.0029 | 12.35 | 3000 | 0.0192 | 0.0415 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.0 - Tokenizers 0.13.2
Bia18/Beatriz
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilgpt2-finetuned-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-wikitext2 This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.6438 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.7052 | 1.0 | 2334 | 3.6587 | | 3.6252 | 2.0 | 4668 | 3.6440 | | 3.5968 | 3.0 | 7002 | 3.6438 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
Biasface/DDDC
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
2022-12-05T05:30:57Z
--- license: creativeml-openrail-m tags: - text-to-image --- ### v2-object-16img-2000training Dreambooth model trained by spiron with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the v2-768 base model You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! Sample pictures of: v2-object-16img-2000training (use that on your prompt) ![v2-object-16img-2000training 0](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%281%29.jpg)![v2-object-16img-2000training 1](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%282%29.jpg)![v2-object-16img-2000training 2](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%283%29.jpg)![v2-object-16img-2000training 3](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%284%29.jpg)![v2-object-16img-2000training 4](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%285%29.jpg)![v2-object-16img-2000training 5](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%286%29.jpg)![v2-object-16img-2000training 6](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%287%29.jpg)![v2-object-16img-2000training 7](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%288%29.jpg)![v2-object-16img-2000training 8](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%289%29.jpg)![v2-object-16img-2000training 9](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%2810%29.jpg)![v2-object-16img-2000training 10](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%2811%29.jpg)![v2-object-16img-2000training 11](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%2812%29.jpg)![v2-object-16img-2000training 12](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%2813%29.jpg)![v2-object-16img-2000training 13](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%2814%29.jpg)![v2-object-16img-2000training 14](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%2815%29.jpg)![v2-object-16img-2000training 15](https://huggingface.co/spiron/v2-object-16img-2000training/resolve/main/concept_images/v2-object-16img-2000training_%2816%29.jpg)
BigDaddyNe1L/Hhaa
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilgpt2-finetuned-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-wikitext2 This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.6421 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.7602 | 1.0 | 2334 | 3.6669 | | 3.653 | 2.0 | 4668 | 3.6472 | | 3.6006 | 3.0 | 7002 | 3.6421 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
BigSalmon/BestMask2
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
2022-12-05T05:50:21Z
--- license: apache-2.0 --- MobileNet V2 model from Torchvision fine-tuned on the Imagenette dataset. The checkpoint was trained for 100 epochs using https://github.com/alexsu52/mobilenet_v2_imagenette. Top-1 accuracy is 98.64%. The main intent is to use it in samples and demos for model optimization. Here are the advantages: - Imagenette can be downloaded automatically and is quite small. - MobileNet v2 is an easy-to-train, lightweight model that is also representative and used in many public benchmarks. Here is the code to load the checkpoint in PyTorch: ```python import torch import torchvision.models as models CHECKPOINT_URL = 'https://huggingface.co/alexsu52/mobilenet_v2_imagenette/resolve/main/pytorch_model.bin' IMAGENETTE_CLASSES = 10 model = models.mobilenet_v2(num_classes=IMAGENETTE_CLASSES) checkpoint = torch.hub.load_state_dict_from_url(CHECKPOINT_URL, progress=False) model.load_state_dict(checkpoint['state_dict']) ```
BigSalmon/FormalBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: NL-RX-Synth-t5-base-finetuned-en-to-regex results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # NL-RX-Synth-t5-base-finetuned-en-to-regex This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0255 - Semantic accuracy: 0.336 - Syntactic accuracy: 0.286 - Gen Len: 18.316 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 1000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Semantic accuracy | Syntactic accuracy | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-----------------:|:------------------:|:-------:| | No log | 0.18 | 100 | 0.2726 | 0.242 | 0.158 | 18.09 | | No log | 0.36 | 200 | 0.1477 | 0.264 | 0.2 | 18.268 | | No log | 0.53 | 300 | 0.1153 | 0.262 | 0.224 | 18.298 | | No log | 0.71 | 400 | 0.0602 | 0.292 | 0.242 | 18.266 | | 0.2992 | 0.89 | 500 | 0.0526 | 0.32 | 0.276 | 18.282 | | 0.2992 | 1.07 | 600 | 0.0396 | 0.318 | 0.272 | 18.3 | | 0.2992 | 1.24 | 700 | 0.0326 | 0.318 | 0.286 | 18.33 | | 0.2992 | 1.42 | 800 | 0.0322 | 0.326 | 0.28 | 18.314 | | 0.2992 | 1.6 | 900 | 0.0267 | 0.326 | 0.278 | 18.328 | | 0.0383 | 1.78 | 1000 | 0.0255 | 0.336 | 0.286 | 18.316 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
BigSalmon/FormalRobertaaa
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilroberta-base-finetuned-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilroberta-base-finetuned-wikitext2 This model is a fine-tuned version of [distilroberta-base](https://huggingface.co/distilroberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8340 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.0843 | 1.0 | 2406 | 1.9226 | | 1.9913 | 2.0 | 4812 | 1.8820 | | 1.9597 | 3.0 | 7218 | 1.8214 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
BigSalmon/FroBurta
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-05T06:22:57Z
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: imagefolder metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `imagefolder` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/rambosorn/ddpm-butterflies-128/tensorboard?#scalars)
BigSalmon/GPTNeo350MInformalToFormalLincoln
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - generated_from_trainer model-index: - name: pegasus-pubmed_dataset_radiology_summary20221129.tsv results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-pubmed_dataset_radiology_summary20221129.tsv This model is a fine-tuned version of [google/pegasus-pubmed](https://huggingface.co/google/pegasus-pubmed) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu102 - Datasets 2.4.0 - Tokenizers 0.12.1
BigSalmon/GPTNeo350MInformalToFormalLincoln3
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: mit tags: - generated_from_trainer model-index: - name: lyrics-generator-v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # lyrics-generator-v2 This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.1114 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.4325 | 0.36 | 200 | 2.3267 | | 2.4121 | 0.71 | 400 | 2.2693 | | 2.3582 | 1.07 | 600 | 2.2401 | | 2.2903 | 1.42 | 800 | 2.2182 | | 2.2738 | 1.78 | 1000 | 2.2046 | | 2.2322 | 2.14 | 1200 | 2.1922 | | 2.1933 | 2.49 | 1400 | 2.1832 | | 2.1944 | 2.85 | 1600 | 2.1736 | | 2.1632 | 3.2 | 1800 | 2.1648 | | 2.1366 | 3.56 | 2000 | 2.1554 | | 2.1492 | 3.91 | 2200 | 2.1491 | | 2.1108 | 4.27 | 2400 | 2.1472 | | 2.0882 | 4.63 | 2600 | 2.1422 | | 2.0971 | 4.98 | 2800 | 2.1343 | | 2.0829 | 5.34 | 3000 | 2.1318 | | 2.042 | 5.69 | 3200 | 2.1280 | | 2.0375 | 6.05 | 3400 | 2.1261 | | 2.0146 | 6.41 | 3600 | 2.1245 | | 2.0551 | 6.76 | 3800 | 2.1217 | | 1.992 | 7.12 | 4000 | 2.1182 | | 1.9994 | 7.47 | 4200 | 2.1170 | | 2.0189 | 7.83 | 4400 | 2.1156 | | 1.9795 | 8.19 | 4600 | 2.1133 | | 2.0101 | 8.54 | 4800 | 2.1143 | | 1.9864 | 8.9 | 5000 | 2.1111 | | 1.9602 | 9.25 | 5200 | 2.1120 | | 1.9899 | 9.61 | 5400 | 2.1117 | | 1.9928 | 9.96 | 5600 | 2.1114 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
BigSalmon/GPTNeo350MInformalToFormalLincoln4
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2022-12-05T07:05:25Z
# iSTFTNet Pre-Trained Models https://github.com/rishikksh20/iSTFTNet-pytorch Information about files: As of right now, the files in this repository were trained on 22 kHz sample rates only. Format: - g_ = generator - do_ = discriminator - _xxxxxx = step number - music_ = models prefixed with music have been trained specifically on music data; as of right now, the music data comes from the Free Music Archive (FMA)
BigSalmon/GPTNeo350MInformalToFormalLincoln5
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 545.00 +/- 53.53 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga gaioNL -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga gaioNL -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga gaioNL ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
BigSalmon/GPTNeo350MInformalToFormalLincoln6
[ "pytorch", "gpt_neo", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
2022-12-05T07:19:04Z
--- language: - it license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small It - Gianluca Ruberto results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: it split: test[:10%] args: 'config: hi, split: test' metrics: - name: Wer type: wer value: 22.108985024958404 --- # Whisper Small It - Gianluca Ruberto This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.393979 - Wer: 22.108985 ## Model description This model is the openai whisper small transformer adapted for Italian audio to text transcription. ## Intended uses & limitations The model is available through its [HuggingFace web app](https://huggingface.co/spaces/GIanlucaRub/whisper-it) ## Training and evaluation data Data used for training is the initial 10% of train and validation of [Italian Common Voice](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/it/train) 11.0 from Mozilla Foundation. The dataset used for evaluation is the initial 10% of test of Italian Common Voice. ## Training procedure After loading the pre trained model, it has been trained on the dataset. ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.2545 | 0.95 | 1000 | 0.3872 | 24.8891 | | 0.129 | 1.91 | 2000 | 0.3682 | 22.1991 | | 0.0534 | 2.86 | 3000 | 0.3771 | 22.4695 | | 0.0302 | 3.82 | 4000 | 0.3940 | 22.1090 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
BigSalmon/GoodMaskResults
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2022-12-05T07:27:30Z
--- datasets: - bs-la/xP3ru license: bigscience-bloom-rail-1.0 model-index: - name: bloomz-7b1 results: - task: type: Coreference resolution dataset: type: Muennighoff/xwinograd name: XWinograd (ru) config: ru split: test revision: 9dd5ea5505fad86b7bedad667955577815300cee metrics: - type: Accuracy value: 53.97 - task: type: Natural language inference dataset: type: xnli name: XNLI (ru) config: ru split: validation revision: a5a45e4ff92d5d3f34de70aaf4b72c3bdf9f7f16 metrics: - type: Accuracy value: 33.49 - task: type: Sentence completion dataset: type: Muennighoff/xstory_cloze name: XStoryCloze (ru) config: ru split: validation revision: 8bb76e594b68147f1a430e86829d07189622b90d metrics: - type: Accuracy value: 48.64 --- # Model Summary [bloom-7b1](https://huggingface.co/bigscience/bloom-7b1) finetuned on Russian multitask data. Hence the same as [bloomz-7b1](https://huggingface.co/bigscience/bloomz-7b1), but with **only** Russian finetuning data. 4b stands for 4 billion finetuning tokens (same as bloomz-7b1). # Citation ``` @article{yong2022bloom+, title={BLOOM+ 1: Adding Language Support to BLOOM for Zero-Shot Prompting}, author={Yong, Zheng-Xin and Schoelkopf, Hailey and Muennighoff, Niklas and Aji, Alham Fikri and Adelani, David Ifeoluwa and Almubarak, Khalid and Bari, M Saiful and Sutawika, Lintang and Kasai, Jungo and Baruwa, Ahmed and others}, journal={arXiv preprint arXiv:2212.09535}, year={2022} } ``` ```bibtex @misc{muennighoff2022crosslingual, title={Crosslingual Generalization through Multitask Finetuning}, author={Niklas Muennighoff and Thomas Wang and Lintang Sutawika and Adam Roberts and Stella Biderman and Teven Le Scao and M Saiful Bari and Sheng Shen and Zheng-Xin Yong and Hailey Schoelkopf and Xiangru Tang and Dragomir Radev and Alham Fikri Aji and Khalid Almubarak and Samuel Albanie and Zaid Alyafeai and Albert Webson and Edward Raff and Colin Raffel}, year={2022}, eprint={2211.01786}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
BigSalmon/InfillFormalLincoln
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- language: - sv license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer metrics: - wer model-index: - name: Whisper Small - Swedish results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small - Swedish This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.3500 - Wer: 19.5235 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.1391 | 1.3 | 1000 | 0.2981 | 21.5939 | | 0.049 | 2.59 | 2000 | 0.2954 | 20.5614 | | 0.0198 | 3.89 | 3000 | 0.3049 | 19.9564 | | 0.0036 | 5.18 | 4000 | 0.3381 | 19.6042 | | 0.0024 | 6.48 | 5000 | 0.3500 | 19.5235 | ### Framework versions - Transformers 4.25.0.dev0 - Pytorch 1.12.1 - Datasets 2.7.1 - Tokenizers 0.13.2
BigSalmon/InformalToFormalLincoln17
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # lmxhappy/new_bert This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('lmxhappy/new_bert') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch def cls_pooling(model_output, attention_mask): return model_output[0][:,0] # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('lmxhappy/new_bert') model = AutoModel.from_pretrained('lmxhappy/new_bert') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, cls pooling. sentence_embeddings = cls_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=lmxhappy/new_bert) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 935 with parameters: ``` {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 100, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 5e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 94, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
BigSalmon/InformalToFormalLincoln19
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: ajdowney/3epoch-1warmup-0.1decay results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # ajdowney/3epoch-1warmup-0.1decay This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.2933 - Validation Loss: 0.7097 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 257, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.1} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 0.4863 | 0.6130 | 0 | | 0.3848 | 0.6046 | 1 | | 0.2933 | 0.7097 | 2 | ### Framework versions - Transformers 4.23.1 - TensorFlow 2.10.0 - Datasets 2.6.1 - Tokenizers 0.13.1
BigSalmon/Neo
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2022-12-05T08:53:08Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('diffX/sd-class-butterflies-32') image = pipeline().images[0] image ```
BigSalmon/T5F
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
# gpt2-medium-nlg-multiwoz21_sgd_tm1_tm2_tm3 This model is a fine-tuned version of [GPT2-medium](https://huggingface.co/gpt2-medium) on [MultiWOZ21](https://huggingface.co/datasets/ConvLab/multiwoz21), [Schema-Guided Dialogue](https://huggingface.co/datasets/ConvLab/sgd), [TaskMaster1](https://huggingface.co/datasets/ConvLab/tm1), [TaskMaster2](https://huggingface.co/datasets/ConvLab/tm2) and [TaskMaster3](https://huggingface.co/datasets/ConvLab/tm3) Refer to [ConvLab-3](https://github.com/ConvLab/ConvLab-3) for model description and usage. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-5 - train_batch_size: 64 - gradient_accumulation_steps: 2 - total_train_batch_size: 128 - optimizer: AdamW - lr_scheduler_type: linear - num_epochs: 20 ### Framework versions - Transformers 4.23.1 - Pytorch 1.10.1+cu111
BobBraico/distilbert-base-uncased-finetuned-imdb-accelerate
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Access to model gitigt/test_001 is restricted and you are not in the authorized list. Visit https://huggingface.co/gitigt/test_001 to ask for access.
BobBraico/distilbert-base-uncased-finetuned-imdb
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 language: en datasets: - Jzuluaga/atco2_corpus_1h tags: - text - token-classification - en-atc - en - generated_from_trainer - bert - ner-for-atc metrics: - Precision - Recall - Accuracy - F1 widget: - text: "csa two nine six startup approved mike current qnh one zero one eight time check one seven" - text: "swiss four eight seven november runway three one cleared for takeoff wind one three zero degrees seven knots" - text: "lufthansa five yankee victor runway one three clear to land wind zero seven zero degrees" - text: "austrian seven one zulu hello to you reduce one six zero knots" - text: "sky travel one nine two approaching holding point three one ready for departure" model-index: - name: bert-base-ner-atc-en-atco2-1h results: - task: type: token-classification name: ner dataset: type: Jzuluaga/atco2_corpus_1h name: ATCO2 corpus (Air Traffic Control Communications) config: test split: test metrics: - type: F1 value: 0.94 name: TEST F1 (callsign) verified: False - type: F1 value: 0.74 name: TEST F1 (command) verified: False - type: F1 value: 0.81 name: TEST F1 (value) verified: False --- # bert-base-ner-atc-en-atco2-1h This model allow to perform named-entity recognition (NER) on air traffic control communications data. We solve this challenge by performing token classification (NER) with a BERT model. We fine-tune a pretrained BERT model on the ner task. For instance, if you have the following transcripts/gold annotations: - **Utterance**: lufthansa three two five cleared to land runway three four left Could you tell what are the main entities in the communication? The desired output is shown below: - **Named-entity module output**: [call] lufthansa three two five [/call] [cmd] cleared to land [/cmd] [val] runway three four left [/val] This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the [atco2_corpus_1h](https://huggingface.co/datasets/Jzuluaga/atco2_corpus_1h). <a href="https://github.com/idiap/atco2-corpus"> <img alt="GitHub" src="https://img.shields.io/badge/GitHub-Open%20source-green\"> </a> It achieves the following results on the development set: - Loss: 1.4282 - Precision: 0.6195 - Recall: 0.7071 - F1: 0.6604 - Accuracy: 0.8182 **Paper**: [ATCO2 corpus: A Large-Scale Dataset for Research on Automatic Speech Recognition and Natural Language Understanding of Air Traffic Control Communications](https://arxiv.org/abs/2211.04054) Authors: Juan Zuluaga-Gomez, Karel Veselý, Igor Szöke, Petr Motlicek, Martin Kocour, Mickael Rigault, Khalid Choukri, Amrutha Prasad and others Abstract: Personal assistants, automatic speech recognizers and dialogue understanding systems are becoming more critical in our interconnected digital world. A clear example is air traffic control (ATC) communications. ATC aims at guiding aircraft and controlling the airspace in a safe and optimal manner. These voice-based dialogues are carried between an air traffic controller (ATCO) and pilots via very-high frequency radio channels. In order to incorporate these novel technologies into ATC (low-resource domain), large-scale annotated datasets are required to develop the data-driven AI systems. Two examples are automatic speech recognition (ASR) and natural language understanding (NLU). In this paper, we introduce the ATCO2 corpus, a dataset that aims at fostering research on the challenging ATC field, which has lagged behind due to lack of annotated data. 
The ATCO2 corpus covers 1) data collection and pre-processing, 2) pseudo-annotations of speech data, and 3) extraction of ATC-related named entities. The ATCO2 corpus is split into three subsets. 1) ATCO2-test-set corpus contains 4 hours of ATC speech with manual transcripts and a subset with gold annotations for named-entity recognition (callsign, command, value). 2) The ATCO2-PL-set corpus consists of 5281 hours of unlabeled ATC data enriched with automatic transcripts from an in-domain speech recognizer, contextual information, speaker turn information, signal-to-noise ratio estimate and English language detection score per sample. Both available for purchase through ELDA at this http URL. 3) The ATCO2-test-set-1h corpus is a one-hour subset from the original test set corpus, that we are offering for free at this url: https://www.atco2.org/data. We expect the ATCO2 corpus will foster research on robust ASR and NLU not only in the field of ATC communications but also in the general research community. Code — GitHub repository: https://github.com/idiap/atco2-corpus ## Intended uses & limitations This model was fine-tuned on air traffic control data. We don't expect that it keeps the same performance on some others datasets where BERT was pre-trained or fine-tuned. ## Training and evaluation data See Table 6 (page 18) in our paper: [ATCO2 corpus: A Large-Scale Dataset for Research on Automatic Speech Recognition and Natural Language Understanding of Air Traffic Control Communications](https://arxiv.org/abs/2211.04054). We described there the data used to fine-tune our NER model. - We use the ATCO2 corpus to fine-tune this model. You can download a free sample here: https://www.atco2.org/data - However, do not worry, we have prepared a script in our repository for preparing this databases: - Dataset preparation folder: https://github.com/idiap/atco2-corpus/tree/main/data/databases/atco2_test_set_1h/data_prepare_atco2_corpus_other.sh - Get the data in the format required by HuggingFace: speaker_role/data_preparation/prepare_spkid_atco2_corpus_test_set_1h.sh ## Writing your own inference script The snippet of code: ```python from transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification tokenizer = AutoTokenizer.from_pretrained("Jzuluaga/bert-base-ner-atc-en-atco2-1h") model = AutoModelForTokenClassification.from_pretrained("Jzuluaga/bert-base-ner-atc-en-atco2-1h") ##### Process text sample from transformers import pipeline nlp = pipeline('ner', model=model, tokenizer=tokenizer, aggregation_strategy="first") nlp("lufthansa three two five cleared to land runway three four left") # output: [{'entity_group': 'callsign', 'score': 0.8753265, 'word': 'lufthansa three two five', 'start': 0, 'end': 24}, {'entity_group': 'command', 'score': 0.99988264, 'word': 'cleared to land', 'start': 25, 'end': 40}, {'entity_group': 'value', 'score': 0.9999145, 'word': 'runway three four left', 'start': 41, 'end': 63}] ``` # Cite us If you use this code for your research, please cite our paper with: ``` @article{zuluaga2022bertraffic, title={BERTraffic: BERT-based Joint Speaker Role and Speaker Change Detection for Air Traffic Control Communications}, author={Zuluaga-Gomez, Juan and Sarfjoo, Seyyed Saeed and Prasad, Amrutha and others}, journal={IEEE Spoken Language Technology Workshop (SLT), Doha, Qatar}, year={2022} } ``` and, ``` @article{zuluaga2022how, title={How Does Pre-trained Wav2Vec2. 0 Perform on Domain Shifted ASR? 
An Extensive Benchmark on Air Traffic Control Communications}, author={Zuluaga-Gomez, Juan and Prasad, Amrutha and Nigmatulina, Iuliia and Sarfjoo, Saeed and others}, journal={IEEE Spoken Language Technology Workshop (SLT), Doha, Qatar}, year={2022} } ``` and, ``` @article{zuluaga2022atco2, title={ATCO2 corpus: A Large-Scale Dataset for Research on Automatic Speech Recognition and Natural Language Understanding of Air Traffic Control Communications}, author={Zuluaga-Gomez, Juan and Vesel{\`y}, Karel and Sz{\"o}ke, Igor and Motlicek, Petr and others}, journal={arXiv preprint arXiv:2211.04054}, year={2022} } ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 3000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 125.0 | 500 | 0.8692 | 0.6396 | 0.7172 | 0.6762 | 0.8307 | | 0.2158 | 250.0 | 1000 | 1.0074 | 0.5702 | 0.6970 | 0.6273 | 0.8245 | | 0.2158 | 375.0 | 1500 | 1.3560 | 0.6577 | 0.7374 | 0.6952 | 0.8119 | | 0.0184 | 500.0 | 2000 | 1.3393 | 0.6182 | 0.6869 | 0.6507 | 0.8056 | | 0.0184 | 625.0 | 2500 | 1.3528 | 0.6087 | 0.7071 | 0.6542 | 0.8213 | | 0.0175 | 750.0 | 3000 | 1.4282 | 0.6195 | 0.7071 | 0.6604 | 0.8182 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.0 - Tokenizers 0.13.2
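A small addition to the inference snippet further up: the pipeline returns entity spans (callsign/command/value), while the example output in the card uses bracketed tags ([call], [cmd], [val]). The helper and tag mapping below are an illustrative sketch, not part of the original release, and they assume the `nlp` pipeline object defined in the snippet above.

```python
# Illustrative post-processing: wrap pipeline entities back into the bracketed tag format
tag_map = {"callsign": "call", "command": "cmd", "value": "val"}  # assumed mapping

def to_bracketed(text, entities):
    out, cursor = [], 0
    for ent in sorted(entities, key=lambda e: e["start"]):
        tag = tag_map.get(ent["entity_group"], ent["entity_group"])
        out.append(text[cursor:ent["start"]])
        out.append(f"[{tag}] {text[ent['start']:ent['end']]} [/{tag}]")
        cursor = ent["end"]
    out.append(text[cursor:])
    return " ".join(" ".join(out).split())

text = "lufthansa three two five cleared to land runway three four left"
print(to_bracketed(text, nlp(text)))
# roughly: [call] lufthansa three two five [/call] [cmd] cleared to land [/cmd] [val] runway three four left [/val]
```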
BonjinKim/dst_kor_bert
[ "pytorch", "jax", "bert", "pretraining", "transformers" ]
null
{ "architectures": [ "BertForPreTraining" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Stable Diffusion - Pokemon, 256px Model developed for the Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class). This model is a diffusion model for unconditional image generation of Pokemon [![PikachuCool](https://cdn3.emoji.gg/emojis/5085-pikachucool.png)](https://emoji.gg/emoji/5085-pikachucool) It is trained on a small collection of pokemon pictures and trained for 50 epochs, with 🤗 Accelerate. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('alkiskoudounas/sd-pokemon-256px') ``` ## Example Here you can find an example of the output of the model, in a batch of 8 images: ![Pokemon Example](pokemon-256px-examples.png)
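The snippet above only loads the pipeline; generating images is one more call. A short follow-up using standard DDPMPipeline usage (not taken from the original card):

```python
# Continuing from the `pipeline` object created above
images = pipeline(batch_size=4).images  # list of PIL.Image objects
images[0].save("pokemon_sample.png")
```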
BossLee/t5-gec
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
--- license: apache-2.0 language: en datasets: - Jzuluaga/uwb_atcc tags: - text - sequence-classification - en-atc - en - generated_from_trainer - bert - bertraffic metrics: - Precision - Recall - Accuracy - F1 widget: - text: "csa two nine six startup approved mike current qnh one zero one eight time check one seven" - text: "swiss four eight seven november runway three one cleared for takeoff wind one three zero degrees seven knots" - text: "lufthansa five yankee victor runway one three clear to land wind zero seven zero degrees" - text: "austrian seven one zulu hello to you reduce one six zero knots" - text: "sky travel one nine two approaching holding point three one ready for departure" - name: bert-base-speaker-role-atc-en-uwb-atcc results: - task: type: token-classification name: chunking dataset: type: Jzuluaga/uwb_atcc name: UWB-ATCC corpus (Air Traffic Control Communications) config: test split: test metrics: - type: F1 value: 0.87 name: TEST F1 (macro) verified: False - type: Accuracy value: 0.91 name: TEST Accuracy verified: False - type: Precision value: 0.86 name: TEST Precision (macro) verified: False - type: Recall value: 0.88 name: TEST Recall (macro) verified: False - type: Jaccard Error Rate value: 0.169 name: TEST Jaccard Error Rate verified: False --- # bert-base-speaker-role-atc-en-uwb-atcc This model allow to detect speaker roles based on text. Normally, this task is done on the acoustic level. However, we propose to perform this task on the text level. We solve this challenge by performing speaker role with a BERT model. We fine-tune it on the sequence classification task. For instance: - Utterance 1: **lufthansa six two nine charlie tango report when established** - Utterance 2: **report when established lufthansa six two nine charlie tango** Based on that, could you tell the speaker role? Is it Utterance 1 air traffic controller or pilot? Check the inference API (there are 5 examples)! This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the [UWB-ATCC corpus](https://huggingface.co/datasets/Jzuluaga/uwb_atcc). <a href="https://github.com/idiap/atco2-corpus"> <img alt="GitHub" src="https://img.shields.io/badge/GitHub-Open%20source-green\"> </a> It achieves the following results on the evaluation set: - Loss: 0.6191 - Accuracy: 0.9103 - Precision: 0.9239 - Recall: 0.9161 - F1: 0.9200 **Paper**: [ATCO2 corpus: A Large-Scale Dataset for Research on Automatic Speech Recognition and Natural Language Understanding of Air Traffic Control Communications](https://arxiv.org/abs/2211.04054) Authors: Juan Zuluaga-Gomez, Karel Veselý, Igor Szöke, Petr Motlicek, Martin Kocour, Mickael Rigault, Khalid Choukri, Amrutha Prasad and others Abstract: Personal assistants, automatic speech recognizers and dialogue understanding systems are becoming more critical in our interconnected digital world. A clear example is air traffic control (ATC) communications. ATC aims at guiding aircraft and controlling the airspace in a safe and optimal manner. These voice-based dialogues are carried between an air traffic controller (ATCO) and pilots via very-high frequency radio channels. In order to incorporate these novel technologies into ATC (low-resource domain), large-scale annotated datasets are required to develop the data-driven AI systems. Two examples are automatic speech recognition (ASR) and natural language understanding (NLU). 
In this paper, we introduce the ATCO2 corpus, a dataset that aims at fostering research on the challenging ATC field, which has lagged behind due to lack of annotated data. The ATCO2 corpus covers 1) data collection and pre-processing, 2) pseudo-annotations of speech data, and 3) extraction of ATC-related named entities. The ATCO2 corpus is split into three subsets. 1) ATCO2-test-set corpus contains 4 hours of ATC speech with manual transcripts and a subset with gold annotations for named-entity recognition (callsign, command, value). 2) The ATCO2-PL-set corpus consists of 5281 hours of unlabeled ATC data enriched with automatic transcripts from an in-domain speech recognizer, contextual information, speaker turn information, signal-to-noise ratio estimate and English language detection score per sample. Both available for purchase through ELDA at this http URL. 3) The ATCO2-test-set-1h corpus is a one-hour subset from the original test set corpus, that we are offering for free at this url: https://www.atco2.org/data. We expect the ATCO2 corpus will foster research on robust ASR and NLU not only in the field of ATC communications but also in the general research community. Code — GitHub repository: https://github.com/idiap/atco2-corpus ## Intended uses & limitations This model was fine-tuned on air traffic control data. We don't expect that it keeps the same performance on some others datasets where BERT was pre-trained or fine-tuned. ## Training and evaluation data See Table 7 (page 19) in our paper: [ATCO2 corpus: A Large-Scale Dataset for Research on Automatic Speech Recognition and Natural Language Understanding of Air Traffic Control Communications](https://arxiv.org/abs/2211.04054). We described there the data used to fine-tune our sequence classification model. - We use the UWB-ATCC corpus to fine-tune this model. You can download the raw data here: https://lindat.mff.cuni.cz/repository/xmlui/handle/11858/00-097C-0000-0001-CCA1-0 - However, do not worry, we have prepared a script in our repository for preparing this databases: - Dataset preparation folder: https://github.com/idiap/atco2-corpus/tree/main/data/databases/uwb_atcc/ - Prepare the data: https://github.com/idiap/atco2-corpus/blob/main/data/databases/uwb_atcc/data_prepare_uwb_atcc_corpus_other.sh ## Writing your own inference script The snippet of code: ```python from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("Jzuluaga/bert-base-speaker-role-atc-en-uwb-atcc") model = AutoModelForSequenceClassification.from_pretrained("Jzuluaga/bert-base-speaker-role-atc-en-uwb-atcc") ##### Process text sample (from UWB-ATCC) from transformers import pipeline nlp = pipeline('text-classification', model=model, tokenizer=tokenizer) nlp("lining up runway three one csa five bravo") [{'label': 'pilot', 'score': 0.9998971223831177}] ``` # Cite us If you use this code for your research, please cite our paper with: ``` @article{zuluaga2022bertraffic, title={BERTraffic: BERT-based Joint Speaker Role and Speaker Change Detection for Air Traffic Control Communications}, author={Zuluaga-Gomez, Juan and Sarfjoo, Seyyed Saeed and Prasad, Amrutha and others}, journal={IEEE Spoken Language Technology Workshop (SLT), Doha, Qatar}, year={2022} } ``` and, ``` @article{zuluaga2022how, title={How Does Pre-trained Wav2Vec2. 0 Perform on Domain Shifted ASR? 
An Extensive Benchmark on Air Traffic Control Communications}, author={Zuluaga-Gomez, Juan and Prasad, Amrutha and Nigmatulina, Iuliia and Sarfjoo, Saeed and others}, journal={IEEE Spoken Language Technology Workshop (SLT), Doha, Qatar}, year={2022} } ``` and, ``` @article{zuluaga2022atco2, title={ATCO2 corpus: A Large-Scale Dataset for Research on Automatic Speech Recognition and Natural Language Understanding of Air Traffic Control Communications}, author={Zuluaga-Gomez, Juan and Vesel{\`y}, Karel and Sz{\"o}ke, Igor and Motlicek, Petr and others}, journal={arXiv preprint arXiv:2211.04054}, year={2022} } ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 3000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | No log | 3.36 | 500 | 0.2346 | 0.9207 | 0.9197 | 0.9413 | 0.9303 | | 0.2212 | 6.71 | 1000 | 0.3161 | 0.9046 | 0.9260 | 0.9027 | 0.9142 | | 0.2212 | 10.07 | 1500 | 0.4337 | 0.9065 | 0.9191 | 0.9144 | 0.9167 | | 0.0651 | 13.42 | 2000 | 0.4743 | 0.9178 | 0.9249 | 0.9295 | 0.9272 | | 0.0651 | 16.78 | 2500 | 0.5538 | 0.9103 | 0.9196 | 0.9211 | 0.9204 | | 0.0296 | 20.13 | 3000 | 0.6191 | 0.9103 | 0.9239 | 0.9161 | 0.9200 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.0 - Tokenizers 0.13.2
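To tie the snippet back to the motivating question at the top of the card, the same pipeline can score both orderings of the utterance. This sketch assumes the `nlp` pipeline defined in the snippet above; the predicted labels are not claimed here.

```python
utterances = [
    "lufthansa six two nine charlie tango report when established",
    "report when established lufthansa six two nine charlie tango",
]
for utt in utterances:
    print(utt, "->", nlp(utt)[0]["label"])
```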
Botslity/Bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: ta datasets: - wikiann examples: widget: - text: "இந்திய" example_title: "Sentence_1" - text: "இந்தியா வளர்ந்து வரும் வல்லரசு" example_title: "Sentence_2" - text: "2050ல் இந்தியா உலகின் மிகப்பெரிய பொருளாதார நாடாக மாறும்" example_title: "Sentence_3" - text: "இந்தியாவின் வெளியுறவுத்துறை அமைச்சர் திரு.ஜெய் சங்கர், ரஷ்யா-உக்ரைன் மோதலில் இந்தியாவின் நிலைப்பாட்டை தெளிவாக எடுத்துரைத்தார்." example_title: "Sentence_4" - text: "ஜி20 நாடுகளின் தலைவர் பதவி இந்திய பிரதமர் நரேந்திர மோடியிடம் ஒப்படைக்கப்பட்டுள்ளது" example_title: "Sentence_5" - text: "ரஷ்யாவிடம் இருந்து எண்ணெய் வாங்க வேண்டாம் என ஐரோப்பிய நாடுகளுக்கு ஐரோப்பிய ஒன்றியம் அறிவுறுத்தியுள்ளது" --- <h1>Tamil Named Entity Recognition</h1> Fine-tuning bert-base-multilingual-cased on Wikiann dataset for performing NER on Tamil language. ## Label ID and its corresponding label name | Label ID | Label Name| | -------- | ----- | |0 | O | | 1 | B-PER | | 2 | I-PER | | 3 | B-ORG| | 4 | I-ORG | | 5 | B-LOC | | 6 | I-LOC | <h1>Results</h1> | Step | Training Loss| Validation Loss|Overall Precision|Overall Recall|Overall F1|Overall Accuracy| Loc F1 | Org F1 | Per F1 | |----- |--------------|----------------|-----------------|--------------|----------|----------------|---------|---------|---------| | 1000 | 0.386900 | 0.300006 | 0.833469 | 0.824748 | 0.829086 | 0.912857 | 0.835343| 0.781625| 0.867752| | 2000 | 0.210200 | 0.251389 | 0.845455 | 0.842052 | 0.843750 | 0.924861 | 0.851711| 0.790198| 0.886515| | 3000 | 0.140000 | 0.264964 | 0.866952 | 0.856137 | 0.861510 | 0.930141 | 0.874872| 0.818150| 0.885203| | 4000 | 0.095400 | 0.298542 | 0.860871 | 0.882696 | 0.871647 | 0.935692 | 0.881348| 0.829285| 0.899245| | 5000 | 0.062200 | 0.296011 | 0.871805 | 0.878471 | 0.875125 | 0.938806 | 0.875434| 0.850889| 0.898148| | 6000 | 0.042200 | 0.320418 | 0.868416 | 0.879074 | 0.873713 | 0.937497 | 0.877588| 0.833611| 0.907737| Example ```py from transformers import AutoTokenizer, AutoModelForTokenClassification from transformers import pipeline tokenizer = AutoTokenizer.from_pretrained("Ambareeshkumar/BERT-Tamil") model = AutoModelForTokenClassification.from_pretrained("Ambareeshkumar/BERT-Tamil") nlp = pipeline("ner", model=model, tokenizer=tokenizer) example = "இந்திய" ner_results = nlp(example) ner_results ```
Brendan/cse244b-hw2-roberta
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
2022-12-05T10:29:30Z
--- tags: - generated_from_keras_callback model-index: - name: dung1308/RM_system_not_mixed__NLP_model_90_10_CPU results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # dung1308/RM_system_not_mixed__NLP_model_90_10_CPU This model is a fine-tuned version of [vinai/phobert-base](https://huggingface.co/vinai/phobert-base) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 5.6102 - Validation Loss: 4.7316 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': -687, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 5.6102 | 4.7316 | 0 | ### Framework versions - Transformers 4.18.0 - TensorFlow 2.10.1 - Datasets 2.7.0 - Tokenizers 0.11.0
BritishLibraryLabs/bl-books-genre
[ "pytorch", "distilbert", "text-classification", "multilingual", "dataset:blbooksgenre", "transformers", "genre", "books", "library", "historic", "glam ", "lam", "license:mit", "has_space" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
76
null
--- license: mit tags: - generated_from_trainer model-index: - name: robbert-dutch-base-squad-nl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # robbert-dutch-base-squad-nl This model is a fine-tuned version of [pdelobelle/robbert-v2-dutch-base](https://huggingface.co/pdelobelle/robbert-v2-dutch-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.5735 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.8337 | 1.0 | 4162 | 1.5621 | | 1.5251 | 2.0 | 8324 | 1.5735 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
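The card does not include a usage example; a minimal sketch follows. The hub id is a placeholder guessed from the model name above, and the question/context pair is purely illustrative.

```python
from transformers import pipeline

# "<user>/robbert-dutch-base-squad-nl" is a placeholder hub id
qa = pipeline("question-answering", model="<user>/robbert-dutch-base-squad-nl")
qa(
    question="Wat is de hoofdstad van Nederland?",
    context="Amsterdam is de hoofdstad van Nederland.",
)
```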
Brona/model1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer model-index: - name: deberta-v3-xsmall-indonesia-squadv2 results: [] widget: - text: "Siapa yang mendirikan Sunda Pura?" context: "Pada tahun 397 M, Raja Purnawarman mendirikan Sunda Pura, yang terletak di pantai utara Jawa Barat, sebagai ibu kota baru kerajaan.Ibu kota kerajaan Tarumanagara tersebut kemungkinan besar terletak di suatu tempat antara Kecamatan Tugu, Jakarta Utara dan Kabupaten Bekasi, Jawa Barat. Purnawarman meninggalkan tujuh batu peringatan di seluruh daerah tersebut, termasuk Provinsi Banten dan Jawa Barat saat ini, yang terdiri dari prasasti yang memuat namanya" example_title: "Example 1 - Name" - text: "Kapan Orang Belanda datang ke Jayakarta?" context: "Orang Belanda datang ke Jayakarta sekitar akhir abad ke-16, setelah singgah di Banten pada 1596. Jayakarta pada awal abad ke-17 diperintah oleh Pangeran Jayakarta, salah seorang kerabat Kesultanan Banten." example_title: "Example 2 - Time" - text: "Kapan ulang tahun jakarta?" context: "Penetapan 22 Juni sebagai hari ulang tahun Jakarta didasari momen Fatahillah berhasil mengusir Portugis dari Sunda Kelapa pada 22 Juni 1527." example_title: "Example 3 - Time" --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-v3-xsmall-indonesia-squadv2 This model is a fine-tuned version of [microsoft/deberta-v3-xsmall](https://huggingface.co/microsoft/deberta-v3-xsmall) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4182 ## Model description Deberta-V3-Xsmall from Microsft with model parameter: Backbone Parameters 22M, 384 Hidden Size, 12 Layers Based Deberta-V3 ## Intended uses & limitations More information needed ## Training and evaluation data Training and evaluation data using Indonesia SQuAD V2, source from https://github.com/Wikidepia/SQuAD-id ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 4.5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.6078 | 1.0 | 13505 | 1.5331 | | 1.4216 | 2.0 | 27010 | 1.4344 | | 1.2017 | 3.0 | 40515 | 1.4182 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2 ### Evaluation Results ``` {'exact': 55.34646711872568, 'f1': 67.22757187614371, 'total': 24923, 'HasAns_exact': 55.34646711872568, 'HasAns_f1': 67.22757187614371, 'HasAns_total': 24923, 'best_exact': 55.34646711872568, 'best_exact_thresh': 0.0, 'best_f1': 67.22757187614371, 'best_f1_thresh': 0.0} ``` ### Simple Usage ``` from transformers import pipeline qa_pipeline = pipeline( "question-answering", model="asaduas/deberta-v3-xsmall-indonesia-squadv2", tokenizer="asaduas/deberta-v3-xsmall-indonesia-squadv2" ) qa_pipeline( { 'context': "Pada tahun 1512 juga Afonso de Albuquerque mengirim Antonio Albreu dan Franscisco Serrao untuk memimpin armadanya mencari jalan ke tempat asal rempah-rempah di Maluku. Sepanjang perjalanan, mereka singgah di Madura, Bali, dan Lombok. 
Dengan menggunakan nakhoda-nakhoda Jawa, armada itu tiba di Kepulauan Banda, terus menuju Aibku Utara sampai tiba di Ternate.", 'question': "Siapa yang dikirim oleh Afonso de Albuquerque Pada tahun 1512?" } ) ``` ### Output ``` [ {'score': 0.8919295072555542, 'start': 51, 'end': 88, 'answer': ' Antonio Albreu dan Franscisco Serrao'} ] ```
Bryanwong/wangchanberta-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: all-roberta-large-v1-banking-16-16-5-oos results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # all-roberta-large-v1-banking-16-16-5-oos This model is a fine-tuned version of [sentence-transformers/all-roberta-large-v1](https://huggingface.co/sentence-transformers/all-roberta-large-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.2920 - Accuracy: 0.3982 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7211 | 1.0 | 1 | 2.5748 | 0.2301 | | 2.2722 | 2.0 | 2 | 2.4566 | 0.3009 | | 1.9185 | 3.0 | 3 | 2.3596 | 0.3805 | | 1.667 | 4.0 | 4 | 2.2920 | 0.3982 | | 1.4704 | 5.0 | 5 | 2.2565 | 0.3982 | ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
Bwehfuk/Ron
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: all-roberta-large-v1-banking-1000-16-5-oos results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # all-roberta-large-v1-banking-1000-16-5-oos This model is a fine-tuned version of [sentence-transformers/all-roberta-large-v1](https://huggingface.co/sentence-transformers/all-roberta-large-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 4.1313 - Accuracy: 0.3451 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 5.0616 | 1.0 | 1 | 4.7687 | 0.2035 | | 4.4657 | 2.0 | 2 | 4.5386 | 0.2920 | | 4.0496 | 3.0 | 3 | 4.3450 | 0.3097 | | 3.6317 | 4.0 | 4 | 4.2044 | 0.3363 | | 3.3941 | 5.0 | 5 | 4.1313 | 0.3451 | ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16,451
null
--- license: other tags: - generated_from_trainer metrics: - accuracy model-index: - name: 6.7b-ri-reproduce-combined-4-gpu-20-val-v3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 6.7b-ri-reproduce-combined-4-gpu-20-val-v3 This model is a fine-tuned version of [facebook/opt-6.7b](https://huggingface.co/facebook/opt-6.7b) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.9434 - Accuracy: 0.0329 - Perplexity: 51.5916 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 9e-07 - train_batch_size: 1 - eval_batch_size: 8 - seed: 100 - distributed_type: multi-GPU - num_devices: 4 - total_train_batch_size: 4 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 15.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Perplexity | |:-------------:|:-----:|:----:|:---------------:|:--------:|:----------:| | 2.5731 | 1.0 | 79 | 2.6113 | 0.0317 | 13.6171 | | 2.206 | 2.0 | 158 | 2.4805 | 0.0328 | 11.9469 | | 1.9105 | 3.0 | 237 | 2.4512 | 0.0333 | 11.6019 | | 1.6301 | 4.0 | 316 | 2.5078 | 0.0345 | 12.2780 | | 1.3733 | 5.0 | 395 | 2.6816 | 0.0342 | 14.6090 | | 1.1337 | 6.0 | 474 | 3.0078 | 0.0330 | 20.2431 | | 0.9619 | 7.0 | 553 | 3.1777 | 0.0330 | 23.9923 | | 0.798 | 8.0 | 632 | 3.2559 | 0.0330 | 25.9419 | | 0.6653 | 9.0 | 711 | 3.4277 | 0.0331 | 30.8068 | | 0.552 | 10.0 | 790 | 3.5566 | 0.0333 | 35.0453 | | 0.4568 | 11.0 | 869 | 3.7324 | 0.0324 | 41.7802 | | 0.3756 | 12.0 | 948 | 3.8184 | 0.0328 | 45.5295 | | 0.3119 | 13.0 | 1027 | 3.8477 | 0.0331 | 46.8831 | | 0.2448 | 14.0 | 1106 | 3.9062 | 0.0329 | 49.7122 | | 0.1986 | 15.0 | 1185 | 3.9434 | 0.0329 | 51.5916 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
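A note on the Perplexity column above: it appears to be the exponential of the validation loss, which gives a quick sanity check on the table.

```python
import math

math.exp(2.6113)  # ≈ 13.62, matching the epoch-1 perplexity of 13.6171
math.exp(3.9434)  # ≈ 51.59, matching the final perplexity of 51.5916
```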
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
2022-12-05T12:14:00Z
--- language: - tt license: apache-2.0 tags: - whisper-event datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Tatar - Kirill Milintsevich results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 tt type: mozilla-foundation/common_voice_11_0 config: tt split: test args: tt metrics: - name: Wer type: wer value: 29.091166477916197 --- # Whisper Small Tatar - Kirill Milintsevich This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.4745 - Wer: 29.1407 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0415 | 4.98 | 1000 | 0.3252 | 29.9512 | | 0.0041 | 9.95 | 2000 | 0.3982 | 29.4805 | | 0.0007 | 14.93 | 3000 | 0.4457 | 29.1513 | | 0.0003 | 19.9 | 4000 | 0.4665 | 29.0912 | | 0.0002 | 24.88 | 5000 | 0.4745 | 29.1407 |
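No inference example is given in the card; a minimal sketch with the transformers ASR pipeline is shown below. The hub id and audio file name are placeholders.

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="<user>/whisper-small-tt")
print(asr("sample_tatar.wav")["text"])
```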
CAMeL-Lab/bert-base-arabic-camelbert-msa-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
229
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-small-mlm-finetuned-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-small-mlm-finetuned-imdb This model is a fine-tuned version of [google/bert_uncased_L-4_H-512_A-8](https://huggingface.co/google/bert_uncased_L-4_H-512_A-8) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.3337 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.6923 | 0.64 | 500 | 2.4796 | | 2.602 | 1.28 | 1000 | 2.4423 | | 2.5672 | 1.92 | 1500 | 2.4226 | | 2.5252 | 2.56 | 2000 | 2.4148 | | 2.5107 | 3.2 | 2500 | 2.3978 | | 2.4948 | 3.84 | 3000 | 2.3839 | | 2.463 | 4.48 | 3500 | 2.3739 | | 2.4584 | 5.12 | 4000 | 2.3726 | | 2.4367 | 5.75 | 4500 | 2.3515 | | 2.4308 | 6.39 | 5000 | 2.3565 | | 2.411 | 7.03 | 5500 | 2.3540 | | 2.4026 | 7.67 | 6000 | 2.3491 | | 2.3956 | 8.31 | 6500 | 2.3297 | | 2.3864 | 8.95 | 7000 | 2.3338 | | 2.3799 | 9.59 | 7500 | 2.3310 | | 2.3575 | 10.23 | 8000 | 2.3337 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
CAUKiel/JavaBERT-uncased
[ "pytorch", "safetensors", "bert", "fill-mask", "java", "code", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: all-roberta-large-v1-credit_cards-16-16-5-oos results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # all-roberta-large-v1-credit_cards-16-16-5-oos This model is a fine-tuned version of [sentence-transformers/all-roberta-large-v1](https://huggingface.co/sentence-transformers/all-roberta-large-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3376 - Accuracy: 0.3186 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.75 | 1.0 | 1 | 2.5769 | 0.2389 | | 2.178 | 2.0 | 2 | 2.4879 | 0.2389 | | 1.769 | 3.0 | 3 | 2.4180 | 0.2566 | | 1.4703 | 4.0 | 4 | 2.3657 | 0.3097 | | 1.2711 | 5.0 | 5 | 2.3376 | 0.3186 | ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
CAUKiel/JavaBERT
[ "pytorch", "safetensors", "bert", "fill-mask", "code", "arxiv:2110.10404", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
388
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 192.10 +/- 40.31 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3)
```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# repo_id and filename are placeholders; check the model repo for the actual checkpoint name
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
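After loading, the checkpoint can be evaluated with the usual stable-baselines3 helper; this assumes the `model` object from the snippet above and a locally installed Box2D backend.

```python
import gym
from stable_baselines3.common.evaluation import evaluate_policy

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```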
CBreit00/DialoGPT_small_Rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
CL/safe-math-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: Redex-NL-RX-Synth-t5-small-finetuned-en-to-regex-finetuned-en-to-regex results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Redex-NL-RX-Synth-t5-small-finetuned-en-to-regex-finetuned-en-to-regex This model is a fine-tuned version of [rymaju/NL-RX-Synth-t5-small-finetuned-en-to-regex](https://huggingface.co/rymaju/NL-RX-Synth-t5-small-finetuned-en-to-regex) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0811 - Semantic-accuracy: 0.912 - Gen Len: 10.71 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Semantic-accuracy | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-----------------:|:-------:| | 0.1767 | 1.0 | 563 | 0.0710 | 0.916 | 10.766 | | 0.0798 | 2.0 | 1126 | 0.0700 | 0.922 | 10.746 | | 0.0677 | 3.0 | 1689 | 0.0711 | 0.92 | 10.732 | | 0.0602 | 4.0 | 2252 | 0.0635 | 0.92 | 10.722 | | 0.0552 | 5.0 | 2815 | 0.0693 | 0.932 | 10.75 | | 0.05 | 6.0 | 3378 | 0.0746 | 0.922 | 10.778 | | 0.0486 | 7.0 | 3941 | 0.0743 | 0.914 | 10.726 | | 0.0462 | 8.0 | 4504 | 0.0712 | 0.902 | 10.772 | | 0.0435 | 9.0 | 5067 | 0.0710 | 0.92 | 10.724 | | 0.0442 | 10.0 | 5630 | 0.0799 | 0.896 | 10.69 | | 0.043 | 11.0 | 6193 | 0.0754 | 0.914 | 10.72 | | 0.0412 | 12.0 | 6756 | 0.0728 | 0.91 | 10.706 | | 0.0395 | 13.0 | 7319 | 0.0755 | 0.914 | 10.71 | | 0.0397 | 14.0 | 7882 | 0.0799 | 0.91 | 10.7 | | 0.0389 | 15.0 | 8445 | 0.0778 | 0.908 | 10.684 | | 0.0379 | 16.0 | 9008 | 0.0801 | 0.914 | 10.7 | | 0.0369 | 17.0 | 9571 | 0.0806 | 0.912 | 10.708 | | 0.0364 | 17.76 | 10000 | 0.0811 | 0.912 | 10.71 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
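The card does not show how to query the model. A usage sketch is below; the hub id is a placeholder, the English description is an arbitrary example, and whether the checkpoint expects a task prefix is not stated in the card.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "<user>/Redex-NL-RX-Synth-t5-small-finetuned-en-to-regex-finetuned-en-to-regex"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("lines containing the string dog before a number", return_tensors="pt")
outputs = model.generate(**inputs, max_length=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```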
CLAck/en-km
[ "pytorch", "marian", "text2text-generation", "transformers", "translation", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2022-12-06T04:23:02Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilgpt2-fda results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-fda This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.7315 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 3.1223 | 1.0 | 7958 | 2.9579 | | 2.9033 | 2.0 | 15916 | 2.7765 | | 2.8392 | 3.0 | 23874 | 2.7315 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.11.0+cu115 - Datasets 2.7.1 - Tokenizers 0.13.2
CLEE/CLEE
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-05T13:43:49Z
--- tags: - spacy - token-classification language: - en license: mit model-index: - name: en_Coff_Ev2 results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.9922248804 - name: NER Recall type: recall value: 0.9916317992 - name: NER F Score type: f_score value: 0.9919282511 --- Your Coffee at the Speed of Sound | Feature | Description | | --- | --- | | **Name** | `en_Coff_Ev2` | | **Version** | `1.1.6` | | **spaCy** | `>=3.4.3,<3.5.0` | | **Default Pipeline** | `tok2vec`, `ner` | | **Components** | `tok2vec`, `ner` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | n/a | | **License** | `MIT` | | **Author** | [Chris Bruinsma,Iris Chi,Jack Felciano,Jeffrey Li,Dustin Paden]() | ### Label Scheme <details> <summary>View label scheme (18 labels for 1 components)</summary> | Component | Labels | | --- | --- | | **`ner`** | `Anti`, `Brew Style`, `add-on`, `drink`, `extra`, `hot breakfast`, `milk`, `milk texture`, `pastry`, `pump quantity`, `roast`, `shot quality`, `shot quantity`, `size`, `syrup`, `temperature`, `toppings`, `upside-down` | </details> ### Accuracy | Type | Score | | --- | --- | | `ENTS_F` | 99.19 | | `ENTS_P` | 99.22 | | `ENTS_R` | 99.16 | | `TOK2VEC_LOSS` | 58625.70 | | `NER_LOSS` | 168185.77 |
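Since no usage snippet is included, here is the standard way to load an installed spaCy pipeline package; the drink order in the example is made up.

```python
import spacy

# assumes the en_Coff_Ev2 package has been installed (e.g. from its wheel)
nlp = spacy.load("en_Coff_Ev2")
doc = nlp("large iced oat milk latte with two pumps of vanilla")
print([(ent.text, ent.label_) for ent in doc.ents])
```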
CLTL/MedRoBERTa.nl
[ "pytorch", "roberta", "fill-mask", "nl", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,988
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### ssmoky-v2-512-diffusion Dreambooth model trained by guumaster with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept:
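For running the concept outside the linked notebooks, a plain diffusers sketch is shown below. The repo id is inferred from the author and model name above, and the prompt token is a placeholder — Dreambooth concepts usually need the instance token used during training.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "guumaster/ssmoky-v2-512-diffusion",  # assumed repo id
    torch_dtype=torch.float16,
).to("cuda")
image = pipe("a photo of <instance-token>").images[0]
image.save("ssmoky_sample.png")
```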
CLTL/icf-levels-mbw
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: all-roberta-large-v1-kitchen_and_dining-2-16-5-oos results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # all-roberta-large-v1-kitchen_and_dining-2-16-5-oos This model is a fine-tuned version of [sentence-transformers/all-roberta-large-v1](https://huggingface.co/sentence-transformers/all-roberta-large-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3560 - Accuracy: 0.2692 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7421 | 1.0 | 1 | 2.5878 | 0.2012 | | 2.1065 | 2.0 | 2 | 2.4975 | 0.2012 | | 1.5994 | 3.0 | 3 | 2.4274 | 0.2249 | | 1.1739 | 4.0 | 4 | 2.3808 | 0.2456 | | 1.083 | 5.0 | 5 | 2.3560 | 0.2692 | ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
CM-CA/Cartman
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- model_creators: - Jordan Painter, Diptesh Kanojia tags: - generated_from_trainer metrics: - accuracy - precision - recall - f1 model-index: - name: bertweet-base-finetuned-SARC-combined-DS results: [] widget: - text: "wow, i mean who would have thought" --- # Utilising Weak Supervision to Create S3D: A Sarcasm Annotated Dataset This is the repository for the S3D dataset published at EMNLP 2022. The dataset can help build sarcasm detection models. # bertweet-base-finetuned-SARC-combined-DS This model is a fine-tuned version of [vinai/bertweet-base](https://huggingface.co/vinai/bertweet-base) on our combined sarcasm dataset. It achieves the following results on the evaluation set: - Loss: 1.4624 - Accuracy: 0.7611 - Precision: 0.7611 - Recall: 0.7611 - F1: 0.7611 ## Model description The given description for BERTweet by VinAI is as follows: <br> BERTweet is the first public large-scale language model pre-trained for English Tweets. BERTweet is trained based on the RoBERTa pre-training procedure. The corpus used to pre-train BERTweet consists of 850M English Tweets (16B word tokens ~ 80GB), containing 845M Tweets streamed from 01/2012 to 08/2019 and 5M Tweets related to the COVID-19 pandemic. <br> ## Training and evaluation data This [vinai/bertweet-base](https://huggingface.co/vinai/bertweet-base) model was finetuned on our combined sarcasm dataset. This dataset was created to aid the building of sarcasm detection models. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 16 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:------:|:---------------:|:--------:|:---------:|:------:|:------:| | 0.4319 | 4.0 | 44819 | 0.5049 | 0.7790 | 0.7796 | 0.7789 | 0.7789 | | 0.2835 | 8.0 | 89638 | 0.6475 | 0.7663 | 0.7664 | 0.7663 | 0.7663 | | 0.1797 | 12.0 | 134457 | 0.8746 | 0.7638 | 0.7639 | 0.7637 | 0.7637 | | 0.1219 | 16.0 | 179276 | 1.0595 | 0.7585 | 0.7597 | 0.7587 | 0.7583 | | 0.0905 | 20.0 | 224095 | 1.2115 | 0.7611 | 0.7612 | 0.7612 | 0.7611 | | 0.0728 | 24.0 | 268914 | 1.3644 | 0.7628 | 0.7629 | 0.7627 | 0.7627 | | 0.0612 | 28.0 | 313733 | 1.4624 | 0.7611 | 0.7611 | 0.7611 | 0.7611 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.10.1+cu111 - Datasets 2.3.2 - Tokenizers 0.12.1
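For quick inference, a hedged sketch with the `transformers` text-classification pipeline is shown below; the repository id is an assumption (the card does not give the namespace) and the label names depend on how the classification head was configured.

```python
from transformers import pipeline

# Assumed repo id for the fine-tuned checkpoint; replace with the actual location.
sarcasm_clf = pipeline(
    "text-classification",
    model="surrey-nlp/bertweet-base-finetuned-SARC-combined-DS",
)

# The widget example from the card.
print(sarcasm_clf("wow, i mean who would have thought"))
```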
CSResearcher/TestModel
[ "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
# Resnet34 Sketch Classifier This is a basic sketch classifier built by fine-tuning [ResNet-34](https://huggingface.co/microsoft/resnet-34) on the full [TU-Berlin dataset](https://huggingface.co/datasets/kmewhort/tu-berlin-png).
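A minimal inference sketch, assuming the fine-tuned checkpoint and its image processor are published under the hypothetical repo id used below.

```python
from PIL import Image
from transformers import pipeline

# Hypothetical repo id for the fine-tuned classifier; point this at the actual checkpoint.
sketch_clf = pipeline("image-classification", model="kmewhort/resnet34-sketch-classifier")

sketch = Image.open("my_sketch.png").convert("RGB")
print(sketch_clf(sketch, top_k=5))
```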
CSZay/bart
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('ClawCardMirror/sd-class-butterflies-32') image = pipeline().images[0] image ```
CTBC/ATS
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer model-index: - name: bert-base-historic-multilingual-cased-squad-nl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-historic-multilingual-cased-squad-nl This model is a fine-tuned version of [dbmdz/bert-base-historic-multilingual-cased](https://huggingface.co/dbmdz/bert-base-historic-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.5356 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.878 | 1.0 | 4820 | 1.5704 | | 1.5881 | 2.0 | 9640 | 1.5356 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
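A short extractive question-answering sketch; the repository id is hypothetical because the card does not state where the fine-tuned checkpoint is hosted, and the Dutch question/context pair is only an illustration.

```python
from transformers import pipeline

# Hypothetical repo id; replace with the actual fine-tuned checkpoint.
qa = pipeline(
    "question-answering",
    model="my-org/bert-base-historic-multilingual-cased-squad-nl",
)

# "Where was the newspaper printed?" against a short note about an 1885 Amsterdam paper.
result = qa(
    question="Waar werd de krant gedrukt?",
    context="De krant werd in 1885 gedrukt in Amsterdam en verscheen wekelijks.",
)
print(result["answer"], result["score"])
```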
Cameron/BERT-Jigsaw
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
--- language: fi tags: - whisper-medium - mozilla-foundation/common_voice_11_0 - Finnish - whisper-event metrics: - wer license: creativeml-openrail-m ---
Cameron/BERT-jigsaw-identityhate
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('LKyluk/sd-class-butterflies-32') image = pipeline().images[0] image ```
Cameron/BERT-mdgender-convai-ternary
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38
2022-12-05T14:55:25Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('ClawCardMirror/sd-class-butterflies-64') image = pipeline().images[0] image ```
Canadiancaleb/DialoGPT-small-jesse
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2022-12-05T15:03:02Z
--- license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image inference: true ---
CapitainData/wav2vec2-large-xlsr-turkish-demo-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image inference: true ---
Capreolus/birch-bert-large-car_mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: ko license: apache-2.0 --- # hyunwoo3235/t5-v1_1-base-ko [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) Version 1.1 trained on a Korean corpus. t5-v1_1-base-ko is a T5 v1.1 model trained on a Korean corpus. BBPE was used to prevent out-of-vocabulary issues, and since morphological analysis was observed to help performance in HyperCLOVA, MeCab was used during tokenizer training so that morphemes are not tokenized in odd ways. ## Usage ```python from transformers import AutoTokenizer, T5ForConditionalGeneration tokenizer = AutoTokenizer.from_pretrained('hyunwoo3235/t5-v1_1-base-ko') model = T5ForConditionalGeneration.from_pretrained('hyunwoo3235/t5-v1_1-base-ko') ```
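Since T5 v1.1 checkpoints are pretrained only on the span-corruption objective (no supervised mixtures), the model is meant to be fine-tuned before downstream use; a quick sanity check is sentinel-token infilling, sketched below under the assumption that this tokenizer exposes the standard `<extra_id_*>` sentinel tokens.

```python
from transformers import AutoTokenizer, T5ForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("hyunwoo3235/t5-v1_1-base-ko")
model = T5ForConditionalGeneration.from_pretrained("hyunwoo3235/t5-v1_1-base-ko")

# "The capital of South Korea is <extra_id_0>." -- mask one span and let the model fill it in.
inputs = tokenizer("대한민국의 수도는 <extra_id_0> 이다.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=False))
```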
Capreolus/birch-bert-large-mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1566 with parameters: ``` {'batch_size': 64, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 2, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 314, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 64, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
CarlosPR/mt5-spanish-memmories-analysis
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - th license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: Whisper Tiny Thai results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Tiny Thai This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - epoch: 8.0 - eval_loss: 0.4077 - eval_runtime: 1691.5217 - eval_samples_per_second: 6.462 - eval_steps_per_second: 0.202 - eval_wer: 92.7519 - step: 4000 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
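A minimal transcription sketch via the automatic-speech-recognition pipeline; the repository id and audio file name are assumptions (the card only gives the model name), and the chunk length is illustrative.

```python
from transformers import pipeline

# Hypothetical repo id for this fine-tune; replace with the actual checkpoint location.
asr = pipeline(
    "automatic-speech-recognition",
    model="my-org/whisper-tiny-th",
    chunk_length_s=30,
)

print(asr("thai_sample.wav")["text"])
```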
CarlosTron/Yo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-05T15:39:07Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute anime faces. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('GV05/sd-class-anime-32') image = pipeline().images[0] image ```
Carolhuehuehuehue/Sla
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: tonality results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tonality This model is a fine-tuned version of [distilbert-base-cased](https://huggingface.co/distilbert-base-cased) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cpu - Datasets 2.7.1 - Tokenizers 0.13.2
CasualHomie/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: distilbart-cnn-12-6-finetuned-1.3.2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbart-cnn-12-6-finetuned-1.3.2 This model is a fine-tuned version of [sshleifer/distilbart-cnn-12-6](https://huggingface.co/sshleifer/distilbart-cnn-12-6) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.8049 - Rouge1: 50.5143 - Rouge2: 23.2481 - Rougel: 34.135 - Rougelsum: 46.4261 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:| | 2.1377 | 1.0 | 586 | 1.8792 | 49.8737 | 22.7881 | 33.6698 | 45.8037 | | 1.5731 | 2.0 | 1172 | 1.8049 | 50.5143 | 23.2481 | 34.135 | 46.4261 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
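A short summarization sketch; the repository id is an assumption because the card does not give the namespace, and the article text is only a placeholder.

```python
from transformers import pipeline

# Hypothetical location of the fine-tuned checkpoint; replace with the actual repo id.
summarizer = pipeline("summarization", model="my-org/distilbart-cnn-12-6-finetuned-1.3.2")

article = (
    "The city council met on Monday to discuss the new transit plan. "
    "Officials said construction of the first line could begin next year, "
    "pending a final budget vote in the spring."
)
print(summarizer(article)[0]["summary_text"])
```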
dccuchile/albert-large-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2734 - Precision: 0.0 - Recall: 0.0 - F1: 0.0 - Accuracy: 0.9388 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:---:|:--------:| | No log | 1.0 | 34 | 0.3396 | 0.0 | 0.0 | 0.0 | 0.9388 | | No log | 2.0 | 68 | 0.3204 | 0.0 | 0.0 | 0.0 | 0.9388 | | No log | 3.0 | 102 | 0.2837 | 0.0 | 0.0 | 0.0 | 0.9388 | | No log | 4.0 | 136 | 0.2751 | 0.0 | 0.0 | 0.0 | 0.9388 | | No log | 5.0 | 170 | 0.2734 | 0.0 | 0.0 | 0.0 | 0.9388 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.0 - Tokenizers 0.13.2
dccuchile/albert-tiny-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: all-roberta-large-v1-home-2-16-5-oos results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # all-roberta-large-v1-home-2-16-5-oos This model is a fine-tuned version of [sentence-transformers/all-roberta-large-v1](https://huggingface.co/sentence-transformers/all-roberta-large-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3789 - Accuracy: 0.3356 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7614 | 1.0 | 1 | 2.6146 | 0.1889 | | 2.2082 | 2.0 | 2 | 2.5232 | 0.2667 | | 1.8344 | 3.0 | 3 | 2.4516 | 0.2933 | | 1.4601 | 4.0 | 4 | 2.4033 | 0.3267 | | 1.2748 | 5.0 | 5 | 2.3789 | 0.3356 | ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
dccuchile/albert-xlarge-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- tags: - generated_from_keras_callback model-index: - name: dung1308/RM_system_not_mixed__NLP_model_80_20_CPU results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # dung1308/RM_system_not_mixed__NLP_model_80_20_CPU This model is a fine-tuned version of [vinai/phobert-base](https://huggingface.co/vinai/phobert-base) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 4.2681 - Validation Loss: 4.2461 - Epoch: 3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': -356, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 5.1497 | 4.3969 | 0 | | 4.3110 | 4.2424 | 1 | | 4.2373 | 4.2722 | 2 | | 4.2681 | 4.2461 | 3 | ### Framework versions - Transformers 4.18.0 - TensorFlow 2.8.0 - Datasets 2.7.0 - Tokenizers 0.11.0
dccuchile/albert-xxlarge-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
These are a few mix models that I created in my spare time; whatever I consider to be the primary model in the mix will have its name before the mixing method. Midnight Mixes are the latest mixes; in theory they should have an even spread of all models in the mix. Spiced models follow an idea to keep as much of one model preserved as possible when mixed with something else; these will generally be anything mixed at a 25 weighted sum or lower. Bake models follow a very easy-to-track mix of the models; the first part of the mix is the model that was primarily used. Potluck models end up being a large mix of models, with one model still having a majority in the mix; mixing methods can get messy, so true recipes can be hard to keep track of. Below are a few examples of what I consider to be the best of these mixes thus far; I will update this page the more I mess with it! All examples use the WD 1.4 VAE kl-f8-anime2.ckpt and are done on clip skip 2. ### Examples : ## Midnight Mixes: # DustStorm Waistlands ![DustormWaistland.png](https://s3.amazonaws.com/moonup/production/uploads/1672446416453-638e1f96a6f0c2299f2e3974.png) # CherryBlossoms ![tmp8dsfwmei.png](https://s3.amazonaws.com/moonup/production/uploads/1672447751507-638e1f96a6f0c2299f2e3974.png) # Cyber Fox ![tmpvuc6nogn.png](https://s3.amazonaws.com/moonup/production/uploads/1672446950824-638e1f96a6f0c2299f2e3974.png) ## Other Mixes : # Fire Fox ![fox1.png](https://s3.amazonaws.com/moonup/production/uploads/1671932500321-638e1f96a6f0c2299f2e3974.png) ![fox2.png](https://s3.amazonaws.com/moonup/production/uploads/1671932500345-638e1f96a6f0c2299f2e3974.png) # Mecha ![Mecha1.png](https://s3.amazonaws.com/moonup/production/uploads/1671932500066-638e1f96a6f0c2299f2e3974.png) ![Mecha2.png](https://s3.amazonaws.com/moonup/production/uploads/1671932500067-638e1f96a6f0c2299f2e3974.png) # Cyber Fox ![cyber1.png](https://s3.amazonaws.com/moonup/production/uploads/1671932500331-638e1f96a6f0c2299f2e3974.png) ![cyber2.png](https://s3.amazonaws.com/moonup/production/uploads/1671932500371-638e1f96a6f0c2299f2e3974.png)
dccuchile/albert-xxlarge-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/detoxify-pile-chunk3-0-50000 - tomekkorbak/detoxify-pile-chunk3-50000-100000 - tomekkorbak/detoxify-pile-chunk3-100000-150000 - tomekkorbak/detoxify-pile-chunk3-150000-200000 - tomekkorbak/detoxify-pile-chunk3-200000-250000 - tomekkorbak/detoxify-pile-chunk3-250000-300000 - tomekkorbak/detoxify-pile-chunk3-300000-350000 - tomekkorbak/detoxify-pile-chunk3-350000-400000 - tomekkorbak/detoxify-pile-chunk3-400000-450000 - tomekkorbak/detoxify-pile-chunk3-450000-500000 - tomekkorbak/detoxify-pile-chunk3-500000-550000 - tomekkorbak/detoxify-pile-chunk3-550000-600000 - tomekkorbak/detoxify-pile-chunk3-600000-650000 - tomekkorbak/detoxify-pile-chunk3-650000-700000 - tomekkorbak/detoxify-pile-chunk3-700000-750000 - tomekkorbak/detoxify-pile-chunk3-750000-800000 - tomekkorbak/detoxify-pile-chunk3-800000-850000 - tomekkorbak/detoxify-pile-chunk3-850000-900000 - tomekkorbak/detoxify-pile-chunk3-900000-950000 - tomekkorbak/detoxify-pile-chunk3-950000-1000000 - tomekkorbak/detoxify-pile-chunk3-1000000-1050000 - tomekkorbak/detoxify-pile-chunk3-1050000-1100000 - tomekkorbak/detoxify-pile-chunk3-1100000-1150000 - tomekkorbak/detoxify-pile-chunk3-1150000-1200000 - tomekkorbak/detoxify-pile-chunk3-1200000-1250000 - tomekkorbak/detoxify-pile-chunk3-1250000-1300000 - tomekkorbak/detoxify-pile-chunk3-1300000-1350000 - tomekkorbak/detoxify-pile-chunk3-1350000-1400000 - tomekkorbak/detoxify-pile-chunk3-1400000-1450000 - tomekkorbak/detoxify-pile-chunk3-1450000-1500000 - tomekkorbak/detoxify-pile-chunk3-1500000-1550000 - tomekkorbak/detoxify-pile-chunk3-1550000-1600000 - tomekkorbak/detoxify-pile-chunk3-1600000-1650000 - tomekkorbak/detoxify-pile-chunk3-1650000-1700000 - tomekkorbak/detoxify-pile-chunk3-1700000-1750000 - tomekkorbak/detoxify-pile-chunk3-1750000-1800000 - tomekkorbak/detoxify-pile-chunk3-1800000-1850000 - tomekkorbak/detoxify-pile-chunk3-1850000-1900000 - tomekkorbak/detoxify-pile-chunk3-1900000-1950000 model-index: - name: upbeat_ramanujan results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # upbeat_ramanujan This model was trained from scratch on the tomekkorbak/detoxify-pile-chunk3-0-50000, the tomekkorbak/detoxify-pile-chunk3-50000-100000, the tomekkorbak/detoxify-pile-chunk3-100000-150000, the tomekkorbak/detoxify-pile-chunk3-150000-200000, the tomekkorbak/detoxify-pile-chunk3-200000-250000, the tomekkorbak/detoxify-pile-chunk3-250000-300000, the tomekkorbak/detoxify-pile-chunk3-300000-350000, the tomekkorbak/detoxify-pile-chunk3-350000-400000, the tomekkorbak/detoxify-pile-chunk3-400000-450000, the tomekkorbak/detoxify-pile-chunk3-450000-500000, the tomekkorbak/detoxify-pile-chunk3-500000-550000, the tomekkorbak/detoxify-pile-chunk3-550000-600000, the tomekkorbak/detoxify-pile-chunk3-600000-650000, the tomekkorbak/detoxify-pile-chunk3-650000-700000, the tomekkorbak/detoxify-pile-chunk3-700000-750000, the tomekkorbak/detoxify-pile-chunk3-750000-800000, the tomekkorbak/detoxify-pile-chunk3-800000-850000, the tomekkorbak/detoxify-pile-chunk3-850000-900000, the tomekkorbak/detoxify-pile-chunk3-900000-950000, the tomekkorbak/detoxify-pile-chunk3-950000-1000000, the tomekkorbak/detoxify-pile-chunk3-1000000-1050000, the tomekkorbak/detoxify-pile-chunk3-1050000-1100000, the tomekkorbak/detoxify-pile-chunk3-1100000-1150000, the tomekkorbak/detoxify-pile-chunk3-1150000-1200000, the tomekkorbak/detoxify-pile-chunk3-1200000-1250000, the tomekkorbak/detoxify-pile-chunk3-1250000-1300000, the tomekkorbak/detoxify-pile-chunk3-1300000-1350000, the tomekkorbak/detoxify-pile-chunk3-1350000-1400000, the tomekkorbak/detoxify-pile-chunk3-1400000-1450000, the tomekkorbak/detoxify-pile-chunk3-1450000-1500000, the tomekkorbak/detoxify-pile-chunk3-1500000-1550000, the tomekkorbak/detoxify-pile-chunk3-1550000-1600000, the tomekkorbak/detoxify-pile-chunk3-1600000-1650000, the tomekkorbak/detoxify-pile-chunk3-1650000-1700000, the tomekkorbak/detoxify-pile-chunk3-1700000-1750000, the tomekkorbak/detoxify-pile-chunk3-1750000-1800000, the tomekkorbak/detoxify-pile-chunk3-1800000-1850000, the tomekkorbak/detoxify-pile-chunk3-1850000-1900000 and the tomekkorbak/detoxify-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 64 - total_train_batch_size: 1024 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 3147 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/detoxify-pile-chunk3-0-50000', 'tomekkorbak/detoxify-pile-chunk3-50000-100000', 'tomekkorbak/detoxify-pile-chunk3-100000-150000', 'tomekkorbak/detoxify-pile-chunk3-150000-200000', 'tomekkorbak/detoxify-pile-chunk3-200000-250000', 'tomekkorbak/detoxify-pile-chunk3-250000-300000', 'tomekkorbak/detoxify-pile-chunk3-300000-350000', 'tomekkorbak/detoxify-pile-chunk3-350000-400000', 'tomekkorbak/detoxify-pile-chunk3-400000-450000', 'tomekkorbak/detoxify-pile-chunk3-450000-500000', 'tomekkorbak/detoxify-pile-chunk3-500000-550000', 'tomekkorbak/detoxify-pile-chunk3-550000-600000', 'tomekkorbak/detoxify-pile-chunk3-600000-650000', 'tomekkorbak/detoxify-pile-chunk3-650000-700000', 'tomekkorbak/detoxify-pile-chunk3-700000-750000', 'tomekkorbak/detoxify-pile-chunk3-750000-800000', 'tomekkorbak/detoxify-pile-chunk3-800000-850000', 'tomekkorbak/detoxify-pile-chunk3-850000-900000', 'tomekkorbak/detoxify-pile-chunk3-900000-950000', 'tomekkorbak/detoxify-pile-chunk3-950000-1000000', 'tomekkorbak/detoxify-pile-chunk3-1000000-1050000', 'tomekkorbak/detoxify-pile-chunk3-1050000-1100000', 'tomekkorbak/detoxify-pile-chunk3-1100000-1150000', 'tomekkorbak/detoxify-pile-chunk3-1150000-1200000', 'tomekkorbak/detoxify-pile-chunk3-1200000-1250000', 'tomekkorbak/detoxify-pile-chunk3-1250000-1300000', 'tomekkorbak/detoxify-pile-chunk3-1300000-1350000', 'tomekkorbak/detoxify-pile-chunk3-1350000-1400000', 'tomekkorbak/detoxify-pile-chunk3-1400000-1450000', 'tomekkorbak/detoxify-pile-chunk3-1450000-1500000', 'tomekkorbak/detoxify-pile-chunk3-1500000-1550000', 'tomekkorbak/detoxify-pile-chunk3-1550000-1600000', 'tomekkorbak/detoxify-pile-chunk3-1600000-1650000', 'tomekkorbak/detoxify-pile-chunk3-1650000-1700000', 'tomekkorbak/detoxify-pile-chunk3-1700000-1750000', 'tomekkorbak/detoxify-pile-chunk3-1750000-1800000', 'tomekkorbak/detoxify-pile-chunk3-1800000-1850000', 'tomekkorbak/detoxify-pile-chunk3-1850000-1900000', 'tomekkorbak/detoxify-pile-chunk3-1900000-1950000'], 'is_split_by_sentences': True}, 'generation': {'every_n_steps': 16, 'force_call_on': [25354], 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}, {'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'challenging_rtp', 'num_samples': 2048, 'prompts_path': 'resources/challenging_rtp.jsonl'}], 'scorer_config': {'device': 'cuda:0'}}, 'kl_gpt3_callback': {'every_n_steps': 16, 'force_call_on': [25354], 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 
'model_kwargs': {'value_head_config': {'is_detached': False}}, 'path_or_name': 'gpt2'}, 'objective': {'alpha': 0.5, 'beta': 10, 'name': 'AWR'}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 1024, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'upbeat_ramanujan', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.001, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 'training_output104340', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 1673, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/6u4q22ur
dccuchile/albert-xxlarge-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/detoxify-pile-chunk3-0-50000 - tomekkorbak/detoxify-pile-chunk3-50000-100000 - tomekkorbak/detoxify-pile-chunk3-100000-150000 - tomekkorbak/detoxify-pile-chunk3-150000-200000 - tomekkorbak/detoxify-pile-chunk3-200000-250000 - tomekkorbak/detoxify-pile-chunk3-250000-300000 - tomekkorbak/detoxify-pile-chunk3-300000-350000 - tomekkorbak/detoxify-pile-chunk3-350000-400000 - tomekkorbak/detoxify-pile-chunk3-400000-450000 - tomekkorbak/detoxify-pile-chunk3-450000-500000 - tomekkorbak/detoxify-pile-chunk3-500000-550000 - tomekkorbak/detoxify-pile-chunk3-550000-600000 - tomekkorbak/detoxify-pile-chunk3-600000-650000 - tomekkorbak/detoxify-pile-chunk3-650000-700000 - tomekkorbak/detoxify-pile-chunk3-700000-750000 - tomekkorbak/detoxify-pile-chunk3-750000-800000 - tomekkorbak/detoxify-pile-chunk3-800000-850000 - tomekkorbak/detoxify-pile-chunk3-850000-900000 - tomekkorbak/detoxify-pile-chunk3-900000-950000 - tomekkorbak/detoxify-pile-chunk3-950000-1000000 - tomekkorbak/detoxify-pile-chunk3-1000000-1050000 - tomekkorbak/detoxify-pile-chunk3-1050000-1100000 - tomekkorbak/detoxify-pile-chunk3-1100000-1150000 - tomekkorbak/detoxify-pile-chunk3-1150000-1200000 - tomekkorbak/detoxify-pile-chunk3-1200000-1250000 - tomekkorbak/detoxify-pile-chunk3-1250000-1300000 - tomekkorbak/detoxify-pile-chunk3-1300000-1350000 - tomekkorbak/detoxify-pile-chunk3-1350000-1400000 - tomekkorbak/detoxify-pile-chunk3-1400000-1450000 - tomekkorbak/detoxify-pile-chunk3-1450000-1500000 - tomekkorbak/detoxify-pile-chunk3-1500000-1550000 - tomekkorbak/detoxify-pile-chunk3-1550000-1600000 - tomekkorbak/detoxify-pile-chunk3-1600000-1650000 - tomekkorbak/detoxify-pile-chunk3-1650000-1700000 - tomekkorbak/detoxify-pile-chunk3-1700000-1750000 - tomekkorbak/detoxify-pile-chunk3-1750000-1800000 - tomekkorbak/detoxify-pile-chunk3-1800000-1850000 - tomekkorbak/detoxify-pile-chunk3-1850000-1900000 - tomekkorbak/detoxify-pile-chunk3-1900000-1950000 model-index: - name: musing_hoover results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # musing_hoover This model was trained from scratch on the tomekkorbak/detoxify-pile-chunk3-0-50000, the tomekkorbak/detoxify-pile-chunk3-50000-100000, the tomekkorbak/detoxify-pile-chunk3-100000-150000, the tomekkorbak/detoxify-pile-chunk3-150000-200000, the tomekkorbak/detoxify-pile-chunk3-200000-250000, the tomekkorbak/detoxify-pile-chunk3-250000-300000, the tomekkorbak/detoxify-pile-chunk3-300000-350000, the tomekkorbak/detoxify-pile-chunk3-350000-400000, the tomekkorbak/detoxify-pile-chunk3-400000-450000, the tomekkorbak/detoxify-pile-chunk3-450000-500000, the tomekkorbak/detoxify-pile-chunk3-500000-550000, the tomekkorbak/detoxify-pile-chunk3-550000-600000, the tomekkorbak/detoxify-pile-chunk3-600000-650000, the tomekkorbak/detoxify-pile-chunk3-650000-700000, the tomekkorbak/detoxify-pile-chunk3-700000-750000, the tomekkorbak/detoxify-pile-chunk3-750000-800000, the tomekkorbak/detoxify-pile-chunk3-800000-850000, the tomekkorbak/detoxify-pile-chunk3-850000-900000, the tomekkorbak/detoxify-pile-chunk3-900000-950000, the tomekkorbak/detoxify-pile-chunk3-950000-1000000, the tomekkorbak/detoxify-pile-chunk3-1000000-1050000, the tomekkorbak/detoxify-pile-chunk3-1050000-1100000, the tomekkorbak/detoxify-pile-chunk3-1100000-1150000, the tomekkorbak/detoxify-pile-chunk3-1150000-1200000, the tomekkorbak/detoxify-pile-chunk3-1200000-1250000, the tomekkorbak/detoxify-pile-chunk3-1250000-1300000, the tomekkorbak/detoxify-pile-chunk3-1300000-1350000, the tomekkorbak/detoxify-pile-chunk3-1350000-1400000, the tomekkorbak/detoxify-pile-chunk3-1400000-1450000, the tomekkorbak/detoxify-pile-chunk3-1450000-1500000, the tomekkorbak/detoxify-pile-chunk3-1500000-1550000, the tomekkorbak/detoxify-pile-chunk3-1550000-1600000, the tomekkorbak/detoxify-pile-chunk3-1600000-1650000, the tomekkorbak/detoxify-pile-chunk3-1650000-1700000, the tomekkorbak/detoxify-pile-chunk3-1700000-1750000, the tomekkorbak/detoxify-pile-chunk3-1750000-1800000, the tomekkorbak/detoxify-pile-chunk3-1800000-1850000, the tomekkorbak/detoxify-pile-chunk3-1850000-1900000 and the tomekkorbak/detoxify-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 64 - total_train_batch_size: 1024 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 3147 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/detoxify-pile-chunk3-0-50000', 'tomekkorbak/detoxify-pile-chunk3-50000-100000', 'tomekkorbak/detoxify-pile-chunk3-100000-150000', 'tomekkorbak/detoxify-pile-chunk3-150000-200000', 'tomekkorbak/detoxify-pile-chunk3-200000-250000', 'tomekkorbak/detoxify-pile-chunk3-250000-300000', 'tomekkorbak/detoxify-pile-chunk3-300000-350000', 'tomekkorbak/detoxify-pile-chunk3-350000-400000', 'tomekkorbak/detoxify-pile-chunk3-400000-450000', 'tomekkorbak/detoxify-pile-chunk3-450000-500000', 'tomekkorbak/detoxify-pile-chunk3-500000-550000', 'tomekkorbak/detoxify-pile-chunk3-550000-600000', 'tomekkorbak/detoxify-pile-chunk3-600000-650000', 'tomekkorbak/detoxify-pile-chunk3-650000-700000', 'tomekkorbak/detoxify-pile-chunk3-700000-750000', 'tomekkorbak/detoxify-pile-chunk3-750000-800000', 'tomekkorbak/detoxify-pile-chunk3-800000-850000', 'tomekkorbak/detoxify-pile-chunk3-850000-900000', 'tomekkorbak/detoxify-pile-chunk3-900000-950000', 'tomekkorbak/detoxify-pile-chunk3-950000-1000000', 'tomekkorbak/detoxify-pile-chunk3-1000000-1050000', 'tomekkorbak/detoxify-pile-chunk3-1050000-1100000', 'tomekkorbak/detoxify-pile-chunk3-1100000-1150000', 'tomekkorbak/detoxify-pile-chunk3-1150000-1200000', 'tomekkorbak/detoxify-pile-chunk3-1200000-1250000', 'tomekkorbak/detoxify-pile-chunk3-1250000-1300000', 'tomekkorbak/detoxify-pile-chunk3-1300000-1350000', 'tomekkorbak/detoxify-pile-chunk3-1350000-1400000', 'tomekkorbak/detoxify-pile-chunk3-1400000-1450000', 'tomekkorbak/detoxify-pile-chunk3-1450000-1500000', 'tomekkorbak/detoxify-pile-chunk3-1500000-1550000', 'tomekkorbak/detoxify-pile-chunk3-1550000-1600000', 'tomekkorbak/detoxify-pile-chunk3-1600000-1650000', 'tomekkorbak/detoxify-pile-chunk3-1650000-1700000', 'tomekkorbak/detoxify-pile-chunk3-1700000-1750000', 'tomekkorbak/detoxify-pile-chunk3-1750000-1800000', 'tomekkorbak/detoxify-pile-chunk3-1800000-1850000', 'tomekkorbak/detoxify-pile-chunk3-1850000-1900000', 'tomekkorbak/detoxify-pile-chunk3-1900000-1950000'], 'is_split_by_sentences': True}, 'generation': {'every_n_steps': 16, 'force_call_on': [25354], 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}, {'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'challenging_rtp', 'num_samples': 2048, 'prompts_path': 'resources/challenging_rtp.jsonl'}], 'scorer_config': {'device': 'cuda:0'}}, 'kl_gpt3_callback': {'every_n_steps': 16, 'force_call_on': [25354], 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 
'model_kwargs': {'value_head_config': {'is_detached': False}}, 'path_or_name': 'gpt2'}, 'objective': {'alpha': 1, 'beta': 10, 'name': 'AWR'}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 1024, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'musing_hoover', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 'training_output104340', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 1673, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/1nm8napp
dccuchile/bert-base-spanish-wwm-cased-finetuned-qa-mlqa
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
digit fix is a merge block weight mix; In01 and In02 are at 1 and all the other blocks are at 0
dccuchile/bert-base-spanish-wwm-uncased-finetuned-ner
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -184.62 +/- 90.59 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
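A minimal usage sketch for the template above. The repo id and filename passed to `load_from_hub` are placeholders, since the card does not state where this checkpoint is published:

```python
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from huggingface_sb3 import load_from_hub

# Placeholder repo id / filename -- substitute the actual checkpoint location.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Roll out the loaded policy for a quick evaluation.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```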
dccuchile/bert-base-spanish-wwm-uncased-finetuned-pawsx
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- tags: - generated_from_trainer model-index: - name: simcse_finetuned_100k_64batch results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # simcse_finetuned_100k_64batch This model is a fine-tuned version of [bitsanlp/simcse_retrain_edos_100k_64batch](https://huggingface.co/bitsanlp/simcse_retrain_edos_100k_64batch) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 28 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
dccuchile/distilbert-base-spanish-uncased-finetuned-mldoc
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: all-roberta-large-v1-home-16-16-5-oos results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # all-roberta-large-v1-home-16-16-5-oos This model is a fine-tuned version of [sentence-transformers/all-roberta-large-v1](https://huggingface.co/sentence-transformers/all-roberta-large-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3789 - Accuracy: 0.3356 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 2.7614 | 1.0 | 1 | 2.6146 | 0.1889 | | 2.2082 | 2.0 | 2 | 2.5232 | 0.2667 | | 1.8344 | 3.0 | 3 | 2.4516 | 0.2933 | | 1.4601 | 4.0 | 4 | 2.4033 | 0.3267 | | 1.2748 | 5.0 | 5 | 2.3789 | 0.3356 | ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
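A hedged inference sketch for a text-classification checkpoint like this one. The model path is a placeholder, since the card does not state where the fine-tuned weights are published:

```python
from transformers import pipeline

# Placeholder path -- point this at the fine-tuned checkpoint (local dir or Hub repo).
classifier = pipeline("text-classification", model="path/to/all-roberta-large-v1-home-16-16-5-oos")
print(classifier("The thermostat in the living room is not responding."))
```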
dccuchile/distilbert-base-spanish-uncased-finetuned-qa-mlqa
[ "pytorch", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### scottpilgrim Dreambooth model trained by spooncats with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept: ![0](https://media.discordapp.net/attachments/705948050144886795/1049833690672410634/vkAf5eCZcQAAAABJRU5ErkJggg.png)
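A minimal `diffusers` inference sketch for a concept like this. The repo id and the prompt token are assumptions based on the concept and author names above, not confirmed by the card:

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id and instance token are assumptions ("spooncats/scottpilgrim", "scottpilgrim").
pipe = StableDiffusionPipeline.from_pretrained("spooncats/scottpilgrim", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a portrait of scottpilgrim, highly detailed").images[0]
image.save("scottpilgrim.png")
```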
dccuchile/distilbert-base-spanish-uncased-finetuned-xnli
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- language: - hi license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Hi - Swedish results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: sv-SE split: test args: 'config: hi, split: test' metrics: - name: Wer type: wer value: 20.25758920168858 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Hi - Swedish This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.2895 - Wer: 20.2576 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0645 | 1.29 | 1000 | 0.2895 | 20.2576 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
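A hedged transcription sketch for a fine-tuned Whisper checkpoint like this one. The model path is a placeholder, since the card does not state the published repo id:

```python
from transformers import pipeline

# Placeholder path -- point this at the fine-tuned Whisper checkpoint.
asr = pipeline("automatic-speech-recognition", model="path/to/whisper-small-sv")
print(asr("sample_swedish_audio.wav")["text"])
```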
CennetOguz/distilbert-base-uncased-finetuned-recipe-accelerate-1
[ "pytorch", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/pii-pile-chunk3-0-50000 - tomekkorbak/pii-pile-chunk3-50000-100000 - tomekkorbak/pii-pile-chunk3-100000-150000 - tomekkorbak/pii-pile-chunk3-150000-200000 - tomekkorbak/pii-pile-chunk3-200000-250000 - tomekkorbak/pii-pile-chunk3-250000-300000 - tomekkorbak/pii-pile-chunk3-300000-350000 - tomekkorbak/pii-pile-chunk3-350000-400000 - tomekkorbak/pii-pile-chunk3-400000-450000 - tomekkorbak/pii-pile-chunk3-450000-500000 - tomekkorbak/pii-pile-chunk3-500000-550000 - tomekkorbak/pii-pile-chunk3-550000-600000 - tomekkorbak/pii-pile-chunk3-600000-650000 - tomekkorbak/pii-pile-chunk3-650000-700000 - tomekkorbak/pii-pile-chunk3-700000-750000 - tomekkorbak/pii-pile-chunk3-750000-800000 - tomekkorbak/pii-pile-chunk3-800000-850000 - tomekkorbak/pii-pile-chunk3-850000-900000 - tomekkorbak/pii-pile-chunk3-900000-950000 - tomekkorbak/pii-pile-chunk3-950000-1000000 - tomekkorbak/pii-pile-chunk3-1000000-1050000 - tomekkorbak/pii-pile-chunk3-1050000-1100000 - tomekkorbak/pii-pile-chunk3-1100000-1150000 - tomekkorbak/pii-pile-chunk3-1150000-1200000 - tomekkorbak/pii-pile-chunk3-1200000-1250000 - tomekkorbak/pii-pile-chunk3-1250000-1300000 - tomekkorbak/pii-pile-chunk3-1300000-1350000 - tomekkorbak/pii-pile-chunk3-1350000-1400000 - tomekkorbak/pii-pile-chunk3-1400000-1450000 - tomekkorbak/pii-pile-chunk3-1450000-1500000 - tomekkorbak/pii-pile-chunk3-1500000-1550000 - tomekkorbak/pii-pile-chunk3-1550000-1600000 - tomekkorbak/pii-pile-chunk3-1600000-1650000 - tomekkorbak/pii-pile-chunk3-1650000-1700000 - tomekkorbak/pii-pile-chunk3-1700000-1750000 - tomekkorbak/pii-pile-chunk3-1750000-1800000 - tomekkorbak/pii-pile-chunk3-1800000-1850000 - tomekkorbak/pii-pile-chunk3-1850000-1900000 - tomekkorbak/pii-pile-chunk3-1900000-1950000 model-index: - name: affectionate_wescoff results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # affectionate_wescoff This model was trained from scratch on the tomekkorbak/pii-pile-chunk3-0-50000, the tomekkorbak/pii-pile-chunk3-50000-100000, the tomekkorbak/pii-pile-chunk3-100000-150000, the tomekkorbak/pii-pile-chunk3-150000-200000, the tomekkorbak/pii-pile-chunk3-200000-250000, the tomekkorbak/pii-pile-chunk3-250000-300000, the tomekkorbak/pii-pile-chunk3-300000-350000, the tomekkorbak/pii-pile-chunk3-350000-400000, the tomekkorbak/pii-pile-chunk3-400000-450000, the tomekkorbak/pii-pile-chunk3-450000-500000, the tomekkorbak/pii-pile-chunk3-500000-550000, the tomekkorbak/pii-pile-chunk3-550000-600000, the tomekkorbak/pii-pile-chunk3-600000-650000, the tomekkorbak/pii-pile-chunk3-650000-700000, the tomekkorbak/pii-pile-chunk3-700000-750000, the tomekkorbak/pii-pile-chunk3-750000-800000, the tomekkorbak/pii-pile-chunk3-800000-850000, the tomekkorbak/pii-pile-chunk3-850000-900000, the tomekkorbak/pii-pile-chunk3-900000-950000, the tomekkorbak/pii-pile-chunk3-950000-1000000, the tomekkorbak/pii-pile-chunk3-1000000-1050000, the tomekkorbak/pii-pile-chunk3-1050000-1100000, the tomekkorbak/pii-pile-chunk3-1100000-1150000, the tomekkorbak/pii-pile-chunk3-1150000-1200000, the tomekkorbak/pii-pile-chunk3-1200000-1250000, the tomekkorbak/pii-pile-chunk3-1250000-1300000, the tomekkorbak/pii-pile-chunk3-1300000-1350000, the tomekkorbak/pii-pile-chunk3-1350000-1400000, the tomekkorbak/pii-pile-chunk3-1400000-1450000, the tomekkorbak/pii-pile-chunk3-1450000-1500000, the tomekkorbak/pii-pile-chunk3-1500000-1550000, the tomekkorbak/pii-pile-chunk3-1550000-1600000, the tomekkorbak/pii-pile-chunk3-1600000-1650000, the tomekkorbak/pii-pile-chunk3-1650000-1700000, the tomekkorbak/pii-pile-chunk3-1700000-1750000, the tomekkorbak/pii-pile-chunk3-1750000-1800000, the tomekkorbak/pii-pile-chunk3-1800000-1850000, the tomekkorbak/pii-pile-chunk3-1850000-1900000 and the tomekkorbak/pii-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/pii-pile-chunk3-0-50000', 'tomekkorbak/pii-pile-chunk3-50000-100000', 'tomekkorbak/pii-pile-chunk3-100000-150000', 'tomekkorbak/pii-pile-chunk3-150000-200000', 'tomekkorbak/pii-pile-chunk3-200000-250000', 'tomekkorbak/pii-pile-chunk3-250000-300000', 'tomekkorbak/pii-pile-chunk3-300000-350000', 'tomekkorbak/pii-pile-chunk3-350000-400000', 'tomekkorbak/pii-pile-chunk3-400000-450000', 'tomekkorbak/pii-pile-chunk3-450000-500000', 'tomekkorbak/pii-pile-chunk3-500000-550000', 'tomekkorbak/pii-pile-chunk3-550000-600000', 'tomekkorbak/pii-pile-chunk3-600000-650000', 'tomekkorbak/pii-pile-chunk3-650000-700000', 'tomekkorbak/pii-pile-chunk3-700000-750000', 'tomekkorbak/pii-pile-chunk3-750000-800000', 'tomekkorbak/pii-pile-chunk3-800000-850000', 'tomekkorbak/pii-pile-chunk3-850000-900000', 'tomekkorbak/pii-pile-chunk3-900000-950000', 'tomekkorbak/pii-pile-chunk3-950000-1000000', 'tomekkorbak/pii-pile-chunk3-1000000-1050000', 'tomekkorbak/pii-pile-chunk3-1050000-1100000', 'tomekkorbak/pii-pile-chunk3-1100000-1150000', 'tomekkorbak/pii-pile-chunk3-1150000-1200000', 'tomekkorbak/pii-pile-chunk3-1200000-1250000', 'tomekkorbak/pii-pile-chunk3-1250000-1300000', 'tomekkorbak/pii-pile-chunk3-1300000-1350000', 'tomekkorbak/pii-pile-chunk3-1350000-1400000', 'tomekkorbak/pii-pile-chunk3-1400000-1450000', 'tomekkorbak/pii-pile-chunk3-1450000-1500000', 'tomekkorbak/pii-pile-chunk3-1500000-1550000', 'tomekkorbak/pii-pile-chunk3-1550000-1600000', 'tomekkorbak/pii-pile-chunk3-1600000-1650000', 'tomekkorbak/pii-pile-chunk3-1650000-1700000', 'tomekkorbak/pii-pile-chunk3-1700000-1750000', 'tomekkorbak/pii-pile-chunk3-1750000-1800000', 'tomekkorbak/pii-pile-chunk3-1800000-1850000', 'tomekkorbak/pii-pile-chunk3-1850000-1900000', 'tomekkorbak/pii-pile-chunk3-1900000-1950000'], 'is_split_by_sentences': True}, 'generation': {'force_call_on': [25177], 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}], 'scorer_config': {}}, 'kl_gpt3_callback': {'force_call_on': [25177], 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'path_or_name': 'gpt2'}, 'objective': {'alpha': 1, 'name': 'Unlikelihood', 'score_threshold': 0.0}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'affectionate_wescoff', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 'training_output2', 'per_device_train_batch_size': 
16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/6qbhukni
CennetOguz/distilbert-base-uncased-finetuned-recipe-accelerate
[ "pytorch", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/pii-pile-chunk3-0-50000 - tomekkorbak/pii-pile-chunk3-50000-100000 - tomekkorbak/pii-pile-chunk3-100000-150000 - tomekkorbak/pii-pile-chunk3-150000-200000 - tomekkorbak/pii-pile-chunk3-200000-250000 - tomekkorbak/pii-pile-chunk3-250000-300000 - tomekkorbak/pii-pile-chunk3-300000-350000 - tomekkorbak/pii-pile-chunk3-350000-400000 - tomekkorbak/pii-pile-chunk3-400000-450000 - tomekkorbak/pii-pile-chunk3-450000-500000 - tomekkorbak/pii-pile-chunk3-500000-550000 - tomekkorbak/pii-pile-chunk3-550000-600000 - tomekkorbak/pii-pile-chunk3-600000-650000 - tomekkorbak/pii-pile-chunk3-650000-700000 - tomekkorbak/pii-pile-chunk3-700000-750000 - tomekkorbak/pii-pile-chunk3-750000-800000 - tomekkorbak/pii-pile-chunk3-800000-850000 - tomekkorbak/pii-pile-chunk3-850000-900000 - tomekkorbak/pii-pile-chunk3-900000-950000 - tomekkorbak/pii-pile-chunk3-950000-1000000 - tomekkorbak/pii-pile-chunk3-1000000-1050000 - tomekkorbak/pii-pile-chunk3-1050000-1100000 - tomekkorbak/pii-pile-chunk3-1100000-1150000 - tomekkorbak/pii-pile-chunk3-1150000-1200000 - tomekkorbak/pii-pile-chunk3-1200000-1250000 - tomekkorbak/pii-pile-chunk3-1250000-1300000 - tomekkorbak/pii-pile-chunk3-1300000-1350000 - tomekkorbak/pii-pile-chunk3-1350000-1400000 - tomekkorbak/pii-pile-chunk3-1400000-1450000 - tomekkorbak/pii-pile-chunk3-1450000-1500000 - tomekkorbak/pii-pile-chunk3-1500000-1550000 - tomekkorbak/pii-pile-chunk3-1550000-1600000 - tomekkorbak/pii-pile-chunk3-1600000-1650000 - tomekkorbak/pii-pile-chunk3-1650000-1700000 - tomekkorbak/pii-pile-chunk3-1700000-1750000 - tomekkorbak/pii-pile-chunk3-1750000-1800000 - tomekkorbak/pii-pile-chunk3-1800000-1850000 - tomekkorbak/pii-pile-chunk3-1850000-1900000 - tomekkorbak/pii-pile-chunk3-1900000-1950000 model-index: - name: gifted_hugle results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # gifted_hugle This model was trained from scratch on the tomekkorbak/pii-pile-chunk3-0-50000, the tomekkorbak/pii-pile-chunk3-50000-100000, the tomekkorbak/pii-pile-chunk3-100000-150000, the tomekkorbak/pii-pile-chunk3-150000-200000, the tomekkorbak/pii-pile-chunk3-200000-250000, the tomekkorbak/pii-pile-chunk3-250000-300000, the tomekkorbak/pii-pile-chunk3-300000-350000, the tomekkorbak/pii-pile-chunk3-350000-400000, the tomekkorbak/pii-pile-chunk3-400000-450000, the tomekkorbak/pii-pile-chunk3-450000-500000, the tomekkorbak/pii-pile-chunk3-500000-550000, the tomekkorbak/pii-pile-chunk3-550000-600000, the tomekkorbak/pii-pile-chunk3-600000-650000, the tomekkorbak/pii-pile-chunk3-650000-700000, the tomekkorbak/pii-pile-chunk3-700000-750000, the tomekkorbak/pii-pile-chunk3-750000-800000, the tomekkorbak/pii-pile-chunk3-800000-850000, the tomekkorbak/pii-pile-chunk3-850000-900000, the tomekkorbak/pii-pile-chunk3-900000-950000, the tomekkorbak/pii-pile-chunk3-950000-1000000, the tomekkorbak/pii-pile-chunk3-1000000-1050000, the tomekkorbak/pii-pile-chunk3-1050000-1100000, the tomekkorbak/pii-pile-chunk3-1100000-1150000, the tomekkorbak/pii-pile-chunk3-1150000-1200000, the tomekkorbak/pii-pile-chunk3-1200000-1250000, the tomekkorbak/pii-pile-chunk3-1250000-1300000, the tomekkorbak/pii-pile-chunk3-1300000-1350000, the tomekkorbak/pii-pile-chunk3-1350000-1400000, the tomekkorbak/pii-pile-chunk3-1400000-1450000, the tomekkorbak/pii-pile-chunk3-1450000-1500000, the tomekkorbak/pii-pile-chunk3-1500000-1550000, the tomekkorbak/pii-pile-chunk3-1550000-1600000, the tomekkorbak/pii-pile-chunk3-1600000-1650000, the tomekkorbak/pii-pile-chunk3-1650000-1700000, the tomekkorbak/pii-pile-chunk3-1700000-1750000, the tomekkorbak/pii-pile-chunk3-1750000-1800000, the tomekkorbak/pii-pile-chunk3-1800000-1850000, the tomekkorbak/pii-pile-chunk3-1850000-1900000 and the tomekkorbak/pii-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/pii-pile-chunk3-0-50000', 'tomekkorbak/pii-pile-chunk3-50000-100000', 'tomekkorbak/pii-pile-chunk3-100000-150000', 'tomekkorbak/pii-pile-chunk3-150000-200000', 'tomekkorbak/pii-pile-chunk3-200000-250000', 'tomekkorbak/pii-pile-chunk3-250000-300000', 'tomekkorbak/pii-pile-chunk3-300000-350000', 'tomekkorbak/pii-pile-chunk3-350000-400000', 'tomekkorbak/pii-pile-chunk3-400000-450000', 'tomekkorbak/pii-pile-chunk3-450000-500000', 'tomekkorbak/pii-pile-chunk3-500000-550000', 'tomekkorbak/pii-pile-chunk3-550000-600000', 'tomekkorbak/pii-pile-chunk3-600000-650000', 'tomekkorbak/pii-pile-chunk3-650000-700000', 'tomekkorbak/pii-pile-chunk3-700000-750000', 'tomekkorbak/pii-pile-chunk3-750000-800000', 'tomekkorbak/pii-pile-chunk3-800000-850000', 'tomekkorbak/pii-pile-chunk3-850000-900000', 'tomekkorbak/pii-pile-chunk3-900000-950000', 'tomekkorbak/pii-pile-chunk3-950000-1000000', 'tomekkorbak/pii-pile-chunk3-1000000-1050000', 'tomekkorbak/pii-pile-chunk3-1050000-1100000', 'tomekkorbak/pii-pile-chunk3-1100000-1150000', 'tomekkorbak/pii-pile-chunk3-1150000-1200000', 'tomekkorbak/pii-pile-chunk3-1200000-1250000', 'tomekkorbak/pii-pile-chunk3-1250000-1300000', 'tomekkorbak/pii-pile-chunk3-1300000-1350000', 'tomekkorbak/pii-pile-chunk3-1350000-1400000', 'tomekkorbak/pii-pile-chunk3-1400000-1450000', 'tomekkorbak/pii-pile-chunk3-1450000-1500000', 'tomekkorbak/pii-pile-chunk3-1500000-1550000', 'tomekkorbak/pii-pile-chunk3-1550000-1600000', 'tomekkorbak/pii-pile-chunk3-1600000-1650000', 'tomekkorbak/pii-pile-chunk3-1650000-1700000', 'tomekkorbak/pii-pile-chunk3-1700000-1750000', 'tomekkorbak/pii-pile-chunk3-1750000-1800000', 'tomekkorbak/pii-pile-chunk3-1800000-1850000', 'tomekkorbak/pii-pile-chunk3-1850000-1900000', 'tomekkorbak/pii-pile-chunk3-1900000-1950000'], 'is_split_by_sentences': True}, 'generation': {'force_call_on': [25177], 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}], 'scorer_config': {}}, 'kl_gpt3_callback': {'force_call_on': [25177], 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'model_kwargs': {'value_head_config': {'is_detached': False}}, 'path_or_name': 'gpt2'}, 'objective': {'alpha': 1, 'beta': 10, 'name': 'AWR'}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'gifted_hugle', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 
'training_output2', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/3m2fvjsp
CennetOguz/distilbert-base-uncased-finetuned-recipe
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/pii-pile-chunk3-0-50000 - tomekkorbak/pii-pile-chunk3-50000-100000 - tomekkorbak/pii-pile-chunk3-100000-150000 - tomekkorbak/pii-pile-chunk3-150000-200000 - tomekkorbak/pii-pile-chunk3-200000-250000 - tomekkorbak/pii-pile-chunk3-250000-300000 - tomekkorbak/pii-pile-chunk3-300000-350000 - tomekkorbak/pii-pile-chunk3-350000-400000 - tomekkorbak/pii-pile-chunk3-400000-450000 - tomekkorbak/pii-pile-chunk3-450000-500000 - tomekkorbak/pii-pile-chunk3-500000-550000 - tomekkorbak/pii-pile-chunk3-550000-600000 - tomekkorbak/pii-pile-chunk3-600000-650000 - tomekkorbak/pii-pile-chunk3-650000-700000 - tomekkorbak/pii-pile-chunk3-700000-750000 - tomekkorbak/pii-pile-chunk3-750000-800000 - tomekkorbak/pii-pile-chunk3-800000-850000 - tomekkorbak/pii-pile-chunk3-850000-900000 - tomekkorbak/pii-pile-chunk3-900000-950000 - tomekkorbak/pii-pile-chunk3-950000-1000000 - tomekkorbak/pii-pile-chunk3-1000000-1050000 - tomekkorbak/pii-pile-chunk3-1050000-1100000 - tomekkorbak/pii-pile-chunk3-1100000-1150000 - tomekkorbak/pii-pile-chunk3-1150000-1200000 - tomekkorbak/pii-pile-chunk3-1200000-1250000 - tomekkorbak/pii-pile-chunk3-1250000-1300000 - tomekkorbak/pii-pile-chunk3-1300000-1350000 - tomekkorbak/pii-pile-chunk3-1350000-1400000 - tomekkorbak/pii-pile-chunk3-1400000-1450000 - tomekkorbak/pii-pile-chunk3-1450000-1500000 - tomekkorbak/pii-pile-chunk3-1500000-1550000 - tomekkorbak/pii-pile-chunk3-1550000-1600000 - tomekkorbak/pii-pile-chunk3-1600000-1650000 - tomekkorbak/pii-pile-chunk3-1650000-1700000 - tomekkorbak/pii-pile-chunk3-1700000-1750000 - tomekkorbak/pii-pile-chunk3-1750000-1800000 - tomekkorbak/pii-pile-chunk3-1800000-1850000 - tomekkorbak/pii-pile-chunk3-1850000-1900000 - tomekkorbak/pii-pile-chunk3-1900000-1950000 model-index: - name: nervous_wozniak results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # nervous_wozniak This model was trained from scratch on the tomekkorbak/pii-pile-chunk3-0-50000, the tomekkorbak/pii-pile-chunk3-50000-100000, the tomekkorbak/pii-pile-chunk3-100000-150000, the tomekkorbak/pii-pile-chunk3-150000-200000, the tomekkorbak/pii-pile-chunk3-200000-250000, the tomekkorbak/pii-pile-chunk3-250000-300000, the tomekkorbak/pii-pile-chunk3-300000-350000, the tomekkorbak/pii-pile-chunk3-350000-400000, the tomekkorbak/pii-pile-chunk3-400000-450000, the tomekkorbak/pii-pile-chunk3-450000-500000, the tomekkorbak/pii-pile-chunk3-500000-550000, the tomekkorbak/pii-pile-chunk3-550000-600000, the tomekkorbak/pii-pile-chunk3-600000-650000, the tomekkorbak/pii-pile-chunk3-650000-700000, the tomekkorbak/pii-pile-chunk3-700000-750000, the tomekkorbak/pii-pile-chunk3-750000-800000, the tomekkorbak/pii-pile-chunk3-800000-850000, the tomekkorbak/pii-pile-chunk3-850000-900000, the tomekkorbak/pii-pile-chunk3-900000-950000, the tomekkorbak/pii-pile-chunk3-950000-1000000, the tomekkorbak/pii-pile-chunk3-1000000-1050000, the tomekkorbak/pii-pile-chunk3-1050000-1100000, the tomekkorbak/pii-pile-chunk3-1100000-1150000, the tomekkorbak/pii-pile-chunk3-1150000-1200000, the tomekkorbak/pii-pile-chunk3-1200000-1250000, the tomekkorbak/pii-pile-chunk3-1250000-1300000, the tomekkorbak/pii-pile-chunk3-1300000-1350000, the tomekkorbak/pii-pile-chunk3-1350000-1400000, the tomekkorbak/pii-pile-chunk3-1400000-1450000, the tomekkorbak/pii-pile-chunk3-1450000-1500000, the tomekkorbak/pii-pile-chunk3-1500000-1550000, the tomekkorbak/pii-pile-chunk3-1550000-1600000, the tomekkorbak/pii-pile-chunk3-1600000-1650000, the tomekkorbak/pii-pile-chunk3-1650000-1700000, the tomekkorbak/pii-pile-chunk3-1700000-1750000, the tomekkorbak/pii-pile-chunk3-1750000-1800000, the tomekkorbak/pii-pile-chunk3-1800000-1850000, the tomekkorbak/pii-pile-chunk3-1850000-1900000 and the tomekkorbak/pii-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/pii-pile-chunk3-0-50000', 'tomekkorbak/pii-pile-chunk3-50000-100000', 'tomekkorbak/pii-pile-chunk3-100000-150000', 'tomekkorbak/pii-pile-chunk3-150000-200000', 'tomekkorbak/pii-pile-chunk3-200000-250000', 'tomekkorbak/pii-pile-chunk3-250000-300000', 'tomekkorbak/pii-pile-chunk3-300000-350000', 'tomekkorbak/pii-pile-chunk3-350000-400000', 'tomekkorbak/pii-pile-chunk3-400000-450000', 'tomekkorbak/pii-pile-chunk3-450000-500000', 'tomekkorbak/pii-pile-chunk3-500000-550000', 'tomekkorbak/pii-pile-chunk3-550000-600000', 'tomekkorbak/pii-pile-chunk3-600000-650000', 'tomekkorbak/pii-pile-chunk3-650000-700000', 'tomekkorbak/pii-pile-chunk3-700000-750000', 'tomekkorbak/pii-pile-chunk3-750000-800000', 'tomekkorbak/pii-pile-chunk3-800000-850000', 'tomekkorbak/pii-pile-chunk3-850000-900000', 'tomekkorbak/pii-pile-chunk3-900000-950000', 'tomekkorbak/pii-pile-chunk3-950000-1000000', 'tomekkorbak/pii-pile-chunk3-1000000-1050000', 'tomekkorbak/pii-pile-chunk3-1050000-1100000', 'tomekkorbak/pii-pile-chunk3-1100000-1150000', 'tomekkorbak/pii-pile-chunk3-1150000-1200000', 'tomekkorbak/pii-pile-chunk3-1200000-1250000', 'tomekkorbak/pii-pile-chunk3-1250000-1300000', 'tomekkorbak/pii-pile-chunk3-1300000-1350000', 'tomekkorbak/pii-pile-chunk3-1350000-1400000', 'tomekkorbak/pii-pile-chunk3-1400000-1450000', 'tomekkorbak/pii-pile-chunk3-1450000-1500000', 'tomekkorbak/pii-pile-chunk3-1500000-1550000', 'tomekkorbak/pii-pile-chunk3-1550000-1600000', 'tomekkorbak/pii-pile-chunk3-1600000-1650000', 'tomekkorbak/pii-pile-chunk3-1650000-1700000', 'tomekkorbak/pii-pile-chunk3-1700000-1750000', 'tomekkorbak/pii-pile-chunk3-1750000-1800000', 'tomekkorbak/pii-pile-chunk3-1800000-1850000', 'tomekkorbak/pii-pile-chunk3-1850000-1900000', 'tomekkorbak/pii-pile-chunk3-1900000-1950000'], 'is_split_by_sentences': True}, 'generation': {'force_call_on': [25177], 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}], 'scorer_config': {}}, 'kl_gpt3_callback': {'force_call_on': [25177], 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'path_or_name': 'gpt2'}, 'objective': {'name': 'MLE'}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'nervous_wozniak', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 'training_output2', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': 
False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/qjc0jrdx
Certified-Zoomer/DialoGPT-small-rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/pii-pile-chunk3-0-50000 - tomekkorbak/pii-pile-chunk3-50000-100000 - tomekkorbak/pii-pile-chunk3-100000-150000 - tomekkorbak/pii-pile-chunk3-150000-200000 - tomekkorbak/pii-pile-chunk3-200000-250000 - tomekkorbak/pii-pile-chunk3-250000-300000 - tomekkorbak/pii-pile-chunk3-300000-350000 - tomekkorbak/pii-pile-chunk3-350000-400000 - tomekkorbak/pii-pile-chunk3-400000-450000 - tomekkorbak/pii-pile-chunk3-450000-500000 - tomekkorbak/pii-pile-chunk3-500000-550000 - tomekkorbak/pii-pile-chunk3-550000-600000 - tomekkorbak/pii-pile-chunk3-600000-650000 - tomekkorbak/pii-pile-chunk3-650000-700000 - tomekkorbak/pii-pile-chunk3-700000-750000 - tomekkorbak/pii-pile-chunk3-750000-800000 - tomekkorbak/pii-pile-chunk3-800000-850000 - tomekkorbak/pii-pile-chunk3-850000-900000 - tomekkorbak/pii-pile-chunk3-900000-950000 - tomekkorbak/pii-pile-chunk3-950000-1000000 - tomekkorbak/pii-pile-chunk3-1000000-1050000 - tomekkorbak/pii-pile-chunk3-1050000-1100000 - tomekkorbak/pii-pile-chunk3-1100000-1150000 - tomekkorbak/pii-pile-chunk3-1150000-1200000 - tomekkorbak/pii-pile-chunk3-1200000-1250000 - tomekkorbak/pii-pile-chunk3-1250000-1300000 - tomekkorbak/pii-pile-chunk3-1300000-1350000 - tomekkorbak/pii-pile-chunk3-1350000-1400000 - tomekkorbak/pii-pile-chunk3-1400000-1450000 - tomekkorbak/pii-pile-chunk3-1450000-1500000 - tomekkorbak/pii-pile-chunk3-1500000-1550000 - tomekkorbak/pii-pile-chunk3-1550000-1600000 - tomekkorbak/pii-pile-chunk3-1600000-1650000 - tomekkorbak/pii-pile-chunk3-1650000-1700000 - tomekkorbak/pii-pile-chunk3-1700000-1750000 - tomekkorbak/pii-pile-chunk3-1750000-1800000 - tomekkorbak/pii-pile-chunk3-1800000-1850000 - tomekkorbak/pii-pile-chunk3-1850000-1900000 - tomekkorbak/pii-pile-chunk3-1900000-1950000 model-index: - name: confident_knuth results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # confident_knuth This model was trained from scratch on the tomekkorbak/pii-pile-chunk3-0-50000, the tomekkorbak/pii-pile-chunk3-50000-100000, the tomekkorbak/pii-pile-chunk3-100000-150000, the tomekkorbak/pii-pile-chunk3-150000-200000, the tomekkorbak/pii-pile-chunk3-200000-250000, the tomekkorbak/pii-pile-chunk3-250000-300000, the tomekkorbak/pii-pile-chunk3-300000-350000, the tomekkorbak/pii-pile-chunk3-350000-400000, the tomekkorbak/pii-pile-chunk3-400000-450000, the tomekkorbak/pii-pile-chunk3-450000-500000, the tomekkorbak/pii-pile-chunk3-500000-550000, the tomekkorbak/pii-pile-chunk3-550000-600000, the tomekkorbak/pii-pile-chunk3-600000-650000, the tomekkorbak/pii-pile-chunk3-650000-700000, the tomekkorbak/pii-pile-chunk3-700000-750000, the tomekkorbak/pii-pile-chunk3-750000-800000, the tomekkorbak/pii-pile-chunk3-800000-850000, the tomekkorbak/pii-pile-chunk3-850000-900000, the tomekkorbak/pii-pile-chunk3-900000-950000, the tomekkorbak/pii-pile-chunk3-950000-1000000, the tomekkorbak/pii-pile-chunk3-1000000-1050000, the tomekkorbak/pii-pile-chunk3-1050000-1100000, the tomekkorbak/pii-pile-chunk3-1100000-1150000, the tomekkorbak/pii-pile-chunk3-1150000-1200000, the tomekkorbak/pii-pile-chunk3-1200000-1250000, the tomekkorbak/pii-pile-chunk3-1250000-1300000, the tomekkorbak/pii-pile-chunk3-1300000-1350000, the tomekkorbak/pii-pile-chunk3-1350000-1400000, the tomekkorbak/pii-pile-chunk3-1400000-1450000, the tomekkorbak/pii-pile-chunk3-1450000-1500000, the tomekkorbak/pii-pile-chunk3-1500000-1550000, the tomekkorbak/pii-pile-chunk3-1550000-1600000, the tomekkorbak/pii-pile-chunk3-1600000-1650000, the tomekkorbak/pii-pile-chunk3-1650000-1700000, the tomekkorbak/pii-pile-chunk3-1700000-1750000, the tomekkorbak/pii-pile-chunk3-1750000-1800000, the tomekkorbak/pii-pile-chunk3-1800000-1850000, the tomekkorbak/pii-pile-chunk3-1850000-1900000 and the tomekkorbak/pii-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/pii-pile-chunk3-0-50000', 'tomekkorbak/pii-pile-chunk3-50000-100000', 'tomekkorbak/pii-pile-chunk3-100000-150000', 'tomekkorbak/pii-pile-chunk3-150000-200000', 'tomekkorbak/pii-pile-chunk3-200000-250000', 'tomekkorbak/pii-pile-chunk3-250000-300000', 'tomekkorbak/pii-pile-chunk3-300000-350000', 'tomekkorbak/pii-pile-chunk3-350000-400000', 'tomekkorbak/pii-pile-chunk3-400000-450000', 'tomekkorbak/pii-pile-chunk3-450000-500000', 'tomekkorbak/pii-pile-chunk3-500000-550000', 'tomekkorbak/pii-pile-chunk3-550000-600000', 'tomekkorbak/pii-pile-chunk3-600000-650000', 'tomekkorbak/pii-pile-chunk3-650000-700000', 'tomekkorbak/pii-pile-chunk3-700000-750000', 'tomekkorbak/pii-pile-chunk3-750000-800000', 'tomekkorbak/pii-pile-chunk3-800000-850000', 'tomekkorbak/pii-pile-chunk3-850000-900000', 'tomekkorbak/pii-pile-chunk3-900000-950000', 'tomekkorbak/pii-pile-chunk3-950000-1000000', 'tomekkorbak/pii-pile-chunk3-1000000-1050000', 'tomekkorbak/pii-pile-chunk3-1050000-1100000', 'tomekkorbak/pii-pile-chunk3-1100000-1150000', 'tomekkorbak/pii-pile-chunk3-1150000-1200000', 'tomekkorbak/pii-pile-chunk3-1200000-1250000', 'tomekkorbak/pii-pile-chunk3-1250000-1300000', 'tomekkorbak/pii-pile-chunk3-1300000-1350000', 'tomekkorbak/pii-pile-chunk3-1350000-1400000', 'tomekkorbak/pii-pile-chunk3-1400000-1450000', 'tomekkorbak/pii-pile-chunk3-1450000-1500000', 'tomekkorbak/pii-pile-chunk3-1500000-1550000', 'tomekkorbak/pii-pile-chunk3-1550000-1600000', 'tomekkorbak/pii-pile-chunk3-1600000-1650000', 'tomekkorbak/pii-pile-chunk3-1650000-1700000', 'tomekkorbak/pii-pile-chunk3-1700000-1750000', 'tomekkorbak/pii-pile-chunk3-1750000-1800000', 'tomekkorbak/pii-pile-chunk3-1800000-1850000', 'tomekkorbak/pii-pile-chunk3-1850000-1900000', 'tomekkorbak/pii-pile-chunk3-1900000-1950000'], 'is_split_by_sentences': True}, 'generation': {'force_call_on': [25177], 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}], 'scorer_config': {}}, 'kl_gpt3_callback': {'force_call_on': [25177], 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'model_kwargs': {'value_head_config': {'is_detached': False}}, 'path_or_name': 'gpt2'}, 'objective': {'alpha': 0.5, 'beta': 0.1, 'name': 'AWR'}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'confident_knuth', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 
'training_output2', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/q3c975dt
Chaddmckay/Cdm
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/pii-pile-chunk3-0-50000 - tomekkorbak/pii-pile-chunk3-50000-100000 - tomekkorbak/pii-pile-chunk3-100000-150000 - tomekkorbak/pii-pile-chunk3-150000-200000 - tomekkorbak/pii-pile-chunk3-200000-250000 - tomekkorbak/pii-pile-chunk3-250000-300000 - tomekkorbak/pii-pile-chunk3-300000-350000 - tomekkorbak/pii-pile-chunk3-350000-400000 - tomekkorbak/pii-pile-chunk3-400000-450000 - tomekkorbak/pii-pile-chunk3-450000-500000 - tomekkorbak/pii-pile-chunk3-500000-550000 - tomekkorbak/pii-pile-chunk3-550000-600000 - tomekkorbak/pii-pile-chunk3-600000-650000 - tomekkorbak/pii-pile-chunk3-650000-700000 - tomekkorbak/pii-pile-chunk3-700000-750000 - tomekkorbak/pii-pile-chunk3-750000-800000 - tomekkorbak/pii-pile-chunk3-800000-850000 - tomekkorbak/pii-pile-chunk3-850000-900000 - tomekkorbak/pii-pile-chunk3-900000-950000 - tomekkorbak/pii-pile-chunk3-950000-1000000 - tomekkorbak/pii-pile-chunk3-1000000-1050000 - tomekkorbak/pii-pile-chunk3-1050000-1100000 - tomekkorbak/pii-pile-chunk3-1100000-1150000 - tomekkorbak/pii-pile-chunk3-1150000-1200000 - tomekkorbak/pii-pile-chunk3-1200000-1250000 - tomekkorbak/pii-pile-chunk3-1250000-1300000 - tomekkorbak/pii-pile-chunk3-1300000-1350000 - tomekkorbak/pii-pile-chunk3-1350000-1400000 - tomekkorbak/pii-pile-chunk3-1400000-1450000 - tomekkorbak/pii-pile-chunk3-1450000-1500000 - tomekkorbak/pii-pile-chunk3-1500000-1550000 - tomekkorbak/pii-pile-chunk3-1550000-1600000 - tomekkorbak/pii-pile-chunk3-1600000-1650000 - tomekkorbak/pii-pile-chunk3-1650000-1700000 - tomekkorbak/pii-pile-chunk3-1700000-1750000 - tomekkorbak/pii-pile-chunk3-1750000-1800000 - tomekkorbak/pii-pile-chunk3-1800000-1850000 - tomekkorbak/pii-pile-chunk3-1850000-1900000 - tomekkorbak/pii-pile-chunk3-1900000-1950000 model-index: - name: cocky_carson results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # cocky_carson This model was trained from scratch on the tomekkorbak/pii-pile-chunk3-0-50000, the tomekkorbak/pii-pile-chunk3-50000-100000, the tomekkorbak/pii-pile-chunk3-100000-150000, the tomekkorbak/pii-pile-chunk3-150000-200000, the tomekkorbak/pii-pile-chunk3-200000-250000, the tomekkorbak/pii-pile-chunk3-250000-300000, the tomekkorbak/pii-pile-chunk3-300000-350000, the tomekkorbak/pii-pile-chunk3-350000-400000, the tomekkorbak/pii-pile-chunk3-400000-450000, the tomekkorbak/pii-pile-chunk3-450000-500000, the tomekkorbak/pii-pile-chunk3-500000-550000, the tomekkorbak/pii-pile-chunk3-550000-600000, the tomekkorbak/pii-pile-chunk3-600000-650000, the tomekkorbak/pii-pile-chunk3-650000-700000, the tomekkorbak/pii-pile-chunk3-700000-750000, the tomekkorbak/pii-pile-chunk3-750000-800000, the tomekkorbak/pii-pile-chunk3-800000-850000, the tomekkorbak/pii-pile-chunk3-850000-900000, the tomekkorbak/pii-pile-chunk3-900000-950000, the tomekkorbak/pii-pile-chunk3-950000-1000000, the tomekkorbak/pii-pile-chunk3-1000000-1050000, the tomekkorbak/pii-pile-chunk3-1050000-1100000, the tomekkorbak/pii-pile-chunk3-1100000-1150000, the tomekkorbak/pii-pile-chunk3-1150000-1200000, the tomekkorbak/pii-pile-chunk3-1200000-1250000, the tomekkorbak/pii-pile-chunk3-1250000-1300000, the tomekkorbak/pii-pile-chunk3-1300000-1350000, the tomekkorbak/pii-pile-chunk3-1350000-1400000, the tomekkorbak/pii-pile-chunk3-1400000-1450000, the tomekkorbak/pii-pile-chunk3-1450000-1500000, the tomekkorbak/pii-pile-chunk3-1500000-1550000, the tomekkorbak/pii-pile-chunk3-1550000-1600000, the tomekkorbak/pii-pile-chunk3-1600000-1650000, the tomekkorbak/pii-pile-chunk3-1650000-1700000, the tomekkorbak/pii-pile-chunk3-1700000-1750000, the tomekkorbak/pii-pile-chunk3-1750000-1800000, the tomekkorbak/pii-pile-chunk3-1800000-1850000, the tomekkorbak/pii-pile-chunk3-1850000-1900000 and the tomekkorbak/pii-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/pii-pile-chunk3-0-50000', 'tomekkorbak/pii-pile-chunk3-50000-100000', 'tomekkorbak/pii-pile-chunk3-100000-150000', 'tomekkorbak/pii-pile-chunk3-150000-200000', 'tomekkorbak/pii-pile-chunk3-200000-250000', 'tomekkorbak/pii-pile-chunk3-250000-300000', 'tomekkorbak/pii-pile-chunk3-300000-350000', 'tomekkorbak/pii-pile-chunk3-350000-400000', 'tomekkorbak/pii-pile-chunk3-400000-450000', 'tomekkorbak/pii-pile-chunk3-450000-500000', 'tomekkorbak/pii-pile-chunk3-500000-550000', 'tomekkorbak/pii-pile-chunk3-550000-600000', 'tomekkorbak/pii-pile-chunk3-600000-650000', 'tomekkorbak/pii-pile-chunk3-650000-700000', 'tomekkorbak/pii-pile-chunk3-700000-750000', 'tomekkorbak/pii-pile-chunk3-750000-800000', 'tomekkorbak/pii-pile-chunk3-800000-850000', 'tomekkorbak/pii-pile-chunk3-850000-900000', 'tomekkorbak/pii-pile-chunk3-900000-950000', 'tomekkorbak/pii-pile-chunk3-950000-1000000', 'tomekkorbak/pii-pile-chunk3-1000000-1050000', 'tomekkorbak/pii-pile-chunk3-1050000-1100000', 'tomekkorbak/pii-pile-chunk3-1100000-1150000', 'tomekkorbak/pii-pile-chunk3-1150000-1200000', 'tomekkorbak/pii-pile-chunk3-1200000-1250000', 'tomekkorbak/pii-pile-chunk3-1250000-1300000', 'tomekkorbak/pii-pile-chunk3-1300000-1350000', 'tomekkorbak/pii-pile-chunk3-1350000-1400000', 'tomekkorbak/pii-pile-chunk3-1400000-1450000', 'tomekkorbak/pii-pile-chunk3-1450000-1500000', 'tomekkorbak/pii-pile-chunk3-1500000-1550000', 'tomekkorbak/pii-pile-chunk3-1550000-1600000', 'tomekkorbak/pii-pile-chunk3-1600000-1650000', 'tomekkorbak/pii-pile-chunk3-1650000-1700000', 'tomekkorbak/pii-pile-chunk3-1700000-1750000', 'tomekkorbak/pii-pile-chunk3-1750000-1800000', 'tomekkorbak/pii-pile-chunk3-1800000-1850000', 'tomekkorbak/pii-pile-chunk3-1850000-1900000', 'tomekkorbak/pii-pile-chunk3-1900000-1950000'], 'filter_threshold': 0.000286, 'is_split_by_sentences': True}, 'generation': {'force_call_on': [25177], 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}], 'scorer_config': {}}, 'kl_gpt3_callback': {'force_call_on': [25177], 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'path_or_name': 'gpt2'}, 'objective': {'name': 'MLE'}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'cocky_carson', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 'training_output2', 'per_device_train_batch_size': 16, 'push_to_hub': 
True, 'remove_unused_columns': False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/2y0u35mu
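Unlike the other runs in this series, the config above filters the training datasets with `filter_threshold: 0.000286`. A hedged sketch of that preprocessing step is below; the per-document score column name and the filter direction are assumptions about the dataset schema, not confirmed by the config:

```python
from datasets import load_dataset

# Illustrative only: keep documents whose score falls at or below the threshold.
# The column name "score" (and the comparison direction) is an assumption.
ds = load_dataset("tomekkorbak/pii-pile-chunk3-0-50000", split="train")
filtered = ds.filter(lambda ex: ex["score"] <= 0.000286)
print(len(ds), "->", len(filtered))
```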
Chae/botman
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/pii-pile-chunk3-0-50000 - tomekkorbak/pii-pile-chunk3-50000-100000 - tomekkorbak/pii-pile-chunk3-100000-150000 - tomekkorbak/pii-pile-chunk3-150000-200000 - tomekkorbak/pii-pile-chunk3-200000-250000 - tomekkorbak/pii-pile-chunk3-250000-300000 - tomekkorbak/pii-pile-chunk3-300000-350000 - tomekkorbak/pii-pile-chunk3-350000-400000 - tomekkorbak/pii-pile-chunk3-400000-450000 - tomekkorbak/pii-pile-chunk3-450000-500000 - tomekkorbak/pii-pile-chunk3-500000-550000 - tomekkorbak/pii-pile-chunk3-550000-600000 - tomekkorbak/pii-pile-chunk3-600000-650000 - tomekkorbak/pii-pile-chunk3-650000-700000 - tomekkorbak/pii-pile-chunk3-700000-750000 - tomekkorbak/pii-pile-chunk3-750000-800000 - tomekkorbak/pii-pile-chunk3-800000-850000 - tomekkorbak/pii-pile-chunk3-850000-900000 - tomekkorbak/pii-pile-chunk3-900000-950000 - tomekkorbak/pii-pile-chunk3-950000-1000000 - tomekkorbak/pii-pile-chunk3-1000000-1050000 - tomekkorbak/pii-pile-chunk3-1050000-1100000 - tomekkorbak/pii-pile-chunk3-1100000-1150000 - tomekkorbak/pii-pile-chunk3-1150000-1200000 - tomekkorbak/pii-pile-chunk3-1200000-1250000 - tomekkorbak/pii-pile-chunk3-1250000-1300000 - tomekkorbak/pii-pile-chunk3-1300000-1350000 - tomekkorbak/pii-pile-chunk3-1350000-1400000 - tomekkorbak/pii-pile-chunk3-1400000-1450000 - tomekkorbak/pii-pile-chunk3-1450000-1500000 - tomekkorbak/pii-pile-chunk3-1500000-1550000 - tomekkorbak/pii-pile-chunk3-1550000-1600000 - tomekkorbak/pii-pile-chunk3-1600000-1650000 - tomekkorbak/pii-pile-chunk3-1650000-1700000 - tomekkorbak/pii-pile-chunk3-1700000-1750000 - tomekkorbak/pii-pile-chunk3-1750000-1800000 - tomekkorbak/pii-pile-chunk3-1800000-1850000 - tomekkorbak/pii-pile-chunk3-1850000-1900000 - tomekkorbak/pii-pile-chunk3-1900000-1950000 model-index: - name: boring_mcclintock results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # boring_mcclintock This model was trained from scratch on the tomekkorbak/pii-pile-chunk3-0-50000, the tomekkorbak/pii-pile-chunk3-50000-100000, the tomekkorbak/pii-pile-chunk3-100000-150000, the tomekkorbak/pii-pile-chunk3-150000-200000, the tomekkorbak/pii-pile-chunk3-200000-250000, the tomekkorbak/pii-pile-chunk3-250000-300000, the tomekkorbak/pii-pile-chunk3-300000-350000, the tomekkorbak/pii-pile-chunk3-350000-400000, the tomekkorbak/pii-pile-chunk3-400000-450000, the tomekkorbak/pii-pile-chunk3-450000-500000, the tomekkorbak/pii-pile-chunk3-500000-550000, the tomekkorbak/pii-pile-chunk3-550000-600000, the tomekkorbak/pii-pile-chunk3-600000-650000, the tomekkorbak/pii-pile-chunk3-650000-700000, the tomekkorbak/pii-pile-chunk3-700000-750000, the tomekkorbak/pii-pile-chunk3-750000-800000, the tomekkorbak/pii-pile-chunk3-800000-850000, the tomekkorbak/pii-pile-chunk3-850000-900000, the tomekkorbak/pii-pile-chunk3-900000-950000, the tomekkorbak/pii-pile-chunk3-950000-1000000, the tomekkorbak/pii-pile-chunk3-1000000-1050000, the tomekkorbak/pii-pile-chunk3-1050000-1100000, the tomekkorbak/pii-pile-chunk3-1100000-1150000, the tomekkorbak/pii-pile-chunk3-1150000-1200000, the tomekkorbak/pii-pile-chunk3-1200000-1250000, the tomekkorbak/pii-pile-chunk3-1250000-1300000, the tomekkorbak/pii-pile-chunk3-1300000-1350000, the tomekkorbak/pii-pile-chunk3-1350000-1400000, the tomekkorbak/pii-pile-chunk3-1400000-1450000, the tomekkorbak/pii-pile-chunk3-1450000-1500000, the tomekkorbak/pii-pile-chunk3-1500000-1550000, the tomekkorbak/pii-pile-chunk3-1550000-1600000, the tomekkorbak/pii-pile-chunk3-1600000-1650000, the tomekkorbak/pii-pile-chunk3-1650000-1700000, the tomekkorbak/pii-pile-chunk3-1700000-1750000, the tomekkorbak/pii-pile-chunk3-1750000-1800000, the tomekkorbak/pii-pile-chunk3-1800000-1850000, the tomekkorbak/pii-pile-chunk3-1850000-1900000 and the tomekkorbak/pii-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'conditional_training_config': {'aligned_prefix': '<|aligned|>', 'drop_token_fraction': 0.01, 'misaligned_prefix': '<|misaligned|>', 'threshold': 0.0}, 'datasets': ['tomekkorbak/pii-pile-chunk3-0-50000', 'tomekkorbak/pii-pile-chunk3-50000-100000', 'tomekkorbak/pii-pile-chunk3-100000-150000', 'tomekkorbak/pii-pile-chunk3-150000-200000', 'tomekkorbak/pii-pile-chunk3-200000-250000', 'tomekkorbak/pii-pile-chunk3-250000-300000', 'tomekkorbak/pii-pile-chunk3-300000-350000', 'tomekkorbak/pii-pile-chunk3-350000-400000', 'tomekkorbak/pii-pile-chunk3-400000-450000', 'tomekkorbak/pii-pile-chunk3-450000-500000', 'tomekkorbak/pii-pile-chunk3-500000-550000', 'tomekkorbak/pii-pile-chunk3-550000-600000', 'tomekkorbak/pii-pile-chunk3-600000-650000', 'tomekkorbak/pii-pile-chunk3-650000-700000', 'tomekkorbak/pii-pile-chunk3-700000-750000', 'tomekkorbak/pii-pile-chunk3-750000-800000', 'tomekkorbak/pii-pile-chunk3-800000-850000', 'tomekkorbak/pii-pile-chunk3-850000-900000', 'tomekkorbak/pii-pile-chunk3-900000-950000', 'tomekkorbak/pii-pile-chunk3-950000-1000000', 'tomekkorbak/pii-pile-chunk3-1000000-1050000', 'tomekkorbak/pii-pile-chunk3-1050000-1100000', 'tomekkorbak/pii-pile-chunk3-1100000-1150000', 'tomekkorbak/pii-pile-chunk3-1150000-1200000', 'tomekkorbak/pii-pile-chunk3-1200000-1250000', 'tomekkorbak/pii-pile-chunk3-1250000-1300000', 'tomekkorbak/pii-pile-chunk3-1300000-1350000', 'tomekkorbak/pii-pile-chunk3-1350000-1400000', 'tomekkorbak/pii-pile-chunk3-1400000-1450000', 'tomekkorbak/pii-pile-chunk3-1450000-1500000', 'tomekkorbak/pii-pile-chunk3-1500000-1550000', 'tomekkorbak/pii-pile-chunk3-1550000-1600000', 'tomekkorbak/pii-pile-chunk3-1600000-1650000', 'tomekkorbak/pii-pile-chunk3-1650000-1700000', 'tomekkorbak/pii-pile-chunk3-1700000-1750000', 'tomekkorbak/pii-pile-chunk3-1750000-1800000', 'tomekkorbak/pii-pile-chunk3-1800000-1850000', 'tomekkorbak/pii-pile-chunk3-1850000-1900000', 'tomekkorbak/pii-pile-chunk3-1900000-1950000'], 'is_split_by_sentences': True}, 'generation': {'force_call_on': [25177], 'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'bad_words_ids': [[50257], [50258]], 'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048, 'prefix': '<|aligned|>'}], 'scorer_config': {}}, 'kl_gpt3_callback': {'force_call_on': [25177], 'max_tokens': 64, 'num_samples': 4096, 'prefix': '<|aligned|>'}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'num_additional_tokens': 2, 'path_or_name': 'gpt2'}, 'objective': {'name': 'MLE'}, 'tokenizer': {'path_or_name': 'gpt2', 'special_tokens': ['<|aligned|>', '<|misaligned|>']}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 
'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'boring_mcclintock', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 'training_output2', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/c17x87uu
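For illustration, here is a minimal generation sketch that follows the conditional-training setup described above. The repository id is only a guess assembled from `hub_model_id` (the card does not spell out the full path), and it assumes the saved tokenizer still contains the `<|aligned|>` / `<|misaligned|>` control tokens added during training:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical checkpoint location; the card only states hub_model_id='boring_mcclintock'.
checkpoint = "tomekkorbak/boring_mcclintock"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

# Prepend the control prefix used during conditional training to steer
# sampling towards the "aligned" (non-PII) part of the learned distribution.
prompt = "<|aligned|>The weather today is"
inputs = tokenizer(prompt, return_tensors="pt")

outputs = model.generate(
    **inputs,
    do_sample=True,
    top_p=0.9,           # sampling settings mirror the card's generation config
    temperature=0.7,
    max_length=128,
    bad_words_ids=[[tokenizer.convert_tokens_to_ids("<|aligned|>")],
                   [tokenizer.convert_tokens_to_ids("<|misaligned|>")]],
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Swapping in the `<|misaligned|>` prefix would instead steer sampling towards the behaviour the conditioning scheme is meant to avoid, which is mainly useful for inspection.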
Chaima/TunBerto
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
This is an Embedding built for Stable Diffusion 2.0, trained on 41 images of a vapourwave aesthetic. Training was done with the Automatic1111 WebUI: batch size 7, gradient accumulation steps 6, 200 steps (I will include previous step versions for you to try as well). The phrase to invoke the Embedding is "VapWav". Default generations with the "VapWav" phrase: <img src="https://huggingface.co/Arron17/VapourWave/resolve/main/1662823937757-1425551138-VapWa___.png" alt="VapWav" width="600"/> Prompt: A Cinematic Photograph of Keira Knightley by VapWav <img src="https://huggingface.co/Arron17/VapourWave/resolve/main/1662823937762-1079716203-A%20Cin___.png" alt="A Cinematic Photograph of Keira Knightley by VapWav" width="600"/> An example of the different step results (results are dependent on prompt, your mileage may vary): Prompt: A Cinematic Photograph of a Nissan GT-R by VapWav <img src="https://huggingface.co/Arron17/VapourWave/resolve/main/1662823937767-2403940164-A%20Cin___.png" alt="A Cinematic Photograph of a Nissan GT-R by VapWav" width="1200"/>
Chakita/KROBERT
[ "pytorch", "roberta", "fill-mask", "transformers", "masked-lm", "fill-in-the-blanks", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - lt license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: whisper-lt-finetune results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: 'null' split: None args: 'config: lt, split: test' metrics: - name: Wer type: wer value: 28.115930505307517 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-lt-finetune This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.3634 - Wer: 28.1159 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 250 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.399 | 0.9 | 500 | 0.4877 | 49.1790 | | 0.1925 | 1.8 | 1000 | 0.4019 | 39.1325 | | 0.0734 | 2.7 | 1500 | 0.3989 | 37.5581 | | 0.0324 | 3.6 | 2000 | 0.3947 | 32.9662 | | 0.0053 | 5.4 | 3000 | 0.3708 | 29.2808 | | 0.0007 | 7.19 | 4000 | 0.3634 | 28.1159 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
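As a usage illustration for this fine-tune, a minimal transcription sketch with the `transformers` pipeline API follows. The repository id is a placeholder (the card does not state the final Hub path) and the audio file name is invented; it assumes 16 kHz mono input as Whisper expects:

```python
from transformers import pipeline

# Placeholder repository id; point this at the actual fine-tuned checkpoint.
asr = pipeline(
    "automatic-speech-recognition",
    model="your-username/whisper-lt-finetune",
    chunk_length_s=30,  # chunking lets the 30-second Whisper window cover longer clips
)

# Pin the decoder prompt to Lithuanian transcription, mirroring the fine-tuning setup.
asr.model.config.forced_decoder_ids = asr.tokenizer.get_decoder_prompt_ids(
    language="lithuanian", task="transcribe"
)

print(asr("sample_lt.wav")["text"])
```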
Chakita/Kalbert
[ "pytorch", "tensorboard", "albert", "fill-mask", "transformers", "generated_from_trainer", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en - zh - de - es - ru - ko - fr - ja - pt - tr - pl - ca - nl - ar - sv - it - id - hi - fi - vi - he - uk - el - ms - cs - ro - da - hu - ta - no - th - ur - hr - bg - lt - la - mi - ml - cy - sk - te - fa - lv - bn - sr - az - sl - kn - et - mk - br - eu - is - hy - ne - mn - bs - kk - sq - sw - gl - mr - pa - si - km - sn - yo - so - af - oc - ka - be - tg - sd - gu - am - yi - lo - uz - fo - ht - ps - tk - nn - mt - sa - lb - my - bo - tl - mg - as - tt - haw - ln - ha - ba - jw - su tags: - audio - automatic-speech-recognition - hf-asr-leaderboard widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac pipeline_tag: automatic-speech-recognition license: apache-2.0 --- # Whisper Whisper is a pre-trained model for automatic speech recognition (ASR) and speech translation. Trained on 680k hours of labelled data, Whisper models demonstrate a strong ability to generalise to many datasets and domains **without** the need for fine-tuning. Whisper was proposed in the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://arxiv.org/abs/2212.04356) by Alec Radford et al. from OpenAI. The original code repository can be found [here](https://github.com/openai/whisper). Compared to the Whisper large model, the large-v2 model is trained for 2.5x more epochs with added regularization for improved performance. **Disclaimer**: Content for this model card has partly been written by the Hugging Face team, and parts of it were copied and pasted from the original model card. ## Model details Whisper is a Transformer based encoder-decoder model, also referred to as a _sequence-to-sequence_ model. It was trained on 680k hours of labelled speech data annotated using large-scale weak supervision. The models were trained on either English-only data or multilingual data. The English-only models were trained on the task of speech recognition. The multilingual models were trained on both speech recognition and speech translation. For speech recognition, the model predicts transcriptions in the *same* language as the audio. For speech translation, the model predicts transcriptions to a *different* language to the audio. Whisper checkpoints come in five configurations of varying model sizes. The smallest four are trained on either English-only or multilingual data. The largest checkpoints are multilingual only. All ten of the pre-trained checkpoints are available on the [Hugging Face Hub](https://huggingface.co/models?search=openai/whisper). 
The checkpoints are summarised in the following table with links to the models on the Hub: | Size | Parameters | English-only | Multilingual | |----------|------------|------------------------------------------------------|-----------------------------------------------------| | tiny | 39 M | [✓](https://huggingface.co/openai/whisper-tiny.en) | [✓](https://huggingface.co/openai/whisper-tiny) | | base | 74 M | [✓](https://huggingface.co/openai/whisper-base.en) | [✓](https://huggingface.co/openai/whisper-base) | | small | 244 M | [✓](https://huggingface.co/openai/whisper-small.en) | [✓](https://huggingface.co/openai/whisper-small) | | medium | 769 M | [✓](https://huggingface.co/openai/whisper-medium.en) | [✓](https://huggingface.co/openai/whisper-medium) | | large | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large) | | large-v2 | 1550 M | x | [✓](https://huggingface.co/openai/whisper-large-v2) | # Usage To transcribe audio samples, the model has to be used alongside a [`WhisperProcessor`](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperProcessor). The `WhisperProcessor` is used to: 1. Pre-process the audio inputs (converting them to log-Mel spectrograms for the model) 2. Post-process the model outputs (converting them from tokens to text) The model is informed of which task to perform (transcription or translation) by passing the appropriate "context tokens". These context tokens are a sequence of tokens that are given to the decoder at the start of the decoding process, and take the following order: 1. The transcription always starts with the `<|startoftranscript|>` token 2. The second token is the language token (e.g. `<|en|>` for English) 3. The third token is the "task token". It can take one of two values: `<|transcribe|>` for speech recognition or `<|translate|>` for speech translation 4. In addition, a `<|notimestamps|>` token is added if the model should not include timestamp prediction Thus, a typical sequence of context tokens might look as follows: ``` <|startoftranscript|> <|en|> <|transcribe|> <|notimestamps|> ``` This tells the model to decode in English, under the task of speech recognition, and not to predict timestamps. These tokens can either be forced or un-forced. If they are forced, the model is made to predict each token at each position. This allows one to control the output language and task for the Whisper model. If they are un-forced, the Whisper model will automatically predict the output language and task itself. The context tokens can be set accordingly: ```python model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(language="english", task="transcribe") ``` This forces the model to predict in English under the task of speech recognition. ## Transcription ### English to English In this example, the context tokens are 'unforced', meaning the model automatically predicts the output language (English) and task (transcribe).
```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2") >>> model.config.forced_decoder_ids = None >>> # load dummy dataset and read audio files >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> input_features = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=False) ['<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.<|endoftext|>'] >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.'] ``` The context tokens can be removed from the start of the transcription by setting `skip_special_tokens=True`. ### French to French The following example demonstrates French to French transcription by setting the decoder ids appropriately. ```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import Audio, load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2") >>> forced_decoder_ids = processor.get_decoder_prompt_ids(language="french", task="transcribe") >>> # load streaming dataset and read first audio sample >>> ds = load_dataset("common_voice", "fr", split="test", streaming=True) >>> ds = ds.cast_column("audio", Audio(sampling_rate=16_000)) >>> input_speech = next(iter(ds))["audio"] >>> input_features = processor(input_speech["array"], sampling_rate=input_speech["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids) ['<|startoftranscript|><|fr|><|transcribe|><|notimestamps|> Un vrai travail intéressant va enfin être mené sur ce sujet.<|endoftext|>'] >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' Un vrai travail intéressant va enfin être mené sur ce sujet.'] ``` ## Translation Setting the task to "translate" forces the Whisper model to perform speech translation. 
### French to English ```python >>> from transformers import WhisperProcessor, WhisperForConditionalGeneration >>> from datasets import Audio, load_dataset >>> # load model and processor >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2") >>> forced_decoder_ids = processor.get_decoder_prompt_ids(language="french", task="translate") >>> # load streaming dataset and read first audio sample >>> ds = load_dataset("common_voice", "fr", split="test", streaming=True) >>> ds = ds.cast_column("audio", Audio(sampling_rate=16_000)) >>> input_speech = next(iter(ds))["audio"] >>> input_features = processor(input_speech["array"], sampling_rate=input_speech["sampling_rate"], return_tensors="pt").input_features >>> # generate token ids >>> predicted_ids = model.generate(input_features, forced_decoder_ids=forced_decoder_ids) >>> # decode token ids to text >>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True) [' A very interesting work, we will finally be given on this subject.'] ``` ## Evaluation This code snippet shows how to evaluate Whisper Large on [LibriSpeech test-clean](https://huggingface.co/datasets/librispeech_asr): ```python >>> from datasets import load_dataset >>> from transformers import WhisperForConditionalGeneration, WhisperProcessor >>> import torch >>> from evaluate import load >>> librispeech_test_clean = load_dataset("librispeech_asr", "clean", split="test") >>> processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2") >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large-v2").to("cuda") >>> def map_to_pred(batch): >>> audio = batch["audio"] >>> input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features >>> batch["reference"] = processor.tokenizer._normalize(batch['text']) >>> >>> with torch.no_grad(): >>> predicted_ids = model.generate(input_features.to("cuda"))[0] >>> transcription = processor.decode(predicted_ids) >>> batch["prediction"] = processor.tokenizer._normalize(transcription) >>> return batch >>> result = librispeech_test_clean.map(map_to_pred) >>> wer = load("wer") >>> print(100 * wer.compute(references=result["reference"], predictions=result["prediction"])) 3.0003583080317572 ``` ## Long-Form Transcription The Whisper model is intrinsically designed to work on audio samples of up to 30s in duration. However, by using a chunking algorithm, it can be used to transcribe audio samples of up to arbitrary length. This is possible through Transformers [`pipeline`](https://huggingface.co/docs/transformers/main_classes/pipelines#transformers.AutomaticSpeechRecognitionPipeline) method. Chunking is enabled by setting `chunk_length_s=30` when instantiating the pipeline. With chunking enabled, the pipeline can be run with batched inference. It can also be extended to predict sequence level timestamps by passing `return_timestamps=True`: ```python >>> import torch >>> from transformers import pipeline >>> from datasets import load_dataset >>> device = "cuda:0" if torch.cuda.is_available() else "cpu" >>> pipe = pipeline( >>> "automatic-speech-recognition", >>> model="openai/whisper-large-v2", >>> chunk_length_s=30, >>> device=device, >>> ) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> sample = ds[0]["audio"] >>> prediction = pipe(sample.copy(), batch_size=8)["text"] " Mr. 
Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." >>> # we can also return timestamps for the predictions >>> prediction = pipe(sample.copy(), batch_size=8, return_timestamps=True)["chunks"] [{'text': ' Mr. Quilter is the apostle of the middle classes and we are glad to welcome his gospel.', 'timestamp': (0.0, 5.44)}] ``` Refer to the blog post [ASR Chunking](https://huggingface.co/blog/asr-chunking) for more details on the chunking algorithm. ## Fine-Tuning The pre-trained Whisper model demonstrates a strong ability to generalise to different datasets and domains. However, its predictive capabilities can be improved further for certain languages and tasks through *fine-tuning*. The blog post [Fine-Tune Whisper with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper) provides a step-by-step guide to fine-tuning the Whisper model with as little as 5 hours of labelled data. ### Evaluated Use The primary intended users of these models are AI researchers studying robustness, generalization, capabilities, biases, and constraints of the current model. However, Whisper is also potentially quite useful as an ASR solution for developers, especially for English speech recognition. We recognize that once models are released, it is impossible to restrict access to only “intended” uses or to draw reasonable guidelines around what is or is not research. The models are primarily trained and evaluated on ASR and speech translation to English tasks. They show strong ASR results in ~10 languages. They may exhibit additional capabilities, particularly if fine-tuned on certain tasks like voice activity detection, speaker classification, or speaker diarization but have not been robustly evaluated in these areas. We strongly recommend that users perform robust evaluations of the models in a particular context and domain before deploying them. In particular, we caution against using Whisper models to transcribe recordings of individuals taken without their consent or purporting to use these models for any kind of subjective classification. We recommend against use in high-risk domains like decision-making contexts, where flaws in accuracy can lead to pronounced flaws in outcomes. The models are intended to transcribe and translate speech, use of the model for classification is not only not evaluated but also not appropriate, particularly to infer human attributes. ## Training Data The models are trained on 680,000 hours of audio and the corresponding transcripts collected from the internet. 65% of this data (or 438,000 hours) represents English-language audio and matched English transcripts, roughly 18% (or 126,000 hours) represents non-English audio and English transcripts, while the final 17% (or 117,000 hours) represents non-English audio and the corresponding transcript. This non-English data represents 98 different languages. As discussed in [the accompanying paper](https://cdn.openai.com/papers/whisper.pdf), we see that performance on transcription in a given language is directly correlated with the amount of training data we employ in that language. ## Performance and Limitations Our studies show that, over many existing ASR systems, the models exhibit improved robustness to accents, background noise, technical language, as well as zero shot translation from multiple languages into English; and that accuracy on speech recognition and translation is near the state-of-the-art level. 
However, because the models are trained in a weakly supervised manner using large-scale noisy data, the predictions may include texts that are not actually spoken in the audio input (i.e. hallucination). We hypothesize that this happens because, given their general knowledge of language, the models combine trying to predict the next word in audio with trying to transcribe the audio itself. Our models perform unevenly across languages, and we observe lower accuracy on low-resource and/or low-discoverability languages or languages where we have less training data. The models also exhibit disparate performance on different accents and dialects of particular languages, which may include higher word error rate across speakers of different genders, races, ages, or other demographic criteria. Our full evaluation results are presented in [the paper accompanying this release](https://cdn.openai.com/papers/whisper.pdf). In addition, the sequence-to-sequence architecture of the model makes it prone to generating repetitive texts, which can be mitigated to some degree by beam search and temperature scheduling but not perfectly. Further analysis of these limitations is provided in [the paper](https://cdn.openai.com/papers/whisper.pdf). It is likely that this behavior and hallucinations may be worse on lower-resource and/or lower-discoverability languages. ## Broader Implications We anticipate that Whisper models’ transcription capabilities may be used for improving accessibility tools. While Whisper models cannot be used for real-time transcription out of the box, their speed and size suggest that others may be able to build applications on top of them that allow for near-real-time speech recognition and translation. The real value of beneficial applications built on top of Whisper models suggests that the disparate performance of these models may have real economic implications. There are also potential dual use concerns that come with releasing Whisper. While we hope the technology will be used primarily for beneficial purposes, making ASR technology more accessible could enable more actors to build capable surveillance technologies or scale up existing surveillance efforts, as the speed and accuracy allow for affordable automatic transcription and translation of large volumes of audio communication. Moreover, these models may have some capabilities to recognize specific individuals out of the box, which in turn presents safety concerns related both to dual use and disparate performance. In practice, we expect that the cost of transcription is not the limiting factor of scaling up surveillance projects. ### BibTeX entry and citation info ```bibtex @misc{radford2022whisper, doi = {10.48550/ARXIV.2212.04356}, url = {https://arxiv.org/abs/2212.04356}, author = {Radford, Alec and Kim, Jong Wook and Xu, Tao and Brockman, Greg and McLeavey, Christine and Sutskever, Ilya}, title = {Robust Speech Recognition via Large-Scale Weak Supervision}, publisher = {arXiv}, year = {2022}, copyright = {arXiv.org perpetual, non-exclusive license} } ```
Chakita/gpt2_mwp
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('rrustom/sd-class-butterflies-32') image = pipeline().images[0] image ```
Chan/distilgpt2-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - glue metrics: - matthews_correlation model-index: - name: distilbert-base-uncased-finetuned-cola results: - task: name: Text Classification type: text-classification dataset: name: glue type: glue config: cola split: train args: cola metrics: - name: Matthews Correlation type: matthews_correlation value: 0.5474713423103301 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.5254 - Matthews Correlation: 0.5475 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5221 | 1.0 | 535 | 0.5360 | 0.4307 | | 0.3491 | 2.0 | 1070 | 0.5128 | 0.4972 | | 0.2382 | 3.0 | 1605 | 0.5254 | 0.5475 | | 0.1756 | 4.0 | 2140 | 0.7479 | 0.5330 | | 0.1248 | 5.0 | 2675 | 0.7978 | 0.5414 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
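Since the headline metric above is Matthews correlation, here is a small sketch of how that score is typically computed with the `evaluate` library; the prediction and label arrays are made-up placeholders rather than outputs of this model:

```python
import evaluate

matthews_metric = evaluate.load("matthews_correlation")

# Toy labels: in practice these come from running the fine-tuned model over the
# CoLA validation split (1 = linguistically acceptable, 0 = unacceptable).
predictions = [1, 0, 1, 1, 0, 1]
references = [1, 0, 0, 1, 0, 1]

result = matthews_metric.compute(predictions=predictions, references=references)
print(result["matthews_correlation"])  # roughly 0.71 for this toy example
```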
ChaseBread/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: distilbert-base-uncased-finetuned-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-ner This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0048 - Precision: 0.9203 - Recall: 0.9777 - F1: 0.9482 - Accuracy: 0.9984 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 358 | 0.0067 | 0.9229 | 0.9332 | 0.9280 | 0.9978 | | 0.0545 | 2.0 | 716 | 0.0052 | 0.9167 | 0.9800 | 0.9473 | 0.9984 | | 0.0052 | 3.0 | 1074 | 0.0048 | 0.9203 | 0.9777 | 0.9482 | 0.9984 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
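As a usage illustration for a token-classification fine-tune like this one, a short inference sketch with the `transformers` pipeline API; the model path is a placeholder (the card does not state the final Hub id) and the example sentence is invented:

```python
from transformers import pipeline

# Placeholder path; point this at the fine-tuned NER checkpoint.
ner = pipeline(
    "token-classification",
    model="path/to/distilbert-base-uncased-finetuned-ner",
    aggregation_strategy="simple",  # merge word pieces into whole entity spans
)

for entity in ner("Ada Lovelace worked with Charles Babbage in London."):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```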
Cheapestmedsshop/Buymodafinilus
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: edgertej/poebert-clean-checkpoint-finetuned-poetry-foundation-clean results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # edgertej/poebert-clean-checkpoint-finetuned-poetry-foundation-clean This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 3.8658 - Validation Loss: 3.6186 - Epoch: 2 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 3e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 4.0379 | 3.6686 | 0 | | 3.9346 | 3.6478 | 1 | | 3.8658 | 3.6186 | 2 | ### Framework versions - Transformers 4.19.2 - TensorFlow 2.9.1 - Datasets 2.4.0 - Tokenizers 0.12.1
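The optimizer dictionary listed above maps directly onto a standard Keras Adam configuration; as a rough sketch of how that setup could be reproduced (dataset preparation omitted, and the base checkpoint assumed to be `bert-base-cased` as stated in the card):

```python
import tensorflow as tf
from transformers import TFAutoModelForMaskedLM

model = TFAutoModelForMaskedLM.from_pretrained("bert-base-cased")

# Mirror the optimizer settings reported in the card.
optimizer = tf.keras.optimizers.Adam(
    learning_rate=3e-05,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-07,
    amsgrad=False,
)

# Compiling without an explicit loss lets the Transformers model fall back to
# its built-in masked-language-modelling loss.
model.compile(optimizer=optimizer)
# model.fit(train_dataset, validation_data=val_dataset, epochs=3)
```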
Cheatham/xlm-roberta-large-finetuned-d12
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('nudro/sd-class-butterflies-32') image = pipeline().images[0] image ```
Cheatham/xlm-roberta-large-finetuned-d1r01
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- tags: - autotrain - text-classification language: - pt widget: - text: "" datasets: - famube/autotrain-data-ciap co2_eq_emissions: emissions: 4.04378848235129 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2345673819 - CO2 Emissions (in grams): 4.0438 ## Validation Metrics - Loss: 2.342 - Accuracy: 0.631 - Macro F1: 0.542 - Micro F1: 0.631 - Weighted F1: 0.546 - Macro Precision: 0.514 - Micro Precision: 0.631 - Weighted Precision: 0.515 - Macro Recall: 0.617 - Micro Recall: 0.631 - Weighted Recall: 0.631 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/famube/autotrain-ciap-2345673819 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("famube/autotrain-ciap-2345673819", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("famube/autotrain-ciap-2345673819", use_auth_token=True) inputs = tokenizer("dor de cabeça", return_tensors="pt") outputs = model(**inputs) ```
Cheatham/xlm-roberta-large-finetuned-r01
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
23
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: huggan/smithsonian_butterflies_subset metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-butterflies-128 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `huggan/smithsonian_butterflies_subset` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: fp16 ### Training results 📈 [TensorBoard logs](https://huggingface.co/weirtz/ddpm-butterflies-128/tensorboard?#scalars)
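The usage snippet above is still marked TODO in the card; a plausible sketch, assuming the repository id visible in the TensorBoard link (`weirtz/ddpm-butterflies-128`) is the published checkpoint, would be:

```python
from diffusers import DDPMPipeline

# Repository id taken from the card's TensorBoard link; adjust if the model lives elsewhere.
pipeline = DDPMPipeline.from_pretrained("weirtz/ddpm-butterflies-128")

# Unconditional sampling: each call draws a fresh 128x128 butterfly image.
image = pipeline().images[0]
image.save("butterfly.png")
```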
CheonggyeMountain-Sherpa/kogpt-trinity-poem
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
2022-12-05T20:03:22Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imagefolder metrics: - accuracy model-index: - name: swin-tiny-patch4-window7-224-finetuned-eurosat results: - task: name: Image Classification type: image-classification dataset: name: imagefolder type: imagefolder config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9792592592592593 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # swin-tiny-patch4-window7-224-finetuned-eurosat This model is a fine-tuned version of [microsoft/swin-tiny-patch4-window7-224](https://huggingface.co/microsoft/swin-tiny-patch4-window7-224) on the imagefolder dataset. It achieves the following results on the evaluation set: - Loss: 0.0557 - Accuracy: 0.9793 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.24 | 1.0 | 190 | 0.1153 | 0.9619 | | 0.1665 | 2.0 | 380 | 0.0798 | 0.9737 | | 0.1392 | 3.0 | 570 | 0.0557 | 0.9793 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
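For completeness, a short inference sketch for an image-classification fine-tune like this one; the checkpoint path and the image file are placeholders:

```python
from transformers import pipeline

# Placeholder path; point this at the fine-tuned Swin checkpoint.
classifier = pipeline(
    "image-classification",
    model="path/to/swin-tiny-patch4-window7-224-finetuned-eurosat",
)

for prediction in classifier("satellite_tile.png", top_k=3):
    print(prediction["label"], round(prediction["score"], 3))
```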
Chertilasus/main
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: cc-by-sa-4.0 --- Hypernetwork trained on AnythingV3. Keyword: `iwakuralain` [![output.jpg](https://i.postimg.cc/ydg1YSQq/output.jpg)](https://postimg.cc/VrwQK5P4)
Chester/traffic-rec
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 512 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1320 with parameters: ``` {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 1320, "warmup_steps": 132, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 768, 'out_features': 512, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
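Because the card positions these 512-dimensional embeddings for clustering and semantic search, here is a small follow-up sketch of cosine-similarity scoring with the library's utility helpers; the model name is kept as the card's `{MODEL_NAME}` placeholder and the sentences are invented:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('{MODEL_NAME}')  # placeholder, as in the card

query = "How do I reset my password?"
corpus = [
    "Steps for recovering a forgotten password",
    "Office opening hours and directions",
]

query_emb = model.encode(query, convert_to_tensor=True)
corpus_emb = model.encode(corpus, convert_to_tensor=True)

# Cosine similarity between the query and every corpus sentence.
scores = util.cos_sim(query_emb, corpus_emb)[0]
for sentence, score in zip(corpus, scores):
    print(f"{float(score):.3f}  {sentence}")
```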
Ching/negation_detector
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: 04_model_sales_external results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 04_model_sales_external This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5133 - Accuracy: 0.7657 - F1: 0.7778 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
Chinmay/mlindia
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 267.70 +/- 35.13 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
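The usage section above is left as a TODO; a hedged sketch of the usual `huggingface_sb3` loading pattern follows. The repository id and filename are guesses (the card does not state them), so both need to be replaced with the values used when the model was pushed:

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Hypothetical repo id and filename; replace with the actual upload.
checkpoint = load_from_hub(repo_id="user/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```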
Chiuchiyin/Donald
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer model-index: - name: bert-base-historic-multilingual-64k-td-cased-squad-nl results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-historic-multilingual-64k-td-cased-squad-nl This model is a fine-tuned version of [dbmdz/bert-base-historic-multilingual-64k-td-cased](https://huggingface.co/dbmdz/bert-base-historic-multilingual-64k-td-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.6382 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.0101 | 1.0 | 4659 | 1.8679 | | 1.6528 | 2.0 | 9318 | 1.6382 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
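As an inference illustration for this extractive QA fine-tune, a brief sketch with the `transformers` pipeline API; the checkpoint path is a placeholder and the Dutch question/context pair is invented:

```python
from transformers import pipeline

# Placeholder path; point this at the fine-tuned checkpoint.
qa = pipeline(
    "question-answering",
    model="path/to/bert-base-historic-multilingual-64k-td-cased-squad-nl",
)

result = qa(
    question="Waar werd de krant gedrukt?",  # "Where was the newspaper printed?"
    context="De krant werd in 1878 gedrukt in Amsterdam en verspreid over de provincie.",
)
print(result["answer"], round(result["score"], 3))
```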
ChoboAvenger/DialoGPT-small-joshua
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -137.96 +/- 24.14 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
ChrisVCB/DialoGPT-medium-cmjs
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-small-sv-SE results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-sv-SE This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3729 - Wer: 21.0583 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0646 | 2.19 | 1000 | 0.2945 | 21.5744 | | 0.0121 | 4.38 | 2000 | 0.3362 | 21.2551 | | 0.0021 | 6.56 | 3000 | 0.3647 | 20.9186 | | 0.0013 | 8.75 | 4000 | 0.3729 | 21.0583 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
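Since the card reports word error rate, a brief sketch of how WER is typically computed with the `evaluate` library; the reference and hypothesis strings are made-up placeholders rather than outputs of this model:

```python
import evaluate

wer_metric = evaluate.load("wer")

# Toy Swedish reference transcripts vs. model hypotheses; a real evaluation would
# first collect model outputs over the whole test split.
references = ["det regnar i stockholm idag", "jag gillar kaffe"]
predictions = ["det regnar i stockholm i dag", "jag gillar kaffe"]

print(100 * wer_metric.compute(references=references, predictions=predictions))  # 25.0 here
```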
ChrisVCB/DialoGPT-medium-ej
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 512 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1320 with parameters: ``` {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 1320, "warmup_steps": 132, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 768, 'out_features': 512, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
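Because the card lists semantic search among the intended uses, a short follow-up to the encode example may help; it reuses the card's unresolved {MODEL_NAME} placeholder, and the corpus and query strings are purely illustrative.

```python
from sentence_transformers import SentenceTransformer, util

# '{MODEL_NAME}' is the card's unresolved placeholder for the repository id.
model = SentenceTransformer('{MODEL_NAME}')

corpus = ["A man is eating food.", "A monkey is playing drums.", "A cheetah chases prey."]
query = "Someone is having a meal."

corpus_emb = model.encode(corpus, convert_to_tensor=True)
query_emb = model.encode(query, convert_to_tensor=True)

# Rank corpus sentences by cosine similarity to the query (512-dim embeddings).
for hit in util.semantic_search(query_emb, corpus_emb, top_k=3)[0]:
    print(corpus[hit["corpus_id"]], round(hit["score"], 3))
```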
ChristianOrr/madnet_keras
[ "tensorboard", "dataset:flyingthings-3d", "dataset:kitti", "arxiv:1810.05424", "vision", "deep-stereo", "depth-estimation", "Tensorflow2", "Keras", "license:apache-2.0" ]
depth-estimation
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: MacBERTh-squad-en results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # MacBERTh-squad-en This model is a fine-tuned version of [emanjavacas/MacBERTh](https://huggingface.co/emanjavacas/MacBERTh) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.1805 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.2 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 2.5789 | 1.0 | 5110 | 2.3494 | | 2.2681 | 2.0 | 10220 | 2.1805 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
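The card appears to describe SQuAD-style extractive question answering, so the checkpoint should load into the transformers QA pipeline. The sketch below is an assumption-laden example: the repo id, question, and context are placeholders.

```python
from transformers import pipeline

# Placeholder repo id -- substitute the published MacBERTh-squad-en checkpoint.
qa = pipeline("question-answering", model="<user>/MacBERTh-squad-en")

# Illustrative question/context pair in the early-modern English register MacBERTh targets.
result = qa(
    question="Who printed the pamphlet?",
    context="This pamphlet was printed at London by Thomas Creede in 1598.",
)
print(result["answer"], round(result["score"], 3))
```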
Chun/w-en2zh-hsk
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2022-12-05T20:53:22Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: all-roberta-large-v1-home-1000-16-5-oos results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # all-roberta-large-v1-home-1000-16-5-oos This model is a fine-tuned version of [sentence-transformers/all-roberta-large-v1](https://huggingface.co/sentence-transformers/all-roberta-large-v1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 4.3075 - Accuracy: 0.3022 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 48 - eval_batch_size: 48 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 5.0829 | 1.0 | 1 | 4.7909 | 0.1467 | | 4.4304 | 2.0 | 2 | 4.6082 | 0.2644 | | 4.0728 | 3.0 | 3 | 4.4645 | 0.2933 | | 3.525 | 4.0 | 4 | 4.3617 | 0.2956 | | 3.3931 | 5.0 | 5 | 4.3075 | 0.3022 | ### Framework versions - Transformers 4.20.0 - Pytorch 1.11.0+cu102 - Datasets 2.3.2 - Tokenizers 0.12.1
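The card reports accuracy over a fixed label set, which suggests a sequence-classification head. A minimal inference sketch follows; the repo id and the example utterance are placeholders, and the predicted label names depend on the id2label mapping stored in the checkpoint.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Placeholder repo id for the fine-tuned classifier described above.
name = "<user>/all-roberta-large-v1-home-1000-16-5-oos"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name)

# Score one illustrative utterance and report the most likely intent label.
inputs = tokenizer("Turn off the lights in the living room.", return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)[0]
print(model.config.id2label[int(probs.argmax())], float(probs.max()))
```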
Chun/w-en2zh-mtm
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-12-05T20:54:22Z
--- license: other tags: - generated_from_trainer model-index: - name: segformer-b0-drone-real results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # segformer-b0-drone-real This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.5066 - Mean Iou: 0.0277 - Mean Accuracy: 0.0561 - Overall Accuracy: 0.3272 - Per Category Iou: [nan, 0.3359579661339679, 0.0, 0.20517716653585355, 0.003702068923287315, 0.0, 0.0, 0.0, 0.006312134342948017, 0.058787282412541476, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] - Per Category Accuracy: [nan, 0.6093986043791881, 0.0, 0.4105641817321649, 0.003921086446309355, 0.0, 0.0, 0.0, 0.00706877745310955, 0.20348775859354865, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 6e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Per Category Iou | Per Category Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:| | 2.8285 | 2.0 | 40 | 2.8542 | 0.0229 | 0.0536 | 0.2211 | [nan, 0.23060686675928652, 3.3649639948852546e-05, 0.16273692278074856, 0.013325250843685254, 4.063223761732559e-05, 0.0, 0.0, 0.035811860573766785, 0.06198642655834834, 0.0, 0.0, 6.128953174797744e-05, 0.0, 0.0, 0.0, 5.1148278860416345e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.33385338422824606, 3.373169528473065e-05, 0.299629413956809, 0.014924989967585504, 4.09158863797309e-05, 0.0, 0.0, 0.06156816279101685, 0.46948081490592114, 0.0, 0.0, 7.101768340316739e-05, 0.0, 0.0, 0.0, 5.163155720776539e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 2.6567 | 4.0 | 80 | 2.6894 | 0.0262 | 0.0563 | 0.2866 | [nan, 0.30624516924744655, 0.0, 0.18452743224435408, 0.004815305953306897, 0.0, 0.0, 0.0, 0.013916000286575587, 0.06596235864669749, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.49410241551371004, 0.0, 0.3789267923602906, 0.005102681931249924, 0.0, 0.0, 0.0, 0.016502745882809654, 0.3439400151190408, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 2.3286 | 6.0 | 120 | 2.5923 | 0.0267 | 0.0560 | 0.3328 | [nan, 0.35729772739055154, 0.0, 0.1539404924138176, 0.0035934817901112264, 0.0, 0.0, 0.0, 0.005766957703680072, 0.06586525335295575, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.7280486819078453, 0.0, 0.23220288304265976, 0.003859608350752127, 0.0, 0.0, 0.0, 0.006166880921555261, 0.2614486947285752, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 2.3836 | 8.0 | 160 | 2.5890 | 0.0275 | 0.0575 | 0.3244 | [nan, 0.349601471481378, 0.0, 0.17496917223449057, 0.0044570694472161245, 0.0, 0.0, 0.0, 0.007437757319012064, 0.0682080200846761, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.6614103864255038, 0.0, 0.29170107802854334, 0.004828395043379214, 0.0, 0.0, 0.0, 0.008074588150255963, 0.29956113264795986, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 2.2849 | 10.0 | 200 | 2.5525 | 0.0283 | 0.0569 | 0.3388 | [nan, 0.3351423574874955, 0.0, 0.21293393590619594, 0.002350142484175345, 0.0, 0.0, 0.0, 0.005453996034797477, 0.06749403988523864, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.6106816065491506, 0.0, 0.4703984171366385, 0.002485471577527932, 0.0, 0.0, 0.0, 0.0058035081160884615, 0.16273986773434082, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 2.1539 | 12.0 | 240 | 2.5352 | 0.0289 | 0.0579 | 0.3431 | [nan, 0.34790380657207765, 0.0, 0.21813897765519252, 0.0039179488370322925, 0.0, 0.0, 0.0, 0.004069615148127275, 0.06184727957512016, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.6213138456628569, 0.0, 0.4695635471053968, 0.004103493982578053, 0.0, 0.0, 0.0, 0.004355898828122703, 0.17384734612397268, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 2.1894 | 14.0 | 280 | 2.5923 | 0.0271 | 0.0567 | 0.3056 | [nan, 0.3019300415296601, 0.0, 0.2108717486241356, 0.003421017543191299, 0.0, 0.0, 0.0, 0.012217783066499338, 0.06721066611554333, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.4912292097745947, 0.0, 0.49141307925385636, 0.0036400437237620272, 0.0, 0.0, 0.0, 0.01400423107111977, 0.2475275870599001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 1.9499 | 16.0 | 320 | 2.5169 | 0.0282 | 0.0569 | 0.3320 | [nan, 0.33589786653498954, 0.0, 0.20789931305652776, 0.004207056605876958, 0.0, 0.0, 0.0, 0.0075080990386678535, 0.06424320004285503, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.613616488775929, 0.0, 0.4253884604222434, 0.0044872253922099824, 0.0, 0.0, 0.0, 0.008369991941750555, 0.19915876549744932, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 2.115 | 18.0 | 360 | 2.5153 | 0.0277 | 0.0562 | 0.3256 | [nan, 0.3342799309383031, 0.0, 0.2066227292961174, 0.0034518271833141106, 0.0, 0.0, 0.0, 0.006799912048482982, 0.05915124911379064, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.5983876453859164, 0.0, 0.4204451510267332, 0.0036636891451301916, 0.0, 0.0, 0.0, 0.007619718595211191, 0.20718275529815128, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | | 2.0119 | 20.0 | 400 | 2.5066 | 0.0277 | 0.0561 | 0.3272 | [nan, 0.3359579661339679, 0.0, 0.20517716653585355, 0.003702068923287315, 0.0, 0.0, 0.0, 0.006312134342948017, 0.058787282412541476, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | [nan, 0.6093986043791881, 0.0, 0.4105641817321649, 0.003921086446309355, 0.0, 0.0, 0.0, 0.00706877745310955, 0.20348775859354865, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan] | ### 
Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
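To apply the fine-tuned SegFormer above to a new image, the usual transformers semantic-segmentation flow should work. This is a sketch under assumptions: the repo id and image path are placeholders, and the class meanings follow whatever id2label mapping the checkpoint stores.

```python
import torch
from PIL import Image
from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation

# Placeholder repo id -- point it at the actual segformer-b0-drone-real checkpoint.
name = "<user>/segformer-b0-drone-real"
processor = SegformerImageProcessor.from_pretrained(name)
model = SegformerForSemanticSegmentation.from_pretrained(name)

image = Image.open("drone_frame.jpg")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, num_labels, H/4, W/4)

# Upsample to the input resolution and take the per-pixel argmax.
upsampled = torch.nn.functional.interpolate(
    logits, size=image.size[::-1], mode="bilinear", align_corners=False
)
segmentation_map = upsampled.argmax(dim=1)[0]
```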
Chun/w-zh2en-mtm
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
null
Chun/w-zh2en-mto
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - hi license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: Whisper Small - Swedish results: [] metrics: - wer --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Hi - Swedish This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2