Dataset columns (each record below repeats these seven fields, one per line, in this order):
- modelId — string, 4 to 81 characters
- tags — list
- pipeline_tag — string, 17 distinct classes
- config — dict
- downloads — int64, 0 to 59.7M
- first_commit — timestamp[ns, tz=UTC]
- card — string, 51 to 438k characters
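The records that follow are raw dataset rows, so a small sketch of how they could be parsed may help. This is only an illustration, not the dataset's official loader: the file name `models_dump.txt`, the assumption that the schema header has been stripped so the file contains only seven-line records, and the simple `null` handling are all assumptions made for this example.

```python
import json

# Field order of one record, matching the column list above.
FIELDS = ["modelId", "tags", "pipeline_tag", "config", "downloads", "first_commit", "card"]


def parse_records(path):
    """Group every seven consecutive lines into one record and decode JSON-like fields.

    Assumes `path` contains only the data rows (schema header removed), one field per line.
    """
    with open(path, encoding="utf-8") as f:
        lines = [line.rstrip("\n") for line in f]

    records = []
    for i in range(0, len(lines) - len(FIELDS) + 1, len(FIELDS)):
        raw = dict(zip(FIELDS, lines[i:i + len(FIELDS)]))
        records.append({
            "modelId": raw["modelId"],
            # tags and config are serialized as JSON in the dump; "null" marks a missing value.
            "tags": json.loads(raw["tags"]) if raw["tags"] != "null" else [],
            "pipeline_tag": None if raw["pipeline_tag"] == "null" else raw["pipeline_tag"],
            "config": json.loads(raw["config"]) if raw["config"] != "null" else None,
            # Download counts may contain thousands separators, e.g. "1,816".
            "downloads": int(raw["downloads"].replace(",", "")),
            "first_commit": None if raw["first_commit"] == "null" else raw["first_commit"],
            "card": raw["card"],
        })
    return records


if __name__ == "__main__":
    records = parse_records("models_dump.txt")  # hypothetical path to the rows below
    # Example query: conversational models, sorted by download count.
    chatbots = [r for r in records if r["pipeline_tag"] == "conversational"]
    for r in sorted(chatbots, key=lambda r: r["downloads"], reverse=True):
        print(r["modelId"], r["downloads"])
```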
Barleysack/klue-roberta-LSTM
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "QAWithLSTMModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - wnut_17 metrics: - precision - recall - f1 - accuracy model-index: - name: my_awesome_wnut_model results: - task: name: Token Classification type: token-classification dataset: name: wnut_17 type: wnut_17 config: wnut_17 split: train args: wnut_17 metrics: - name: Precision type: precision value: 0.5675675675675675 - name: Recall type: recall value: 0.2919369786839666 - name: F1 type: f1 value: 0.3855569155446756 - name: Accuracy type: accuracy value: 0.9411739557949639 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_wnut_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the wnut_17 dataset. It achieves the following results on the evaluation set: - Loss: 0.2777 - Precision: 0.5676 - Recall: 0.2919 - F1: 0.3856 - Accuracy: 0.9412 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 213 | 0.2872 | 0.4563 | 0.2373 | 0.3122 | 0.9377 | | No log | 2.0 | 426 | 0.2777 | 0.5676 | 0.2919 | 0.3856 | 0.9412 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
Batsy24/DialoGPT-small-Twilight_EdBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - wnut_17 metrics: - precision - recall - f1 - accuracy model-index: - name: my_awesome_wnut_model results: - task: name: Token Classification type: token-classification dataset: name: wnut_17 type: wnut_17 config: wnut_17 split: train args: wnut_17 metrics: - name: Precision type: precision value: 0.49644128113879005 - name: Recall type: recall value: 0.25857275254865614 - name: F1 type: f1 value: 0.3400365630712979 - name: Accuracy type: accuracy value: 0.9386516181437305 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_wnut_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the wnut_17 dataset. It achieves the following results on the evaluation set: - Loss: 0.2892 - Precision: 0.4964 - Recall: 0.2586 - F1: 0.3400 - Accuracy: 0.9387 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 213 | 0.3054 | 0.3875 | 0.1613 | 0.2277 | 0.9344 | | No log | 2.0 | 426 | 0.2892 | 0.4964 | 0.2586 | 0.3400 | 0.9387 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
BatuhanYilmaz/bert-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-08T14:23:06Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 168.71 +/- 90.33 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BatuhanYilmaz/marian-finetuned-kde4-en-to-fr
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - as license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: openai/whisper-medium-Assamese results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: as split: test args: as metrics: - name: Wer type: wer value: 60.06136076520484 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # openai/whisper-medium-Assamese This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 1.1306 - Wer: 60.0614 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 2 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 50 - training_steps: 100 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.2362 | 1.0 | 100 | 1.1306 | 60.0614 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
BeIR/query-gen-msmarco-t5-base-v1
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
1,816
null
--- language: - sv license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Tiny Swedish Fast results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 sv-SE type: mozilla-foundation/common_voice_11_0 config: sv-SE split: test args: sv-SE metrics: - name: Wer type: wer value: 73.01634232878185 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Tiny Swedish Fast This model is a fine-tuned version of [openai/whisper-tiny](https://huggingface.co/openai/whisper-tiny) on the mozilla-foundation/common_voice_11_0 sv-SE dataset. It achieves the following results on the evaluation set: - Loss: 1.4344 - Wer: 73.0163 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 128 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.5547 | 6.01 | 1000 | 1.9244 | 113.4448 | | 0.7244 | 12.02 | 2000 | 1.4593 | 81.0128 | | 0.3583 | 18.03 | 3000 | 1.4019 | 74.3415 | | 0.2157 | 25.01 | 4000 | 1.4249 | 73.8953 | | 0.1897 | 31.02 | 5000 | 1.4344 | 73.0163 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
BeIR/sparta-msmarco-distilbert-base-v1
[ "pytorch", "distilbert", "feature-extraction", "arxiv:2009.13013", "arxiv:2104.08663", "transformers" ]
feature-extraction
{ "architectures": [ "DistilBertModel" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
106
2022-12-08T14:49:30Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 266.53 +/- 18.86 name: mean_reward verified: false --- # **ppo** Agent playing **LunarLander-v2** This is a trained model of a **ppo** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Beatriz/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-08T14:54:55Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-base-cased-finetuned-wikitext2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-cased-finetuned-wikitext2 This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.6212 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.8335 | 1.0 | 2393 | 1.7164 | | 1.738 | 2.0 | 4786 | 1.6589 | | 1.7029 | 3.0 | 7179 | 1.6216 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
Bee-Garbs/DialoGPT-real-cartman-small
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 --- Just an example for https://github.com/microsoft/onnxruntime/issues/13894
BertChristiaens/EmojiPredictor
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-12-08T15:13:43Z
Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
Berzemu/Coco
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-08T15:13:44Z
Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
BhanuSama/gpt2-finetuned-xsum
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-08T15:14:14Z
--- language: - th license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: Whisper Small Thai Newmm Tokenized - Parinthapat Pengpun results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Thai Newmm Tokenized - Parinthapat Pengpun This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - eval_loss: 0.2095 - eval_wer: 26.6533 - eval_cer: 8.0405 - eval_runtime: 5652.2819 - eval_samples_per_second: 1.934 - eval_steps_per_second: 0.061 - epoch: 5.06 - step: 2000 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
BigBoy/model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: zbenmo/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
BigSalmon/BertaMyWorda
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
BigSalmon/BestMask2
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
BigSalmon/BlankSlots
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
4
2022-12-08T15:35:08Z
Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
BigSalmon/GPTIntro
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: distilbert-base-multilingual-cased-finetuned-dakshina-ml results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-multilingual-cased-finetuned-dakshina-ml This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2255 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.5614 | 1.0 | 16 | 1.4559 | | 1.4984 | 2.0 | 32 | 1.3131 | | 1.3218 | 3.0 | 48 | 1.2369 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
BigSalmon/MrLincoln6
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### noggles_v21_3400_30percent Dreambooth model trained by alxdfy with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept: ![0](https://huggingface.co/alxdfy/noggles-v21-3400-30percent/resolve/main/sample_images/noggles_(15).jpg)
BigSalmon/MrLincoln8
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2022-12-08T16:31:26Z
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: asoon --- ### Asoon Dreambooth SD Model Dreambooth model trained by AlekseyCalvin with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the v2-1-768 base model You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! Sample pictures of: To generate custom images of my primary public self – one known as A.C.T. SOON® – use "asoon" or "asoon person" in your Stable Diffusion prompt (implemented via this model only). Checkpoints herein trained based on SD 2.1. [asoon:](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2812%29.jpg)![asoon 12](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2813%29.jpg)![asoon 13](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2814%29.jpg)![asoon 14](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2815%29.jpg)![asoon 10](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2811%29.jpg)![asoon 15](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2816%29.jpg)![asoon 16](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2817%29.jpg)![asoon 17](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2818%29.jpg)![asoon 18](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2819%29.jpg)![asoon 19](https://huggingface.co/AlekseyCalvin/asoon-dreambooth-sd-model/resolve/main/concept_images/asoon_%2820%29.jpg)!
BigSalmon/MrLincolnBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - whisper-event - generated_from_trainer - hf-asr-leaderboard datasets: - google/fleurs metrics: - wer model-index: - name: Whisper Medium Tajik results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: google/fleurs type: google/fleurs config: tg_tj split: test args: tg_tj metrics: - name: Wer type: wer value: 23.153018764230197 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Medium Tajik This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the google/fleurs tg_tj dataset. It achieves the following results on the evaluation set: - Loss: 0.9217 - Wer: 23.1530 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0016 | 66.0 | 1000 | 0.6929 | 24.2993 | | 0.0001 | 133.0 | 2000 | 0.8054 | 23.3022 | | 0.0001 | 199.0 | 3000 | 0.8652 | 23.2237 | | 0.0 | 266.0 | 4000 | 0.9019 | 23.2394 | | 0.0 | 333.0 | 5000 | 0.9217 | 23.1530 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
BigSalmon/NEO125InformalToFormalLincoln
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Craig-Wazowski-style Dreambooth model trained by Kagerage with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Consistancy is a little rough here, as it seems to struggle choosing between being painterly or realistic, but it works well enough for me. Pre-append every prompt with "GregRutkowski artwork, ", as this seems to give better results than putting it at the end of the prompt. Also use a CFG between 9.0 and 12.0, with a non-ancestral sampler, as these values personally gave me the best quality outputs. Also negative prompts, they help. NOTE: This model is FP16, so it may have issues working without Xformers, though I haven't tested this myself. Extra note, it was trained on the 768 model, so don't go below that. As with the standard 2.1 768 model, extreme aspect ratios above 768x768 work pretty decently, despite the training images being 1:1. Sample pictures of this concept: ![0](https://huggingface.co/Kagerage/craig-wazowski-style/resolve/main/sample_images/00015-2847494744-GregRutkowski_artwork,_shallow_people_taking_selfies,_defined_faces.png) ![1](https://huggingface.co/Kagerage/craig-wazowski-style/resolve/main/sample_images/00122-324663361-(GregRutkowski_1.35)%20portrait%20artwork%2C%201girl%20(anime_0.2)%20catgirl%20nekomimi%2C%20(cat%20ears_1.2)%2C%20cute%2C%20slight%20(cat-like_1.4)%20closed%20mo.png) ![2](https://huggingface.co/Kagerage/craig-wazowski-style/resolve/main/sample_images/00007-3086692139-GregRutkowski%20artwork%2C%20landscape%20view%20of%20active%20volcano%20in%20vicious%20plains.png) ![3](https://huggingface.co/Kagerage/craig-wazowski-style/resolve/main/sample_images/00024-2865323196-GregRutkowski_artwork,_Elon_Musk_driving_an_SUV,_passengers_seat_view.png) ![4](https://huggingface.co/Kagerage/craig-wazowski-style/resolve/main/sample_images/00008-1642941557-gregrutkowski%20artwork%2C%20flaming%20dumpster%20floating%20through%20a%20flooded%20city.png) ![5](https://huggingface.co/Kagerage/craig-wazowski-style/resolve/main/sample_images/00010-3302942428-GregRutkowski%20artwork%2C%20landscape%20view%20of%20Zion.png)
BigSalmon/ParaphraseParentheses
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - hi license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Hi - Sanchit Gandhi results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: or split: test args: 'config: hi, split: test' metrics: - name: Wer type: wer value: 60.045792787635946 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Hi - Sanchit Gandhi This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.7019 - Wer: 60.0458 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 4000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0048 | 20.0 | 1000 | 0.3577 | 60.3892 | | 0.0001 | 40.0 | 2000 | 0.4971 | 60.0458 | | 0.0001 | 60.0 | 3000 | 0.6355 | 60.6754 | | 0.0001 | 80.0 | 4000 | 0.7019 | 60.0458 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
BigSalmon/PhraseBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
BigSalmon/Points2
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
Please refer to [flaim](https://github.com/bobmcdear/flaim) for sample usage and more information.
BigSalmon/T52
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
8
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -170.35 +/- 86.64 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BigTooth/DialoGPT-Megumin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
2022-12-08T16:57:10Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 289.92 +/- 10.39 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Bilz/DialoGPT-small-harrypotter
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # lambdaofgod/paperswithcode_word2vec This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 200 dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Training This model was trained on PapersWithCode dataset on abstracts and READMEs using gensim. <!--- Describe your model here --> ## Usage (Sentence-Transformers) ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('lambdaofgod/paperswithcode_word2vec') embeddings = model.encode(sentences) print(embeddings) ``` ## Full Model Architecture ``` SentenceTransformer( (0): WordEmbeddings( (emb_layer): Embedding(147043, 200) ) (1): Pooling({'word_embedding_dimension': 200, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Blaine-Mason/hackMIT-finetuned-sst2
[ "pytorch", "tensorboard", "bert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
2022-12-08T17:26:36Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Froggy Style V1.5 #### V1.5 Model by TheLastBen This model is trained on 11 Midjourney images 512x512, 1300 steps and 300 steps text_encoder (30% because the total steps is low, normally 15%) #### Prompts to start with : ttdddd , __________, movie, ultra high quality render, high quality graphical details, 8k, volumetric lighting, micro details, (cinematic) Negative : bad, low-quality, 3d, game The prompt also can be as simple as the instance name : ttdddd and you will still get great results. You can also train your own concepts and upload them to the library by using [fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb). Test the concept via A1111 Colab :[fast-stable-diffusion-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) #### Sample pictures of this concept: !["" 0](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(1).png) !["" 1](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(10).png) !["" 2](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(11).png) !["" 3](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(14).png) !["" 4](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(15).png) !["" 5](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(18).png) !["" 6](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(19).png) !["" 7](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(24).png) !["" 8](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(25).png) !["" 9](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(26).png) !["" 10](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(27).png) !["" 11](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(29).png) !["" 12](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(3).png) !["" 13](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(32).png) !["" 14](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(33).png) !["" 15](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(34).png) !["" 16](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(7).png) !["" 17](https://huggingface.co/TheLastBen/froggy-style/resolve/main/concept_images/img%20(8).png)
Blazeolmo/Scrabunzi
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - wnut_17 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-large-uncased_ner_wnut_17 results: - task: name: Token Classification type: token-classification dataset: name: wnut_17 type: wnut_17 args: wnut_17 metrics: - name: Precision type: precision value: 0.7052785923753666 - name: Recall type: recall value: 0.5753588516746412 - name: F1 type: f1 value: 0.6337285902503295 - name: Accuracy type: accuracy value: 0.9602644796236252 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-large-uncased_ner_wnut_17 This model is a fine-tuned version of [bert-large-uncased](https://huggingface.co/bert-large-uncased) on the wnut_17 dataset. It achieves the following results on the evaluation set: - Loss: 0.2516 - Precision: 0.7053 - Recall: 0.5754 - F1: 0.6337 - Accuracy: 0.9603 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 213 | 0.2143 | 0.6353 | 0.4605 | 0.5340 | 0.9490 | | No log | 2.0 | 426 | 0.2299 | 0.7322 | 0.5036 | 0.5967 | 0.9556 | | 0.1489 | 3.0 | 639 | 0.2137 | 0.6583 | 0.5945 | 0.6248 | 0.9603 | | 0.1489 | 4.0 | 852 | 0.2494 | 0.7035 | 0.5789 | 0.6352 | 0.9604 | | 0.0268 | 5.0 | 1065 | 0.2516 | 0.7053 | 0.5754 | 0.6337 | 0.9603 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
BlightZz/DialoGPT-medium-Kurisu
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19
2022-12-08T17:28:44Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### bladegirl Dreambooth model trained by CiroN2022 with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept: ![0](https://huggingface.co/CiroN2022/bladegirl/resolve/main/sample_images/bladegirl_(82).jpg)
BlindMan820/Sarcastic-News-Headlines
[ "pytorch", "distilbert", "text-classification", "English", "dataset:Kaggle Dataset", "transformers", "Text", "Sequence-Classification", "Sarcasm", "DistilBert" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 279.11 +/- 21.71 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BlueGamerBeast/DialoGPT-small-Morgana
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 200.12 +/- 92.12 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Branex/gpt-neo-2.7B
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - it license: apache-2.0 tags: - generated_from_trainer - whisper-event datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: luigisaetta/whisper-medium-it results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 it type: mozilla-foundation/common_voice_11_0 config: it split: test args: it metrics: - name: Wer type: wer value: 5.7191 --- # luigisaetta/whisper-medium-it This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the common_voice_11_0 dataset. It achieves the following results on the evaluation set: - Loss: 0.1452 - Wer: 5.7191 ## Model description This model is a fine-tuning of the OpenAI Whisper Medium model on the specified dataset. ## Intended uses & limitations This model was developed as part of the Hugging Face Whisper Fine-Tuning sprint, December 2022. It is meant to spread knowledge of how these models are built, and it can be used to develop solutions where ASR for the Italian language is needed. It has not been extensively tested, and accuracy may be lower on other datasets; please test it before using it. ## Training and evaluation data Trained and tested on Mozilla Common Voice, version 11. ## Training procedure The script **run.sh** and the Python file used for the training are saved in the repository. ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.1216 | 0.2 | 1000 | 0.2289 | 10.0594 | | 0.1801 | 0.4 | 2000 | 0.1851 | 7.6593 | | 0.1763 | 0.6 | 3000 | 0.1615 | 6.5258 | | 0.1337 | 0.8 | 4000 | 0.1506 | 6.0427 | | 0.0742 | 1.05 | 5000 | 0.1452 | 5.7191 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
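Since the card names the checkpoint (luigisaetta/whisper-medium-it) but shows no inference code, a minimal sketch with the ASR pipeline follows; the audio filename is a placeholder and the long-form chunking setting is an assumption, not something the card prescribes.

```python
from transformers import pipeline

# Model id taken from the card title; chunking is an assumption for audio longer than 30 s.
asr = pipeline(
    "automatic-speech-recognition",
    model="luigisaetta/whisper-medium-it",
    chunk_length_s=30,
)

# "sample_it.wav" is a placeholder path for an Italian audio file.
print(asr("sample_it.wav")["text"])
```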
Brayan/CNN_Brain_Tumor
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 266.47 +/- 18.25 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
BritishLibraryLabs/bl-books-genre
[ "pytorch", "distilbert", "text-classification", "multilingual", "dataset:blbooksgenre", "transformers", "genre", "books", "library", "historic", "glam ", "lam", "license:mit", "has_space" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
76
2022-12-08T18:22:17Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 186.66 +/- 74.41 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Brokette/projetCS
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: cc --- Tuned model created with fast-DreamBooth, built on SD 1.5. Tuned for creating images of the Pacific Northwest landscape. Use the following phrase near the beginning of your prompt: "dvPNW" Example: "dvPNW style, forest" or "xyz character, standing in the dvPNW" ![PNW_Thumb.jpg](https://s3.amazonaws.com/moonup/production/uploads/1670524459314-6331c100acb6472115ae666a.jpeg)
Brona/model1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - wnut_17 metrics: - precision - recall - f1 - accuracy model-index: - name: microsoft-deberta-v3-large_ner_wnut_17 results: - task: name: Token Classification type: token-classification dataset: name: wnut_17 type: wnut_17 args: wnut_17 metrics: - name: Precision type: precision value: 0.7670623145400594 - name: Recall type: recall value: 0.618421052631579 - name: F1 type: f1 value: 0.6847682119205298 - name: Accuracy type: accuracy value: 0.9666942096230853 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # microsoft-deberta-v3-large_ner_wnut_17 This model is a fine-tuned version of [microsoft/deberta-v3-large](https://huggingface.co/microsoft/deberta-v3-large) on the wnut_17 dataset. It achieves the following results on the evaluation set: - Loss: 0.2199 - Precision: 0.7671 - Recall: 0.6184 - F1: 0.6848 - Accuracy: 0.9667 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 213 | 0.1751 | 0.6884 | 0.5682 | 0.6225 | 0.9601 | | No log | 2.0 | 426 | 0.1702 | 0.7351 | 0.6208 | 0.6732 | 0.9655 | | 0.1003 | 3.0 | 639 | 0.1954 | 0.7360 | 0.6136 | 0.6693 | 0.9656 | | 0.1003 | 4.0 | 852 | 0.2113 | 0.7595 | 0.6232 | 0.6846 | 0.9669 | | 0.015 | 5.0 | 1065 | 0.2199 | 0.7671 | 0.6184 | 0.6848 | 0.9667 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
BrunoNogueira/DialoGPT-kungfupanda
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-small-se results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-se This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2787 - Wer: 19.8570 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.5e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 150 - training_steps: 2000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.1047 | 1.3 | 1000 | 0.2900 | 21.3842 | | 0.0357 | 2.59 | 2000 | 0.2787 | 19.8570 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
Brykee/DialoGPT-medium-Morty
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - ar tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: "Whisper Small AR - gerryc" results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: FLEURS type: google/fleurs config: ar_eg split: test args: ar metrics: - name: Wer type: wer value: 59.26 --- # Whisper Small Ar - gerryc Model was trained on CommonVoice Train. Normalized and lowercased.
BumBelDumBel/ZORK_AI_FANTASY
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-08T19:18:08Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 270.92 +/- 11.97 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Buntan/BuntanAI
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-08T19:24:45Z
--- license: mit --- ### jozef-tominc2 on Stable Diffusion This is the `<jozef-tominc>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<jozef-tominc> 0](https://huggingface.co/sd-concepts-library/jozef-tominc2/resolve/main/concept_images/2.jpeg) ![<jozef-tominc> 1](https://huggingface.co/sd-concepts-library/jozef-tominc2/resolve/main/concept_images/1.jpeg) ![<jozef-tominc> 2](https://huggingface.co/sd-concepts-library/jozef-tominc2/resolve/main/concept_images/3.jpeg) ![<jozef-tominc> 3](https://huggingface.co/sd-concepts-library/jozef-tominc2/resolve/main/concept_images/4.jpeg) ![<jozef-tominc> 4](https://huggingface.co/sd-concepts-library/jozef-tominc2/resolve/main/concept_images/0.jpeg)
CALM/CALM
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 249.35 +/- 23.08 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CALM/backup
[ "lean_albert", "transformers" ]
null
{ "architectures": [ "LeanAlbertForPretraining", "LeanAlbertForTokenClassification", "LeanAlbertForSequenceClassification" ], "model_type": "lean_albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: -547.86 +/- 404.01 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CAMeL-Lab/bert-base-arabic-camelbert-ca-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
85
null
Example prompt (I am going to be training it on a larger dataset later). ``` lowercase: that heady and almost intoxicating mix of ripening dairy produce and friendly competition was swirling around a conference center in the united kingdom on wednesday as 250 international judges sniffed, prodded and chomped their way along tables groaning with cheese to decide which should take the crown at the 2022 edition of the world cheese awards. actual: That heady and almost intoxicating mix of ripening dairy produce and friendly competition was swirling around a conference center in the United Kingdom on Wednesday as 250 international judges sniffed, prodded and chomped their way along tables groaning with cheese to decide which should take the crown at the 2022 edition of the World Cheese Awards. ```
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16,451
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: BERTModified-fullsize-finetuned-wikitext-test results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BERTModified-fullsize-finetuned-wikitext-test This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 6.7813 - Precision: 0.1094 - Recall: 0.1094 - F1: 0.1094 - Accuracy: 0.1094 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 9.2391 | 1.0 | 4382 | 8.1610 | 0.0373 | 0.0373 | 0.0373 | 0.0373 | | 7.9147 | 2.0 | 8764 | 7.6870 | 0.0635 | 0.0635 | 0.0635 | 0.0635 | | 7.5164 | 3.0 | 13146 | 7.4388 | 0.0727 | 0.0727 | 0.0727 | 0.0727 | | 7.2439 | 4.0 | 17528 | 7.2088 | 0.0930 | 0.0930 | 0.0930 | 0.0930 | | 7.1068 | 5.0 | 21910 | 7.0455 | 0.0943 | 0.0943 | 0.0943 | 0.0943 | | 6.9711 | 6.0 | 26292 | 6.9976 | 0.1054 | 0.1054 | 0.1054 | 0.1054 | | 6.8486 | 7.0 | 30674 | 6.8850 | 0.1054 | 0.1054 | 0.1054 | 0.1054 | | 6.78 | 8.0 | 35056 | 6.7990 | 0.1153 | 0.1153 | 0.1153 | 0.1153 | | 6.73 | 9.0 | 39438 | 6.8041 | 0.1074 | 0.1074 | 0.1074 | 0.1074 | | 6.6921 | 10.0 | 43820 | 6.7412 | 0.1251 | 0.1251 | 0.1251 | 0.1251 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0 - Datasets 2.6.1 - Tokenizers 0.13.2
CAMeL-Lab/bert-base-arabic-camelbert-da-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: juanmi1234/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CAMeL-Lab/bert-base-arabic-camelbert-da-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "has_space" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
19,850
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: Nnarruqt/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-nadi
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
63
null
--- language: - te license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - google/fleurs metrics: - wer model-index: - name: whisper-small-telugu results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: google/fleurs type: google/fleurs config: te_in split: test metrics: - name: Wer type: wer value: 39.67740444608772 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-telugu This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the google/fleurs dataset. It achieves the following results on the evaluation set (google/flerus telugu test set): - Loss: 0.3622 - Wer: 39.6774 [openai/whisper-small](https://huggingface.co/openai/whisper-small) has the following zero shot performance on google/fleurs test set: - Wer: 117.91 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:-------:| | 0.2623 | 1.55 | 500 | 0.2733 | 65.9750 | | 0.0859 | 3.1 | 1000 | 0.2045 | 39.7652 | | 0.0538 | 4.64 | 1500 | 0.2220 | 42.3811 | | 0.0265 | 6.19 | 2000 | 0.2526 | 42.3626 | | 0.0179 | 7.74 | 2500 | 0.2754 | 42.1685 | | 0.008 | 9.29 | 3000 | 0.2966 | 41.2257 | | 0.0061 | 10.83 | 3500 | 0.2950 | 40.6202 | | 0.0034 | 12.38 | 4000 | 0.3049 | 40.3198 | | 0.004 | 13.93 | 4500 | 0.3106 | 40.5879 | | 0.0018 | 15.48 | 5000 | 0.3199 | 40.1812 | | 0.0016 | 17.03 | 5500 | 0.3346 | 39.8345 | | 0.0006 | 18.57 | 6000 | 0.3337 | 40.2274 | | 0.0003 | 20.12 | 6500 | 0.3396 | 40.2597 | | 0.0005 | 21.67 | 7000 | 0.3465 | 40.1072 | | 0.0002 | 23.22 | 7500 | 0.3485 | 39.7282 | | 0.0002 | 24.77 | 8000 | 0.3519 | 39.7837 | | 0.0001 | 26.32 | 8500 | 0.3567 | 39.7560 | | 0.0001 | 27.86 | 9000 | 0.3614 | 39.8068 | | 0.0 | 29.41 | 9500 | 0.3609 | 39.4925 | | 0.0 | 30.96 | 10000 | 0.3622 | 39.6774 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
2022-12-08T20:37:50Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - opus_books metrics: - bleu model-index: - name: my_awesome_opus_books_model results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: opus_books type: opus_books config: en-fr split: train args: en-fr metrics: - name: Bleu type: bleu value: 5.8848 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_opus_books_model This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the opus_books dataset. It achieves the following results on the evaluation set: - Loss: 1.5732 - Bleu: 5.8848 - Gen Len: 17.6211 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:------:|:-------:| | 1.8085 | 1.0 | 12709 | 1.5986 | 5.6979 | 17.6331 | | 1.7824 | 2.0 | 25418 | 1.5732 | 5.8848 | 17.6211 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
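The opus_books card above reports BLEU but no usage; a minimal sketch with the translation pipeline is below. The model path is only the Trainer output name from the card (not a confirmed Hub repo id), and relying on the pipeline to prepend the T5 task prefix assumes the fine-tune kept t5-small's translation_en_to_fr task_specific_params.

```python
from transformers import pipeline

# Placeholder model path: the output_dir name from the card, not a confirmed Hub repo id.
translator = pipeline("translation_en_to_fr", model="my_awesome_opus_books_model")

result = translator("Legumes share resources with nitrogen-fixing bacteria.")
print(result[0]["translation_text"])
```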
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-glf
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
132
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 289.10 +/- 13.75 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,862
null
-- Yeah -- Mirage Model. Not intended for download, only for tests.
CAMeL-Lab/bert-base-arabic-camelbert-mix-sentiment
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
855
null
--- license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer model-index: - name: Whisper Small Hi - Robert Rey results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Hi - Robert Rey This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.000599 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
CAMeL-Lab/bert-base-arabic-camelbert-msa-eighth
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: whisper-base-ar-quran results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-base-ar-quran This model is a fine-tuned version of [openai/whisper-base](https://huggingface.co/openai/whisper-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0839 - Wer: 5.7544 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - total_train_batch_size: 128 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.1092 | 0.05 | 250 | 0.1969 | 13.3890 | | 0.0361 | 0.1 | 500 | 0.1583 | 10.6375 | | 0.0192 | 0.15 | 750 | 0.1109 | 8.8468 | | 0.0144 | 0.2 | 1000 | 0.1157 | 7.9754 | | 0.008 | 0.25 | 1250 | 0.1000 | 7.5360 | | 0.0048 | 1.03 | 1500 | 0.0933 | 6.8227 | | 0.0113 | 1.08 | 1750 | 0.0955 | 6.9638 | | 0.0209 | 1.13 | 2000 | 0.0824 | 6.3586 | | 0.0043 | 1.18 | 2250 | 0.0830 | 6.3444 | | 0.002 | 1.23 | 2500 | 0.1015 | 6.3025 | | 0.0013 | 2.01 | 2750 | 0.0863 | 6.0639 | | 0.0014 | 2.06 | 3000 | 0.0905 | 6.0213 | | 0.0018 | 2.11 | 3250 | 0.0864 | 6.0293 | | 0.0008 | 2.16 | 3500 | 0.0887 | 5.9308 | | 0.0029 | 2.21 | 3750 | 0.0777 | 5.9159 | | 0.0022 | 2.26 | 4000 | 0.0847 | 5.8749 | | 0.0005 | 3.05 | 4250 | 0.0827 | 5.8352 | | 0.0003 | 3.1 | 4500 | 0.0826 | 5.7800 | | 0.0006 | 3.15 | 4750 | 0.0833 | 5.7625 | | 0.0003 | 3.2 | 5000 | 0.0839 | 5.7544 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
CAUKiel/JavaBERT-uncased
[ "pytorch", "safetensors", "bert", "fill-mask", "java", "code", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-12-08T21:19:49Z
--- license: apache-2.0 language: - eu tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: openai/whisper-small results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 eu type: mozilla-foundation/common_voice_11_0 config: eu split: test args: eu metrics: - name: Wer type: wer value: 19.766305675433596 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # openai/whisper-small Basque-Euskera This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the common_voice_11_0 dataset. It achieves the following results on the evaluation set: - Loss: 0.4485 - Wer: 19.7663 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.048 | 4.04 | 1000 | 0.3402 | 21.7816 | | 0.0047 | 9.03 | 2000 | 0.3862 | 20.1694 | | 0.0012 | 14.02 | 3000 | 0.4221 | 19.7419 | | 0.0008 | 19.02 | 4000 | 0.4411 | 19.7174 | | 0.0006 | 24.01 | 5000 | 0.4485 | 19.7663 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
CL/safe-math-bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer model-index: - name: EHR_ML_simulation_2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # EHR_ML_simulation_2 This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 1000 - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0 - Datasets 2.7.1 - Tokenizers 0.13.2
CLAck/en-vi
[ "pytorch", "marian", "text2text-generation", "en", "vi", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 188 with parameters: ``` {'batch_size': 32, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 188, "warmup_steps": 19, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
CLAck/indo-pure
[ "pytorch", "marian", "text2text-generation", "en", "id", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 236.31 +/- 14.65 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CLS/WubiBERT_models
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-08T21:52:44Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 251.76 +/- 22.35 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CLTL/icf-domains
[ "pytorch", "roberta", "nl", "transformers", "license:mit", "text-classification" ]
text-classification
{ "architectures": [ "RobertaForMultiLabelSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
--- tags: - generated_from_trainer model-index: - name: whisper-small-hi-2000-temp results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-hi-2000-temp This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.000599 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 2000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
CLTL/icf-levels-att
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
2022-12-08T22:06:00Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Example Fine-Tuned Model for Unit 2 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This is a fun model! ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('lewtun/ddpm-celebahq-finetuned-butterflies') image = pipeline().images[0] image ```
CLTL/icf-levels-fac
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- license: creativeml-openrail-m --- ### 💥🎨 The Simpsons dreambooth model. This is a fine-tuned Stable Diffusion model based on The Simpsons. Use **asim style** in your prompts. The model has some trouble with double pupils and no pupils. Using "cross-eyed" in the negative prompt appears to help? ### Sample images: Samples are made with [dynamic prompts](https://github.com/adieyal/sd-dynamic-prompts), Euler 80 steps @ CFG 12. Negative prompts: watermark, text, signature, cross-eyed ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_famous_people.png) ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_famous_people2.png) ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_characters.png) For people / characters: asim style. dramatic beautiful { headshot | portrait } of \_\_person\_\_ {outside { in a garden | in a desert | on a mountain top | at a roman ruin} {at sunrise | at sunset | on an overcast afternoon | in the rain | in the snow | at night} | inside {a fancy living room | on a movie set | a vast empty dark space | a kaleidoscope | an ancient library} with {spotlights | neon lights | soft mood lighting | firefly lights } }. detailed background. ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_animals.png) For animals: asim style. dramatic closeup national geographic image of a \_\_animal\_\_ in its natural habitat. at {sunrise|sunset|night}. detailed background. ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_buildings.png) asim style. + random prompt from the internet of cool looking structures: steampunk library, tower of babel, tree house, haunted victorian. ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_landscapes.png) ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_famous_places.png) biomes: asim style. a beautiful {summer | autumn | winter | spring } landscape panorama painting of \_\_biome\_\_ {at sunrise | at sunset | on an overcast afternoon | in the rain | in the snow | at night} famous places: asim style. a beautiful panorama view of \_\_places\_\_ {at sunrise | at sunset | on a cloudy afternoon | in the rain | covered in snow}. ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_flowers.png) flowers: asim style. a beautiful vase of \_\_flower\_\_ flowers. on a balcony table at { sunrise | sunset | night} . nearby a {bottle of {beer | wine} and a half-empty glass | bowl of fruit}. ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_internet_examples.png) ![asim.jpg](https://huggingface.co/PiyarSquare/sd_asim_simpsons/resolve/main/grid_internet_examples2.png) asim style. + random prompt from the internet. The model mixes well with existing prompts with artists and styles, though not so well with keywords like "photo-realistic." Based on StableDiffusion 1.5 model (full weights). ### Training Made with [automatic1111 webui](https://github.com/AUTOMATIC1111/stable-diffusion-webui) + [d8ahazard dreambooth extension](https://github.com/d8ahazard/sd_dreambooth_extension) + [nitrosocke guide](https://github.com/nitrosocke/dreambooth-training-guide). 100 hand-cut training images. About 70% people, 20% landscapes and 10% animals and objects. Maybe one too many Cletus. Detailed captions were written for each image such as: "A wide shot of a 40-year-old Caucasian man with glasses and a mustache. Dressed in a fishing hat, pink shirt, an olive fishing vest with pockets and brown trousers, sitting in a canoe on a lake. The man is fishing with a red fishing rod. There are trees and mountains in the background at sunset with a few clouds in the sky." Learning rate was 1.72e-6 for 10,000 steps without prior preservation. Useful tips from the reddit stablediffusion and the discussions on d8ahazard's extension. Notes on training on [d8ahazard dreambooth extension discussion](https://github.com/d8ahazard/sd_dreambooth_extension/discussions/443). I am excited to see what people do with this and I would like to improve the eyes, if anyone has suggestions.
CM-CA/Cartman
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-08T22:26:13Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: ksaml/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CSResearcher/TestModel
[ "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer language: - ga model-index: - name: wav2vec2-large-xls-r-300m-irish-colab results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 type: mozilla-foundation/common_voice_11_0 config: ga-IE split: train+validation args: ga-IE metrics: - name: Wer type: wer value: 52.44117647058824 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-irish-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 1.148 - Wer: 52.4 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 4.6516 | 12.12 | 400 | 1.2867 | 0.7653 | | 0.4188 | 24.24 | 800 | 1.1262 | 0.5509 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.10.0+cu113 - Datasets 2.0.0 - Tokenizers 0.13.2
CZWin32768/xlm-align
[ "pytorch", "xlm-roberta", "fill-mask", "arxiv:2106.06381", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: jinghua2tang/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
Callidior/bert2bert-base-arxiv-titlegen
[ "pytorch", "safetensors", "encoder-decoder", "text2text-generation", "en", "dataset:arxiv_dataset", "transformers", "summarization", "license:apache-2.0", "autotrain_compatible", "has_space" ]
summarization
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
145
null
--- language: - ja license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Large V2 Japanese results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: mozilla-foundation/common_voice_11_0 ja type: mozilla-foundation/common_voice_11_0 config: ja split: test args: ja metrics: - type: wer value: 8.1166 name: Wer - type: cer value: 5.0032 name: Cer --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # openai/whisper-large-v2 This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the mozilla-foundation/common_voice_11_0 dataset. It achieves the following results on the evaluation set: - Loss: 0.2352 - Wer: 8.1166 - Cer: 5.0032 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 10000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | Cer | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:| | 0.0897 | 0.1 | 1000 | 0.1884 | 11.0068 | 6.6992 | | 0.0396 | 0.2 | 2000 | 0.1749 | 9.7399 | 5.9350 | | 0.036 | 1.1 | 3000 | 0.1698 | 9.1419 | 5.6781 | | 0.012 | 1.2 | 4000 | 0.1849 | 9.3041 | 5.7661 | | 0.0151 | 2.09 | 5000 | 0.1879 | 9.1959 | 5.6761 | | 0.0047 | 2.19 | 6000 | 0.2097 | 8.6706 | 5.4422 | | 0.0046 | 3.09 | 7000 | 0.2040 | 8.8277 | 5.4717 | | 0.0015 | 3.19 | 8000 | 0.2260 | 8.4949 | 5.3101 | | 0.0013 | 4.09 | 9000 | 0.2339 | 8.3716 | 5.1471 | | 0.0005 | 4.19 | 10000 | 0.2352 | 8.1166 | 5.0032 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
dccuchile/albert-large-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- language: - pt license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Large v2 Portuguese results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 pt type: mozilla-foundation/common_voice_11_0 config: pt split: test args: pt metrics: - name: Wer type: wer value: 5.590020342630419 --- # Whisper Large V2 Portuguese 🇧🇷🇵🇹 Bem-vindo ao **whisper large-v2** para transcrição em português 👋🏻 Transcribe Portuguese audio to text with the highest precision. - Loss: 0.282 - Wer: 5.590 This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the [mozilla-foundation/common_voice_11](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) dataset. If you want a lighter model, you may be interested in [jlondonobo/whisper-medium-pt](https://huggingface.co/jlondonobo/whisper-medium-pt). It achieves faster inference with almost no difference in WER. ### Comparable models Reported **WER** is based on the evaluation subset of Common Voice. | Model | WER | # Parameters | |--------------------------------------------------|:--------:|:------------:| | [jlondonobo/whisper-large-v2-pt](https://huggingface.co/jlondonobo/whisper-large-v2-pt) | **5.590** 🤗 | 1550M | | [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) | 6.300 | 1550M | | [jlondonobo/whisper-medium-pt](https://huggingface.co/jlondonobo/whisper-medium-pt) | 6.579 | 769M | | [jonatasgrosman/wav2vec2-large-xlsr-53-portuguese](https://huggingface.co/jonatasgrosman/wav2vec2-large-xlsr-53-portuguese) | 11.310 | 317M | | [Edresson/wav2vec2-large-xlsr-coraa-portuguese](https://huggingface.co/Edresson/wav2vec2-large-xlsr-coraa-portuguese) | 20.080 | 317M | ### Training hyperparameters We used the following hyperparameters for training: - `learning_rate`: 1e-05 - `train_batch_size`: 16 - `eval_batch_size`: 8 - `seed`: 42 - `gradient_accumulation_steps`: 2 - `total_train_batch_size`: 32 - `optimizer`: Adam with betas=(0.9,0.999) and epsilon=1e-08 - `lr_scheduler_type`: linear - `lr_scheduler_warmup_steps`: 500 - `training_steps`: 5000 - `mixed_precision_training`: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.0828 | 1.09 | 1000 | 0.1868 | 6.778 | | 0.0241 | 3.07 | 2000 | 0.2057 | 6.109 | | 0.0084 | 5.06 | 3000 | 0.2367 | 6.029 | | 0.0015 | 7.04 | 4000 | 0.2469 | 5.709 | | 0.0009 | 9.02 | 5000 | 0.2821 | 5.590 🤗| ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
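Since this card describes an ASR checkpoint (jlondonobo/whisper-large-v2-pt is named in its comparison table), a minimal transcription sketch with the transformers pipeline might look like the following; the audio file name and device handling are assumptions, not part of the card:

```python
import torch
from transformers import pipeline

# Load the fine-tuned Whisper checkpoint named in the card's comparison table.
asr = pipeline(
    "automatic-speech-recognition",
    model="jlondonobo/whisper-large-v2-pt",
    chunk_length_s=30,                              # chunk long audio files
    device=0 if torch.cuda.is_available() else -1,  # GPU if available
)

# Transcribe a local Portuguese audio file (hypothetical file name).
print(asr("amostra_pt.mp3")["text"])
```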
dccuchile/albert-large-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - zeroth_korean_asr model-index: - name: wav2vec2-large-xls-r-300m-kor-11385-3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-kor-11385-3 This model is a fine-tuned version of [teddy322/wav2vec2-large-xls-r-300m-kor-11385-2](https://huggingface.co/teddy322/wav2vec2-large-xls-r-300m-kor-11385-2) on the zeroth_korean_asr dataset. It achieves the following results on the evaluation set: - eval_loss: 0.2425 - eval_wer: 0.1495 - eval_runtime: 137.8001 - eval_samples_per_second: 3.316 - eval_steps_per_second: 0.421 - epoch: 10.59 - step: 3600 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 20 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
dccuchile/albert-tiny-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
Access to model akinoshi/Fall2022-COMP258-002-Group1 is restricted and you are not in the authorized list. Visit https://huggingface.co/akinoshi/Fall2022-COMP258-002-Group1 to ask for access.
dccuchile/albert-xlarge-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- license: unknown --- Token: su_mdl Class: style Example: 1girl, grin, solo, female focus, smile, sparkling eyes, shiny hair, su_mdl style I get good results using these negative prompts: bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry With a CFG Scale of 11. This is trained on top of Anything.ckpt using 100 screenshots from Steven Universe at 10k steps.
dccuchile/albert-xlarge-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
2022-12-09T02:48:01Z
This is a capstone project used for training the model and exploring its implementation in AI applications.
dccuchile/albert-xlarge-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: wtfpl --- Trained for 500 steps with a lr of 0.003 and 4 steps gradient accumulation. ![08039-3409504356-portrait of woman in trenchcoat, city bokeh background, art by bforangeteal, extremely detailed, embers, debris, art by photohel.png](https://s3.amazonaws.com/moonup/production/uploads/1670555250809-6312579fc7577b68d90a7646.png) ![08009-1360552088-ford mustang, city bokeh background, art by bforangeteal, extremely detailed, embers, debris, art by photohelper, nukesd.png](https://s3.amazonaws.com/moonup/production/uploads/1670555259179-6312579fc7577b68d90a7646.png) ![07949-29151249-portrait of gigachad, city bokeh background, art by bforangeteal, extremely detailed, embers, debris, art by photohelper, mascul.png](https://s3.amazonaws.com/moonup/production/uploads/1670555280835-6312579fc7577b68d90a7646.png) ![07868-2761092669-headshot portrait of henry cavill in general uniform, city bokeh background, art by actionhelper, extremely detailed, embers, de.png](https://s3.amazonaws.com/moonup/production/uploads/1670555326731-6312579fc7577b68d90a7646.png) ![07874-1633627578-headshot portrait of john wick in uniform, city bokeh background, art by actionhelper, extremely detailed, embers, debris, art b.png](https://s3.amazonaws.com/moonup/production/uploads/1670555331232-6312579fc7577b68d90a7646.png) ![08016-1561599122-fighter jet flying, city bokeh background, art by bforangeteal, extremely detailed, embers, debris, art by photohelper, nukesd.png](https://s3.amazonaws.com/moonup/production/uploads/1670555384383-6312579fc7577b68d90a7646.png)
dccuchile/albert-xlarge-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 275.66 +/- 17.91 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
dccuchile/albert-xxlarge-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
2022-12-09T03:17:39Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad model-index: - name: distilbert-base-uncased-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-squad This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad dataset. It achieves the following results on the evaluation set: - Loss: 1.1565 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.2059 | 1.0 | 5533 | 1.1450 | | 0.9519 | 2.0 | 11066 | 1.1236 | | 0.7477 | 3.0 | 16599 | 1.1565 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
dccuchile/albert-xxlarge-spanish-finetuned-pawsx
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- license: mit --- # Introduction QTC4SO (Question Title Completion for Stack Overflow) is a pre-trained model based on T5. We fine-tuned it on our downstream task. It is used for question title completion on Stack Overflow. # More details You can find our code and dataset on our [GitHub project](https://github.com/shaoyuyoung/QTC4SO)<br> For more details, please refer to [our paper](https://smartse.github.io/paper/icpc2023.pdf)
dccuchile/albert-xxlarge-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-12-09T03:24:55Z
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial to learn to train your first agent using ML-Agents and publish it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser:**. 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Step 1: Write your model_id: rpharale/ppo-Huggy 3. Step 2: Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
dccuchile/albert-xxlarge-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: distilbert-base-uncased_ner_conll2003 results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9357583847822459 - name: Recall type: recall value: 0.9437899697071693 - name: F1 type: f1 value: 0.939757017176372 - name: Accuracy type: accuracy value: 0.987675713562556 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased_ner_conll2003 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0524 - Precision: 0.9358 - Recall: 0.9438 - F1: 0.9398 - Accuracy: 0.9877 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.1897 | 1.0 | 878 | 0.0544 | 0.9223 | 0.9270 | 0.9246 | 0.9848 | | 0.0363 | 2.0 | 1756 | 0.0486 | 0.9316 | 0.9391 | 0.9353 | 0.9869 | | 0.0194 | 3.0 | 2634 | 0.0496 | 0.9369 | 0.9403 | 0.9386 | 0.9873 | | 0.0114 | 4.0 | 3512 | 0.0526 | 0.9340 | 0.9436 | 0.9388 | 0.9875 | | 0.0089 | 5.0 | 4390 | 0.0524 | 0.9358 | 0.9438 | 0.9398 | 0.9877 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
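For token-classification cards like the one above, inference is typically a one-liner with the transformers pipeline. A minimal sketch — the repo id below is hypothetical, since the card does not state where the fine-tuned weights are hosted — could be:

```python
from transformers import pipeline

# Load a CoNLL-2003 fine-tuned NER checkpoint (hypothetical repo id).
ner = pipeline(
    "token-classification",
    model="<user>/distilbert-base-uncased_ner_conll2003",
    aggregation_strategy="simple",  # merge sub-word pieces into whole entities
)

print(ner("Hugging Face is based in New York City."))
```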
dccuchile/albert-large-spanish
[ "pytorch", "tf", "albert", "pretraining", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA" ]
null
{ "architectures": [ "AlbertForPreTraining" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
75
null
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: hsksk ---
dccuchile/albert-xxlarge-spanish
[ "pytorch", "tf", "albert", "pretraining", "es", "dataset:large_spanish_corpus", "transformers", "spanish", "OpenCENIA" ]
null
{ "architectures": [ "AlbertForPreTraining" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
null
--- license: creativeml-openrail-m language: - en tags: - stable-diffusion - text-to-image --- # Any(thing) Mix(es) Mixed weeb models :) # Models All of the sample images uses the following prompt: ``` masterpiece, best quality, 1girl, blonde hair, short hair, wavy hair, blue eyes, hair ribbon, blue ribbon, sleeveless shirt, portrait Negative prompt: lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry Steps: 20, Sampler: Euler, CFG scale: 7, Size: 512x768, Clip skip: 2 ``` If you want the exact seed and stuff, you can get the EXIF data of the images :) ## anything-berry-30.ckpt ![](images/anything-berry.png) [Re-uploaded from](https://huggingface.co/misobarisic/anything-berrymix) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Weighted Sum @ 0.30 | Anything V3 | Berry Mix | n/a | **anything-berry-30** ## anything-f222-15.ckpt ![](images/anything-f222.png) [Recipe Source](https://www.reddit.com/r/WaifuDiffusion/comments/zdbs3r/comment/iz0nr48/?utm_source=reddit&utm_medium=web2x&context=3) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Weighted Sum @ 0.15 | Anything V3 | Zeipher F222 | n/a | **anything-f222-15** ## anything-f222-15-elysiumv2-10.ckpt ![](images/anything-f222-elysium.png) [Recipe Source](https://www.reddit.com/r/WaifuDiffusion/comments/zg1d8x/comment/izei93c/?utm_source=reddit&utm_medium=web2x&context=3) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Weighted Sum @ 0.10 | anything-f222-15 | Elysium Anime v2 | n/a | **anything-f222-15-elysiumv2-10** ## berrymix-v3.ckpt ![](images/berrymix.png) [Recipe Source](https://rentry.org/hdgrecipes#berrymix-v3-535d98a3) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Weighted Sum @ 0.05 | AnythingV3.0 | Stable Diffusion 1.5 | n/a | Anything Fix 2 | Add Difference @ 1 | Anything fix | Zeipher F222 | Stable Diffusion 1.5 | berrymix3 lite 3 | Weighted Sum @ 0.25 | berrymix3 lite | r34_e4 | n/a | **berrymix V3** ## blossom-extract.safetensors ![](images/blossom-extract.png) [Recipe Source](https://www.reddit.com/r/StableDiffusion/comments/zk8y50/comment/izyhn8w/?utm_source=reddit&utm_medium=web2x&context=3) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Add Difference @ 1 | Anything V3 | Zeipher F222 | Stable Diffusion 1.4 | **blossom-extract** ## hentai-elysium-50.safetensors ![](images/hentai-elysium.png) [Recipe Source](https://www.reddit.com/r/WaifuDiffusion/comments/zn6wdb/comment/j0fabe6/?utm_source=reddit&utm_medium=web2x&context=3) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Weighted Sum @ 0.5 | Hentai Diffusion 17 | Elysium Anime v2 | n/a | **hentai-elysium-50** ## nutmeg-mix.ckpt ![](images/nutmeg-mix.png) [Recipe Source](https://rentry.org/hdgrecipes#nutmegmix-aa3e502b) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Weighted Sum @ 0.05 | NovelAI | Stable Diffusion 1.5 | n/a | nutmegmix-part1 2 | Weighted Sum @ 0.05 | nutmegmix-part1 | Zeipher F222 | n/a | nutmegmix-part2 3 | Weighted Sum @ 0.05 | nutmegmix-part2 | r34_e4 | n/a | nutmegmix-part3 4 | Weighted Sum @ 0.05 | nutmegmix-part3 | SmirkingFace | n/a | nutmegmix-part4 5 | Weighted Sum @ 0.3 | AnythingV3.0 | nutmegmix-part4 | n/a | **nutmeg-mix** ## raspberry-mix.ckpt ![](images/raspberry-mix.png) [Recipe Source](https://rentry.org/hdgrecipes#raspberry-mix-4d202242) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Weighted Sum @ 0.25 | AnythingV3.0 | Stable Diffusion 1.5 | n/a | AnyV3-SD1.5 2 | Add Difference @ 1 | AnyV3-SD1.5 | Zeipher F222 | Stable Diffusion 1.4 | raspberry-lite 3 | Weighted Sum @ 0.15 | raspberry-lite | r34_e4 | n/a | **raspberry mix** ## strawberry-mix.ckpt ![](images/strawberry-mix.png) [Recipe Source](https://rentry.org/hdgrecipes#strawberry-mix-e043dfc5) Step | Interpolation Method | Primary Model | Secondary model | Tertiary Model | Merge Name --- | --- | --- | --- | --- | --- 1 | Weighted Sum @ 0.25 | AnythingV3.0 | Stable Diffusion 1.4 | n/a | AnyV3-SD1.4 2 | Add Difference @ 1 | AnyV3-SD1.4 | Zeipher F111 | Stable Diffusion 1.4 | strawberry-lite 3 | Weighted Sum @ 0.15 | strawberry-lite | r34_e4 | n/a | **strawberry mix**
dccuchile/bert-base-spanish-wwm-cased-finetuned-mldoc
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
2022-12-09T03:45:05Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-large-uncased_ner_conll2003 results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9424197037776668 - name: Recall type: recall value: 0.9530461124200605 - name: F1 type: f1 value: 0.947703121077734 - name: Accuracy type: accuracy value: 0.9897784354191815 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-large-uncased_ner_conll2003 This model is a fine-tuned version of [bert-large-uncased](https://huggingface.co/bert-large-uncased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0516 - Precision: 0.9424 - Recall: 0.9530 - F1: 0.9477 - Accuracy: 0.9898 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.1605 | 1.0 | 878 | 0.0533 | 0.9252 | 0.9329 | 0.9290 | 0.9864 | | 0.032 | 2.0 | 1756 | 0.0433 | 0.9320 | 0.9475 | 0.9397 | 0.9887 | | 0.0125 | 3.0 | 2634 | 0.0454 | 0.9424 | 0.9524 | 0.9474 | 0.9897 | | 0.006 | 4.0 | 3512 | 0.0507 | 0.9417 | 0.9519 | 0.9468 | 0.9896 | | 0.0036 | 5.0 | 4390 | 0.0516 | 0.9424 | 0.9530 | 0.9477 | 0.9898 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
dccuchile/bert-base-spanish-wwm-cased-finetuned-ner
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
81
null
--- license: mit tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: roberta-large_ner_conll2003 results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9622389306599833 - name: Recall type: recall value: 0.9692022887916526 - name: F1 type: f1 value: 0.9657080573488722 - name: Accuracy type: accuracy value: 0.9939449398387913 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large_ner_conll2003 This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0345 - Precision: 0.9622 - Recall: 0.9692 - F1: 0.9657 - Accuracy: 0.9939 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.1227 | 1.0 | 878 | 0.0431 | 0.9511 | 0.9559 | 0.9535 | 0.9914 | | 0.0295 | 2.0 | 1756 | 0.0334 | 0.9541 | 0.9657 | 0.9599 | 0.9930 | | 0.0163 | 3.0 | 2634 | 0.0327 | 0.9616 | 0.9682 | 0.9649 | 0.9938 | | 0.0073 | 4.0 | 3512 | 0.0342 | 0.9624 | 0.9692 | 0.9658 | 0.9939 | | 0.0042 | 5.0 | 4390 | 0.0345 | 0.9622 | 0.9692 | 0.9657 | 0.9939 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
dccuchile/bert-base-spanish-wwm-cased-finetuned-pawsx
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 276.82 +/- 15.15 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
dccuchile/bert-base-spanish-wwm-cased-finetuned-pos
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
# Fine-tuned models for wear particle classification
dccuchile/bert-base-spanish-wwm-cased-finetuned-qa-mlqa
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: gpl-3.0 --- My first model, with myself as the subject.
dccuchile/bert-base-spanish-wwm-uncased-finetuned-mldoc
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
39
null
--- tags: - stable-diffusion - text-to-image license: creativeml-openrail-m --- This is a <b>Stable Diffusion V2-768px</b> fine tuned model on Midjourney images mixing the artists Banksy and Romero Britto, by [DavidLandore](https://www.youtube.com/naomorra) This model can be used just like any other Stable Diffusion model. Use in your prompts: '<b>babrimodelo</b>' <img src="https://s3.amazonaws.com/moonup/production/uploads/1670560141426-632111228c0da827c72c6331.png" width="512"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1670560141422-632111228c0da827c72c6331.png" width="512"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1670560141429-632111228c0da827c72c6331.png" width="512"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1670560141597-632111228c0da827c72c6331.png" width="768"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1670560141596-632111228c0da827c72c6331.png" width="768"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1670560141814-632111228c0da827c72c6331.png" width="768"/> <img src="https://s3.amazonaws.com/moonup/production/uploads/1670560141758-632111228c0da827c72c6331.png" width="768"/>
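The card above gives the trigger word 'babrimodelo' for prompts but no loading code. A minimal diffusers sketch — the repo id, dtype, and output size are assumptions, to be adapted to wherever this Stable Diffusion 2 768px checkpoint is actually hosted — might look like:

```python
import torch
from diffusers import StableDiffusionPipeline

# Load the fine-tuned SD2-768 checkpoint (hypothetical repo id).
pipe = StableDiffusionPipeline.from_pretrained(
    "<user>/babrimodelo-sd2", torch_dtype=torch.float16
).to("cuda")

# The card's trigger word goes directly into the prompt.
image = pipe(
    "a colorful street mural of a cat, babrimodelo",
    height=768, width=768,
).images[0]
image.save("babrimodelo.png")
```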
dccuchile/bert-base-spanish-wwm-uncased-finetuned-qa-mlqa
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: my-finetuned-distilbert results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # my-finetuned-distilbert This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.6482 - Validation Loss: 1.3103 - Epoch: 0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1500, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Epoch | |:----------:|:---------------:|:-----:| | 1.6482 | 1.3103 | 0 | ### Framework versions - Transformers 4.25.1 - TensorFlow 2.9.2 - Datasets 2.7.1 - Tokenizers 0.13.2
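For readers unfamiliar with the Keras-side optimizer described above, here is a sketch of how an AdamWeightDecay optimizer with a linear warmup/decay schedule is typically built with transformers' `create_optimizer`. The step counts are taken from the card (1500 decay steps, 1000 warmup steps); everything else is an assumption rather than the exact training script.

```python
# Illustrative reconstruction of the optimizer configuration listed in the card.
from transformers import create_optimizer

optimizer, lr_schedule = create_optimizer(
    init_lr=2e-5,            # initial_learning_rate from the card
    num_train_steps=1500,    # decay_steps from the card
    num_warmup_steps=1000,   # warmup_steps from the card
    weight_decay_rate=0.01,  # weight_decay_rate from the card
)
# model.compile(optimizer=optimizer) would then apply this schedule during fit().
```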
dccuchile/distilbert-base-spanish-uncased-finetuned-ner
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
2022-12-09T04:42:32Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: albert-large-v2_ner_conll2003 results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9396018069265518 - name: Recall type: recall value: 0.9451363177381353 - name: F1 type: f1 value: 0.9423609363201612 - name: Accuracy type: accuracy value: 0.9874810170943499 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # albert-large-v2_ner_conll2003 This model is a fine-tuned version of [albert-large-v2](https://huggingface.co/albert-large-v2) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0584 - Precision: 0.9396 - Recall: 0.9451 - F1: 0.9424 - Accuracy: 0.9875 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2034 | 1.0 | 878 | 0.0653 | 0.9114 | 0.9278 | 0.9195 | 0.9837 | | 0.0561 | 2.0 | 1756 | 0.0602 | 0.9316 | 0.9280 | 0.9298 | 0.9845 | | 0.0303 | 3.0 | 2634 | 0.0536 | 0.9380 | 0.9424 | 0.9402 | 0.9872 | | 0.0177 | 4.0 | 3512 | 0.0535 | 0.9393 | 0.9456 | 0.9425 | 0.9877 | | 0.011 | 5.0 | 4390 | 0.0584 | 0.9396 | 0.9451 | 0.9424 | 0.9875 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
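A possible inference sketch for a CoNLL-2003 NER model like the one described above; the model id is a placeholder inferred from the card's title, and the input sentence is just an example.

```python
# Hedged example -- the model path is an assumption, not taken from the card.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="albert-large-v2_ner_conll2003",  # placeholder hub path
    aggregation_strategy="simple",          # merge word pieces into whole entities
)
print(ner("Hugging Face was founded in New York City."))
```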
dccuchile/distilbert-base-spanish-uncased-finetuned-pawsx
[ "pytorch", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: funnel-transformer-xlarge_ner_conll2003 results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 args: conll2003 metrics: - name: Precision type: precision value: 0.9565363315992617 - name: Recall type: recall value: 0.9592729720632783 - name: F1 type: f1 value: 0.9579026972523318 - name: Accuracy type: accuracy value: 0.9914528250457537 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # funnel-transformer-xlarge_ner_conll2003 This model is a fine-tuned version of [funnel-transformer/xlarge](https://huggingface.co/funnel-transformer/xlarge) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0436 - Precision: 0.9565 - Recall: 0.9593 - F1: 0.9579 - Accuracy: 0.9915 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.1349 | 1.0 | 878 | 0.0441 | 0.9328 | 0.9438 | 0.9383 | 0.9881 | | 0.0308 | 2.0 | 1756 | 0.0377 | 0.9457 | 0.9561 | 0.9509 | 0.9901 | | 0.0144 | 3.0 | 2634 | 0.0432 | 0.9512 | 0.9578 | 0.9545 | 0.9906 | | 0.007 | 4.0 | 3512 | 0.0419 | 0.9551 | 0.9584 | 0.9567 | 0.9913 | | 0.0041 | 5.0 | 4390 | 0.0436 | 0.9565 | 0.9593 | 0.9579 | 0.9915 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0 - Datasets 2.1.0 - Tokenizers 0.12.1
dccuchile/distilbert-base-spanish-uncased-finetuned-qa-mlqa
[ "pytorch", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 238.46 +/- 22.84 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
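The card above leaves its usage section as a TODO; the sketch below shows one plausible way to load and evaluate such a checkpoint with stable-baselines3 and huggingface_sb3. The repository id and filename are placeholders, not values from the card.

```python
# Hedged sketch filling in the card's TODO; repo_id and filename are assumptions.
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(
    repo_id="some-user/ppo-LunarLander-v2",  # placeholder repo id
    filename="ppo-LunarLander-v2.zip",       # placeholder filename
)
model = PPO.load(checkpoint)

eval_env = make_vec_env("LunarLander-v2", n_envs=1)
mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```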
CennetOguz/distilbert-base-uncased-finetuned-recipe-1
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
Access to model chubedan/CSGO is restricted and you are not in the authorized list. Visit https://huggingface.co/chubedan/CSGO to ask for access.
CennetOguz/distilbert-base-uncased-finetuned-recipe-accelerate
[ "pytorch", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-12-09T04:59:30Z
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### abstract_nature_patterns_v2 Dreambooth model trained by apurik-parv with the https://github.com/ShivamShrirao/diffusers Dreambooth implementation. Inference prompt: **abnapa** The model is an attempt at teaching the symmetry and scales associated with nature to the SD 1.5 base model. This version (v2) is trained on better-curated images for 40,000 steps. I am still working out what the model really does and whether it has any impact on the base model. That said, my findings so far are: - Images have better symmetry and lighting. - Images have fewer artifacts. - It does not seem to work with a large canvas such as 1024x1024; the repetition problem is still there. Feel free to experiment with the model.
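A hedged inference sketch for the Dreambooth checkpoint described above, assuming it is published in Diffusers format; the repository id is a guess based on the card, and the prompt merely demonstrates the **abnapa** trigger token.

```python
# Sketch only -- the repo id is an assumption inferred from the card's author and title.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "apurik-parv/abstract_nature_patterns_v2",  # placeholder path
    torch_dtype=torch.float16,
).to("cuda")

image = pipe("abnapa, fern fronds unfurling in soft morning light, symmetrical composition").images[0]
image.save("abnapa_sample.png")
```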
CennetOguz/distilbert-base-uncased-finetuned-recipe
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
Access to model chubedan/vn-ner-bert is restricted and you are not in the authorized list. Visit https://huggingface.co/chubedan/vn-ner-bert to ask for access.
Certified-Zoomer/DialoGPT-small-rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2023-03-03T15:43:02Z
--- language: - en tags: - code --- # This is what powered almost all of my Colab work This repo mostly uses LZ4 compression, which means you'll need a specialized program to extract it, especially on Windows. For Windows users, I recommend [7zip-zstd](https://github.com/mcmilk/7-Zip-zstd/releases/latest) (it's 7-Zip but with LZ4 support and more). For Linux users, use tar with liblz4-tool like this: `tar -xI lz4 -f repo.tar.lz4`
Chaewon/mnmt_decoder_en_gpt2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m --- # Monstergirl-Lamia Subject Model / Dreambooth Training ## Usage To use this model, download the .ckpt file and drop it into the "\stable-diffusion-webui\models\Stable-diffusion" folder. To use it in a prompt: ```"Lamia monstergirl"``` for the highest strength, or just "Lamia". To increase the strength, put "Lamia monstergirl" in () brackets; to decrease the strength, put "Lamia monstergirl" in [] brackets. The model is trained on the AnythingV3 base to 8,000 steps. Have fun :) ## Example Pictures from Lamia 8k <table> <tr> <td><img src=https://i.imgur.com/EEQCv5X.png width=150% height=150%/></td> <td><img src=https://i.imgur.com/FhsRzeI.png width=100% height=100%/></td> <td><img src=https://i.imgur.com/TkTUkwZ.png width=150% height=150%/></td> </tr> </table>
chainyo/speaker-recognition-meetup
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 271.65 +/- 26.13 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
ChaitanyaU/FineTuneLM
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-17T20:04:55Z
--- tags: - generated_from_trainer model-index: - name: wav2vec2-korean-v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-korean-v2 This model is a fine-tuned version of [teddy322/hyuk_Second_SON](https://huggingface.co/teddy322/hyuk_Second_SON) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4514 - Wer: 0.1679 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 400 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.0289 | 0.41 | 300 | 0.4758 | 0.1605 | | 0.0481 | 0.82 | 600 | 0.4885 | 0.1867 | | 0.0578 | 1.22 | 900 | 0.4700 | 0.1862 | | 0.0599 | 1.63 | 1200 | 0.4733 | 0.1857 | | 0.0575 | 2.04 | 1500 | 0.4504 | 0.1844 | | 0.0547 | 2.45 | 1800 | 0.4741 | 0.1865 | | 0.0495 | 2.86 | 2100 | 0.4473 | 0.1794 | | 0.045 | 3.27 | 2400 | 0.4559 | 0.1782 | | 0.0426 | 3.67 | 2700 | 0.4502 | 0.1722 | | 0.0466 | 4.08 | 3000 | 0.4464 | 0.1697 | | 0.0369 | 4.49 | 3300 | 0.4487 | 0.1665 | | 0.0342 | 4.9 | 3600 | 0.4514 | 0.1679 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.10.0+cu113 - Datasets 1.18.3 - Tokenizers 0.10.3
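A minimal transcription sketch for the fine-tuned wav2vec2 checkpoint described above, assuming it is available on the Hub; the model id is a placeholder and the audio file name is only an example.

```python
# Hedged example -- replace the placeholder model path with the real checkpoint.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="wav2vec2-korean-v2",  # placeholder hub path (assumption)
)
result = asr("sample_korean_speech.wav")  # any 16 kHz mono audio file
print(result["text"])
```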
Chakita/KNUBert
[ "pytorch", "tensorboard", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
2022-12-09T05:58:56Z
--- language: zh widget: text: "[CLS]当是时" --- # Chinese Ancient GPT2 Model ## Model description The model is used to generate ancient Chinese text. It uses the GPT2-medium architecture. We trained on 4 P100 GPUs for about 8 days (batch size = 4, steps = 1M). ## How to use You can use the model directly with a pipeline for text generation: ```python from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline tokenizer = BertTokenizer.from_pretrained("zhuimengshaonian/gpt2-ancient-medium") model = GPT2LMHeadModel.from_pretrained("zhuimengshaonian/gpt2-ancient-medium") text_generator = TextGenerationPipeline(model, tokenizer) text_generator("[CLS]当是时", max_length=100, do_sample=True) ```
Chan/distilgpt2-finetuned-wikitext2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 40 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 40, "warmup_steps": 4, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Normalize() ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Cheapestmedsshop/Buymodafinilus
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: EmileEsmaili/sheet_music_clean metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # ddpm-sheetmusic-clean-l2loss-colabVM ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `EmileEsmaili/sheet_music_clean` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 8 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: no ### Training results 📈 [TensorBoard logs](https://huggingface.co/EmileEsmaili/ddpm-sheetmusic-clean-l2loss-colabVM/tensorboard?#scalars)
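The card above leaves its usage snippet as a TODO; one plausible way to run an unconditional DDPM trained with this script is sketched below. The repository id is taken from the card's TensorBoard link, but the snippet itself is an assumption, not the authors' code.

```python
# Hedged sketch of unconditional sampling with the trained DDPM.
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained("EmileEsmaili/ddpm-sheetmusic-clean-l2loss-colabVM")
image = pipeline(num_inference_steps=1000).images[0]  # full DDPM sampling schedule
image.save("generated_sheet_music.png")
```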
Cheatham/xlm-roberta-base-finetuned
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
2022-12-09T07:19:59Z
--- datasets: - AliMeeting language: - zh license: apache-2.0 metrics: - name: "IHM test CER" type: cer value: 11.53 - name: "SDM test CER" type: cer value: 25.85 - name: "GSS test CER" type: cer value: 14.22 tags: - k2 - icefall --- # AliMeeting This is an ASR recipe for the AliMeeting corpus. AliMeeting provides recordings from the speaker's headset microphones and an 8-channel microphone array. We pool data in the following 4 ways and train a single model on the pooled data: (i) individual headset microphone (IHM) (ii) IHM with simulated reverb (iii) Single distant microphone (SDM) (iv) GSS-enhanced array microphones Speed perturbation and MUSAN noise augmentation are additionally performed on the pooled data. ## Performance Record ### pruned_transducer_stateless7 The following are decoded using `modified_beam_search`: | Evaluation set | eval CER | test CER | |--------------------------|------------|---------| | IHM | 9.58 | 11.53 | | SDM | 23.37 | 25.85 | | MDM (GSS-enhanced) | 11.82 | 14.22 | See the [recipe](https://github.com/k2-fsa/icefall/tree/master/egs/alimeeting/ASR_v2) for details.
Cheatham/xlm-roberta-large-finetuned-d1r01
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: ppo results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 298.95 +/- 14.87 name: mean_reward verified: false --- # **ppo** Agent playing **LunarLander-v2** This is a trained model of a **ppo** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Cheatham/xlm-roberta-large-finetuned
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
2022-12-09T07:33:49Z
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: tds-huggingpics results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.875 --- # tds-huggingpics Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### bed ![bed](images/bed.jpg) #### chair ![chair](images/chair.jpg) #### closet ![closet](images/closet.jpg) #### couch ![couch](images/couch.jpg) #### table ![table](images/table.jpg)
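A short classification sketch for a HuggingPics model like the one above; the model id is a placeholder based on the card's name, and the image path points at one of the example images listed in the card.

```python
# Illustrative only -- the model path is an assumption, not confirmed by the card.
from transformers import pipeline

classifier = pipeline("image-classification", model="tds-huggingpics")  # placeholder path
for prediction in classifier("images/chair.jpg"):
    print(prediction["label"], round(prediction["score"], 3))
```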