Dataset columns: modelId (string, length 4–81) · tags (list) · pipeline_tag (string, 17 classes) · config (dict) · downloads (int64, 0–59.7M) · first_commit (timestamp[ns, tz=UTC]) · card (string, length 51–438k)
AnonymousSub/SR_specter
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - nowcasting - forecasting - timeseries - remote-sensing --- # Nowcasting CNN ## Model description A 3D convolutional model that takes in several data streams. The architecture is roughly: 1. The satellite image time series goes into several 3D convolution layers. 2. The NWP time series goes into several 3D convolution layers. 3. The final convolutional layer feeds into a fully connected layer. This is joined by other data inputs such as - PV yield - time variables Then ~4 fully connected layers forecast the PV yield / GSP into the future. ## Intended uses & limitations Forecasting short-term PV power for different regions and nationally in the UK. ## How to use [More information needed] ## Limitations and bias [More information needed] ## Training data Training data is EUMETSAT RSS imagery over the UK, on-the-ground PV data, and NWP predictions. ## Training procedure [More information needed] ## Evaluation results [More information needed]
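For illustration only, a minimal PyTorch sketch of the architecture described in this card might look like the following; the layer sizes, channel counts, and number of forecast steps are assumptions, not the values used in the actual model.

```python
import torch
import torch.nn as nn

class NowcastingCNN(nn.Module):
    """Illustrative sketch: satellite and NWP streams -> 3D conv stacks -> FC head."""

    def __init__(self, sat_channels=12, nwp_channels=10, extra_features=16, forecast_len=24):
        super().__init__()
        # Each image stream gets its own stack of 3D convolutions (time x height x width).
        self.sat_encoder = nn.Sequential(
            nn.Conv3d(sat_channels, 32, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv3d(32, 64, kernel_size=3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool3d(1), nn.Flatten(),
        )
        self.nwp_encoder = nn.Sequential(
            nn.Conv3d(nwp_channels, 32, kernel_size=3, padding=1), nn.ReLU(),
            nn.Conv3d(32, 64, kernel_size=3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool3d(1), nn.Flatten(),
        )
        # ~4 fully connected layers over the concatenated features (incl. PV yield / time inputs).
        self.head = nn.Sequential(
            nn.Linear(64 + 64 + extra_features, 256), nn.ReLU(),
            nn.Linear(256, 128), nn.ReLU(),
            nn.Linear(128, 64), nn.ReLU(),
            nn.Linear(64, forecast_len),
        )

    def forward(self, satellite, nwp, extra):
        x = torch.cat([self.sat_encoder(satellite), self.nwp_encoder(nwp), extra], dim=1)
        return self.head(x)  # predicted PV / GSP yield for each future time step
```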
AnonymousSub/T5_pubmedqa_question_generation
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
2022-09-30T14:29:25Z
--- language: - is - en - multilingual tags: - translation inference: parameters: src_lang: is_IS tgt_lang: en_XX decoder_start_token_id: 2 max_length: 512 widget: - text: Einu sinni átti ég hest. Hann var svartur og hvítur. --- # mBART-based translation model This model was trained to translate multiple sentences at once, rather than one sentence at a time. It will occasionally combine sentences or add an extra sentence. This is the same model as the one provided on CLARIN: https://repository.clarin.is/repository/xmlui/handle/20.500.12537/278
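A minimal inference sketch using the parameters from the metadata above; the checkpoint name below is a placeholder, not the real repository id, so substitute the actual path before running.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "path/to/this-mbart-is-en-checkpoint"  # placeholder, replace with the real repo id
tokenizer = AutoTokenizer.from_pretrained(model_id, src_lang="is_IS", tgt_lang="en_XX")
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

text = "Einu sinni átti ég hest. Hann var svartur og hvítur."
inputs = tokenizer(text, return_tensors="pt")
# decoder_start_token_id and max_length mirror the inference parameters in the card metadata.
outputs = model.generate(**inputs, decoder_start_token_id=2, max_length=512)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```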
AnonymousSub/bert-base-uncased_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: - en tags: - esb datasets: - esb/datasets - librispeech_asr --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="librispeech" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-librispeech" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="2" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --max_steps="50001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="300" \ --final_generation_num_beams="12" \ --generation_length_penalty="1.6" \ --hidden_dropout="0.2" \ --activation_dropout="0.2" \ --feat_proj_dropout="0.2" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/bert_hier_diff_equal_wts_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - en tags: - esb datasets: - esb/datasets - mozilla-foundation/common_voice_9_0 --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="common_voice" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-common-voice" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="2" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --max_steps="50001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="200" \ --final_generation_num_beams="14" \ --generation_length_penalty="1.2" \ --max_eval_duration_in_seconds="20" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/bert_hier_diff_equal_wts_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - en tags: - esb datasets: - esb/datasets - LIUM/tedlium --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-tedlium" \ --dataset_config_name="tedlium" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-tedlium" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="2" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --max_steps="50001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="250" \ --final_generation_num_beams="12" \ --generation_length_penalty="1.5" \ --hidden_dropout="0.2" \ --activation_dropout="0.2" \ --feat_proj_dropout="0.2" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/bert_mean_diff_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-09-30T14:39:11Z
--- language: - en tags: - esb datasets: - esb/datasets - facebook/voxpopuli --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="voxpopuli" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-voxpopuli" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="1" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --max_steps="10001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="225" \ --final_generation_num_beams="5" \ --generation_length_penalty="0.8" \ --hidden_dropout="0.2" \ --activation_dropout="0.2" \ --feat_proj_dropout="0.2" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/bert_mean_diff_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - en tags: - esb datasets: - esb/datasets - speechcolab/gigaspeech --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="gigaspeech" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-gigaspeech" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="2" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --max_steps="50001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="200" \ --final_generation_num_beams="14" \ --generation_length_penalty="1.2" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/bert_snips
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en tags: - esb datasets: - esb/datasets - kensho/spgispeech --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="spgispeech" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-spgispeech" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="2" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --max_steps="50001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="225" \ --final_generation_num_beams="14" \ --generation_length_penalty="1.6" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en tags: - esb datasets: - esb/datasets - revdotcom/earnings22 --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="earnings22" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-earnings22" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="25" \ --max_steps="50000" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --generation_length_penalty="1.2" \ --final_generation_max_length="200" \ --final_generation_num_beams="5" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --hidden_dropout="0.2" \ --activation_dropout="0.2" \ --feat_proj_dropout="0.2" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/bert_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - en tags: - esb datasets: - esb/datasets - edinburghcstr/ami --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="ami" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-ami" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --max_steps="50001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="225" \ --final_generation_num_beams="5" \ --generation_length_penalty="1.4" \ --hidden_dropout="0.2" \ --activation_dropout="0.2" \ --feat_proj_dropout="0.2" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/cline-papers-biomed-0.618
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "LecbertForPreTraining" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: mit --- ### mate on Stable Diffusion via Dreambooth #### model by machinelearnear This is the Stable Diffusion model fine-tuned on the mate concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **a photo of sks mate** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/0.jpeg) ![image 1](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/7.jpeg) ![image 2](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/3.jpeg) ![image 3](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/4.jpeg) ![image 4](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/5.jpeg) ![image 5](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/1.jpeg) ![image 6](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/6.jpeg) ![image 7](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/2.jpeg) ![image 8](https://huggingface.co/sd-dreambooth-library/mate/resolve/main/concept_images/8.jpeg)
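A minimal inference sketch with `diffusers`, assuming the fine-tuned weights are hosted at `sd-dreambooth-library/mate` as the image links above suggest; adjust the repo id if the checkpoint lives elsewhere.

```python
import torch
from diffusers import StableDiffusionPipeline

# Assumed repo id, inferred from the concept-image URLs in the card.
pipe = StableDiffusionPipeline.from_pretrained(
    "sd-dreambooth-library/mate", torch_dtype=torch.float16
).to("cuda")

# The instance prompt documented in the card.
image = pipe("a photo of sks mate").images[0]
image.save("sks_mate.png")
```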
AnonymousSub/cline-s10-SR
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en tags: - esb datasets: - esb/datasets - ldc/switchboard --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="switchboard" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-switchboard" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="2" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --logging_steps="25" \ --max_steps="50001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="260" \ --final_generation_num_beams="5" \ --generation_length_penalty="0.8" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/cline_emanuals
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "LecbertForPreTraining" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - en tags: - esb datasets: - esb/datasets - ldc/chime-4 --- To reproduce this run, execute: ```bash #!/usr/bin/env bash python run_flax_speech_recognition_seq2seq.py \ --dataset_name="esb/datasets" \ --model_name_or_path="esb/wav2vec2-aed-pretrained" \ --dataset_config_name="chime4" \ --output_dir="./" \ --wandb_name="wav2vec2-aed-chime4" \ --wandb_project="wav2vec2-aed" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="25" \ --max_steps="50001" \ --eval_steps="10000" \ --save_steps="10000" \ --generation_max_length="40" \ --generation_num_beams="1" \ --final_generation_max_length="250" \ --final_generation_num_beams="5" \ --generation_length_penalty="0.6" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --hidden_dropout="0.2" \ --activation_dropout="0.2" \ --feat_proj_dropout="0.2" \ --overwrite_output_dir \ --gradient_checkpointing \ --freeze_feature_encoder \ --predict_with_generate \ --do_eval \ --do_train \ --do_predict \ --push_to_hub \ --use_auth_token ```
AnonymousSub/cline_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: stbl_clinical_bert_ft_rs7 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # stbl_clinical_bert_ft_rs7 This model is a fine-tuned version of [emilyalsentzer/Bio_ClinicalBERT](https://huggingface.co/emilyalsentzer/Bio_ClinicalBERT) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0848 - F1: 0.9208 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 12 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2755 | 1.0 | 101 | 0.0986 | 0.8484 | | 0.0655 | 2.0 | 202 | 0.0780 | 0.8873 | | 0.0299 | 3.0 | 303 | 0.0622 | 0.9047 | | 0.0145 | 4.0 | 404 | 0.0675 | 0.9110 | | 0.0097 | 5.0 | 505 | 0.0706 | 0.9141 | | 0.0057 | 6.0 | 606 | 0.0753 | 0.9174 | | 0.0032 | 7.0 | 707 | 0.0755 | 0.9182 | | 0.0024 | 8.0 | 808 | 0.0835 | 0.9219 | | 0.0014 | 9.0 | 909 | 0.0838 | 0.9197 | | 0.0013 | 10.0 | 1010 | 0.0838 | 0.9204 | | 0.0009 | 11.0 | 1111 | 0.0850 | 0.9183 | | 0.0009 | 12.0 | 1212 | 0.0848 | 0.9208 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
AnonymousSub/rule_based_bert_mean_diff_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: mit --- # Warning: Heavy Overfitting # # Description Trainer: Hank Demiurge from Overlord # Dataset >Training: 6 images >Regularization: 20 images # Info >Model Used: Waifu Diffusion 1.2 >Steps: 4000 >Keyword: Demiurge (Use this in the prompt) >Class Phrase: anime_man_slick_black_hair_with_glasses ![tGDKPHveAe.png](https://s3.amazonaws.com/moonup/production/uploads/1664571211946-6303fe3cd14428368d1a4137.png)
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - en - ja tags: - nllb license: cc-by-nc-4.0 --- # NLLB 1.3B fine-tuned on Japanese to English Light Novel translation This model was fine-tuned on light and web novels for Japanese to English translation. It can translate sentences and paragraphs up to 512 tokens. ## Usage ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("thefrigidliquidation/nllb-jaen-1.3B-lightnovels") model = AutoModelForSeq2SeqLM.from_pretrained("thefrigidliquidation/nllb-jaen-1.3B-lightnovels") tokenizer.tgt_lang = "jpn_Jpan"  # default honorifics setting, see the Honorifics section below inputs = tokenizer("マイン、ルッツが迎えに来たよ", return_tensors="pt") generated_tokens = model.generate( **inputs, forced_bos_token_id=tokenizer.lang_code_to_id[tokenizer.tgt_lang], max_new_tokens=1024, no_repeat_ngram_size=6, ).cpu() translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0] ``` Generating with diverse beam search seems to work best. Add the following to `model.generate`: ```python num_beams=8, num_beam_groups=4, do_sample=False, ``` ## Glossary You can provide up to 10 custom translations for nouns and character names at runtime. To do so, surround the Japanese term with term tokens. Prefix the word with one of `<t0>, <t1>, ..., <t9>` and suffix the word with `</t>`. The term will be translated as the prefix term token which can then be string replaced. For example, in `マイン、ルッツが迎えに来たよ` if you wish to have `マイン` translated as `Myne` you would replace `マイン` with `<t0>マイン</t>`. The model will translate `<t0>マイン</t>、ルッツが迎えに来たよ` as `<t0>, Lutz is here to pick you up.` Then simply do a string replacement on the output, replacing `<t0>` with `Myne`. ## Honorifics You can force the model to generate or ignore honorifics. ```python # default, the model decides whether to use honorifics tokenizer.tgt_lang = "jpn_Jpan" # no honorifics, the model is discouraged from using honorifics tokenizer.tgt_lang = "zsm_Latn" # honorifics, the model is encouraged to use honorifics tokenizer.tgt_lang = "zul_Latn" ```
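To make the glossary mechanism concrete, a small illustrative helper (not part of the model's API) can wrap a term before translation and substitute it afterwards, using the example from the card:

```python
def add_term(text: str, jp_term: str, slot: int = 0) -> str:
    """Wrap a Japanese term in <tN>...</t> so the model copies the tag through."""
    return text.replace(jp_term, f"<t{slot}>{jp_term}</t>")

def restore_term(translation: str, en_term: str, slot: int = 0) -> str:
    """Replace the tag in the English output with the desired translation."""
    return translation.replace(f"<t{slot}>", en_term)

src = add_term("マイン、ルッツが迎えに来たよ", "マイン")        # "<t0>マイン</t>、ルッツが迎えに来たよ"
# ... translate `src` with the model as shown in the Usage section ...
out = restore_term("<t0>, Lutz is here to pick you up.", "Myne")  # "Myne, Lutz is here to pick you up."
```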
AnonymousSub/unsup-consert-base_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="FIT17/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
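As a further illustration, a minimal greedy rollout with the loaded Q-table might look like this; it assumes the pickled dict layout shown in the snippet above and the classic Gym `reset`/`step` API, which may differ in newer Gymnasium versions.

```python
import gym
import numpy as np

env = gym.make(model["env_id"], is_slippery=False)
qtable = model["qtable"]  # one row of action values per state

state = env.reset()
done = False
while not done:
    action = int(np.argmax(qtable[state]))       # greedy action from the learned Q-values
    state, reward, done, info = env.step(action)
env.close()
```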
AnonymousSub/unsup-consert-emanuals
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.50 +/- 2.76 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="FIT17/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
AnonymousSubmission/pretrained-model-1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 275.78 +/- 16.76 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
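A typical loading-and-evaluation sketch with Stable-Baselines3; the `repo_id` and `filename` below are placeholders, not the real identifiers for this checkpoint, so substitute the actual ones.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder identifiers; replace with the real repository id and file name.
checkpoint = load_from_hub(repo_id="user/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```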
Anonymreign/savagebeta
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ### ChairTest on Stable Diffusion via Dreambooth #### model by dadosdq This is the Stable Diffusion model fine-tuned on the ChairTest concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **ChA1r** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/dadosdq/chairtest/resolve/main/concept_images/1.jpeg) ![image 1](https://huggingface.co/dadosdq/chairtest/resolve/main/concept_images/4.jpeg) ![image 2](https://huggingface.co/dadosdq/chairtest/resolve/main/concept_images/2.jpeg) ![image 3](https://huggingface.co/dadosdq/chairtest/resolve/main/concept_images/0.jpeg) ![image 4](https://huggingface.co/dadosdq/chairtest/resolve/main/concept_images/3.jpeg)
Anthos23/my-awesome-model
[ "pytorch", "tf", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: microsoft/fluentui-emoji metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # emoji-diffusion ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `microsoft/fluentui-emoji` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: False ### Training results 📈 [TensorBoard logs](https://huggingface.co/rycont/emoji-diffusion/tensorboard?#scalars)
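A minimal inference sketch with 🤗 Diffusers, assuming the checkpoint is an unconditional image-generation pipeline hosted at `rycont/emoji-diffusion` as the TensorBoard link above suggests.

```python
from diffusers import DiffusionPipeline

# Assumed repo id, taken from the TensorBoard link in the card.
pipeline = DiffusionPipeline.from_pretrained("rycont/emoji-diffusion")
pipeline.to("cuda")  # optional, if a GPU is available

# Sample a single emoji-style image from the unconditional model.
image = pipeline(batch_size=1).images[0]
image.save("emoji_sample.png")
```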
ArashEsk95/bert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-01T13:23:05Z
--- license: mit --- ### Joseph Russel Ammen on Stable Diffusion via Dreambooth #### model by wallowbitz This is the Stable Diffusion model fine-tuned on the Joseph Russel Ammen concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **Joseph Russel Ammen** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/sd-dreambooth-library/joseph-russel-ammen/resolve/main/concept_images/1.jpeg) ![image 1](https://huggingface.co/sd-dreambooth-library/joseph-russel-ammen/resolve/main/concept_images/4.jpeg) ![image 2](https://huggingface.co/sd-dreambooth-library/joseph-russel-ammen/resolve/main/concept_images/2.jpeg) ![image 3](https://huggingface.co/sd-dreambooth-library/joseph-russel-ammen/resolve/main/concept_images/0.jpeg) ![image 4](https://huggingface.co/sd-dreambooth-library/joseph-russel-ammen/resolve/main/concept_images/3.jpeg)
ArashEsk95/bert-base-uncased-finetuned-sst2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-01T13:24:06Z
--- language: en thumbnail: http://www.huggingtweets.com/elonmusk-nftfreaks-nftgirl/1664630772232/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1572573363255525377/Xz3fufYY_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1524408283674591232/ZcdTVEPl_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1384551299681714177/fHRGvDJR_400x400.jpg&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Elon Musk & NFT Freaks 🗝🏰🦸🏿‍♂️ & NFTGirl 🖼</div> <div style="text-align: center; font-size: 14px;">@elonmusk-nftfreaks-nftgirl</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Elon Musk & NFT Freaks 🗝🏰🦸🏿‍♂️ & NFTGirl 🖼. | Data | Elon Musk | NFT Freaks 🗝🏰🦸🏿‍♂️ | NFTGirl 🖼 | | --- | --- | --- | --- | | Tweets downloaded | 3200 | 3247 | 2210 | | Retweets | 121 | 1753 | 298 | | Short tweets | 984 | 306 | 395 | | Tweets kept | 2095 | 1188 | 1517 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/3aevkd35/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @elonmusk-nftfreaks-nftgirl's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/al5jjb8v) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/al5jjb8v/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/elonmusk-nftfreaks-nftgirl') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. 
## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
ArenaGrenade/char-cnn
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-01T14:28:12Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: din0s/msmarco-nlgen model-index: - name: t5-base-msmarco-nlgen-cb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base-msmarco-nlgen-cb This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the [MS MARCO Natural Language Generation](https://huggingface.co/datasets/din0s/msmarco-nlgen) dataset. It achieves the following results on the evaluation set: - Loss: 2.0571 - Rougelsum: 24.7427 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:---------:| | 2.1393 | 0.26 | 2500 | 2.1099 | 24.5028 | | 2.1006 | 0.52 | 5000 | 2.0739 | 24.6017 | | 2.0694 | 0.78 | 7500 | 2.0571 | 24.7427 | ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1+cu102 - Datasets 2.4.0 - Tokenizers 0.12.1
AriakimTaiyo/kumiko
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-01T15:02:22Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion args: default metrics: - name: Accuracy type: accuracy value: 0.9415 - name: F1 type: f1 value: 0.9414702638466222 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1764 - Accuracy: 0.9415 - F1: 0.9415 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.436 | 1.0 | 2000 | 0.2178 | 0.93 | 0.9305 | | 0.1615 | 2.0 | 4000 | 0.1764 | 0.9415 | 0.9415 | ### Framework versions - Transformers 4.13.0 - Pytorch 1.12.1+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
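A hedged inference sketch with the text-classification pipeline; the model path below is a placeholder for wherever this fine-tuned checkpoint is stored.

```python
from transformers import pipeline

# Placeholder path: replace with the hub id or local directory of this checkpoint.
classifier = pipeline("text-classification", model="path/to/distilbert-base-uncased-finetuned-emotion")

# Labels come from the emotion dataset's six classes (sadness, joy, love, anger, fear, surprise).
print(classifier("I'm thrilled the experiment finally worked!"))
```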
Arina/Erine
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1** . ## Usage ```python model = load_from_hub(repo_id="rwheel/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
ArjunKadya/HuggingFace
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-01T15:12:12Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: din0s/asqa model-index: - name: t5-base-asqa-ob results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base-asqa-ob This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the [ASQA](https://huggingface.co/datasets/din0s/asqa) dataset. It achieves the following results on the evaluation set: - Loss: 1.7356 - Rougelsum: 12.0879 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:---------:| | No log | 1.0 | 355 | 1.8545 | 11.6549 | | 2.4887 | 2.0 | 710 | 1.8050 | 11.7533 | | 1.9581 | 3.0 | 1065 | 1.7843 | 11.8327 | | 1.9581 | 4.0 | 1420 | 1.7722 | 11.9442 | | 1.9252 | 5.0 | 1775 | 1.7648 | 11.9331 | | 1.8853 | 6.0 | 2130 | 1.7567 | 11.9788 | | 1.8853 | 7.0 | 2485 | 1.7519 | 12.0300 | | 1.8512 | 8.0 | 2840 | 1.7483 | 12.0225 | | 1.8328 | 9.0 | 3195 | 1.7451 | 12.0402 | | 1.8115 | 10.0 | 3550 | 1.7436 | 12.0444 | | 1.8115 | 11.0 | 3905 | 1.7419 | 12.0850 | | 1.7878 | 12.0 | 4260 | 1.7408 | 12.1047 | | 1.774 | 13.0 | 4615 | 1.7394 | 12.0839 | | 1.774 | 14.0 | 4970 | 1.7390 | 12.0910 | | 1.7787 | 15.0 | 5325 | 1.7381 | 12.0880 | | 1.7632 | 16.0 | 5680 | 1.7380 | 12.1088 | | 1.7623 | 17.0 | 6035 | 1.7370 | 12.1046 | | 1.7623 | 18.0 | 6390 | 1.7368 | 12.0997 | | 1.7508 | 19.0 | 6745 | 1.7359 | 12.0902 | | 1.7597 | 20.0 | 7100 | 1.7356 | 12.0879 | ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1+cu102 - Datasets 2.4.0 - Tokenizers 0.12.1
asaakyan/mbart-poetic-all
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-01T15:25:30Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - conll2003 metrics: - precision - recall - f1 - accuracy model-index: - name: bert-finetuned-ner results: - task: name: Token Classification type: token-classification dataset: name: conll2003 type: conll2003 config: conll2003 split: train args: conll2003 metrics: - name: Precision type: precision value: 0.9363425925925926 - name: Recall type: recall value: 0.9530461124200605 - name: F1 type: f1 value: 0.9446205170975813 - name: Accuracy type: accuracy value: 0.986769294166127 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the conll2003 dataset. It achieves the following results on the evaluation set: - Loss: 0.0774 - Precision: 0.9363 - Recall: 0.9530 - F1: 0.9446 - Accuracy: 0.9868 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0273 | 1.0 | 1756 | 0.0787 | 0.9286 | 0.9411 | 0.9348 | 0.9845 | | 0.0141 | 2.0 | 3512 | 0.0772 | 0.9299 | 0.9504 | 0.9400 | 0.9863 | | 0.0054 | 3.0 | 5268 | 0.0774 | 0.9363 | 0.9530 | 0.9446 | 0.9868 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
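A hedged usage sketch with the token-classification pipeline; the model path is a placeholder for this fine-tuned checkpoint.

```python
from transformers import pipeline

# Placeholder path: replace with the hub id or local directory of this checkpoint.
ner = pipeline(
    "token-classification",
    model="path/to/bert-finetuned-ner",
    aggregation_strategy="simple",  # merge sub-word tokens into whole entities
)

# Entities follow the CoNLL-2003 tag set (PER, ORG, LOC, MISC).
print(ner("Hugging Face is based in New York City."))
```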
Arnold/common_voiceha
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-01T15:48:32Z
--- language: - en tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - image-to-image datasets: - rrustom/architecture2022clean pipeline: image-to-image ---
Arnold/wav2vec2-hausa2-demo-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
2022-10-01T15:54:14Z
--- license: mit --- # Description Trainer: Chris Aqua from Konosuba # Dataset >Training: 16 images >Regularization: 3249 images - waifu-research-department reg images # Info >Model Used: Waifu Diffusion 1.3 epoch 5 >Steps: 3000 >Keyword: Aqua (Use this in the prompt) >Class Phrase: Useless_Goddess
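The card above says to use the keyword "Aqua" in the prompt but gives no loading code. A minimal sketch is shown below, assuming the fine-tuned Waifu Diffusion weights are available in diffusers format; the repo path is a placeholder, not a confirmed location.

```python
# Minimal sketch (assumption): load a Dreambooth checkpoint with diffusers and use
# the card's keyword "Aqua" in the prompt. MODEL_ID is a hypothetical placeholder.
import torch
from diffusers import StableDiffusionPipeline

MODEL_ID = "path/to/aqua-dreambooth"  # hypothetical; point at the actual weights

pipe = StableDiffusionPipeline.from_pretrained(MODEL_ID, torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "portrait of Aqua, highly detailed anime illustration"
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("aqua.png")
```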
Arnold/wav2vec2-large-xlsr-hausa2-demo-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "dataset:common_voice", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy - precision - recall - f1 model-index: - name: roberta-large-finetuned-ours-DS results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-finetuned-ours-DS This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3369 - Accuracy: 0.75 - Precision: 0.7054 - Recall: 0.6949 - F1: 0.6974 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | 1.0561 | 0.99 | 99 | 0.8773 | 0.615 | 0.4054 | 0.5584 | 0.4591 | | 0.762 | 1.98 | 198 | 0.6514 | 0.715 | 0.6735 | 0.6672 | 0.6588 | | 0.5661 | 2.97 | 297 | 0.6806 | 0.71 | 0.6764 | 0.6608 | 0.6435 | | 0.3699 | 3.96 | 396 | 0.8358 | 0.71 | 0.6611 | 0.6691 | 0.6570 | | 0.2184 | 4.95 | 495 | 1.1627 | 0.7 | 0.6597 | 0.6337 | 0.6414 | | 0.1743 | 5.94 | 594 | 1.0544 | 0.725 | 0.6831 | 0.6949 | 0.6831 | | 0.098 | 6.93 | 693 | 1.4757 | 0.73 | 0.6885 | 0.6902 | 0.6892 | | 0.0813 | 7.92 | 792 | 1.8146 | 0.73 | 0.6840 | 0.6772 | 0.6800 | | 0.0435 | 8.91 | 891 | 1.6697 | 0.755 | 0.7141 | 0.7127 | 0.7132 | | 0.0209 | 9.9 | 990 | 1.8931 | 0.755 | 0.7102 | 0.7070 | 0.7082 | | 0.0201 | 10.89 | 1089 | 2.1934 | 0.74 | 0.6971 | 0.6866 | 0.6907 | | 0.0095 | 11.88 | 1188 | 2.1389 | 0.75 | 0.7014 | 0.6915 | 0.6932 | | 0.0141 | 12.87 | 1287 | 2.1902 | 0.74 | 0.6942 | 0.6943 | 0.6936 | | 0.0112 | 13.86 | 1386 | 2.5021 | 0.73 | 0.6889 | 0.6669 | 0.6741 | | 0.0054 | 14.85 | 1485 | 2.3840 | 0.73 | 0.6819 | 0.6715 | 0.6746 | | 0.0088 | 15.84 | 1584 | 2.3224 | 0.74 | 0.6909 | 0.6825 | 0.6787 | | 0.003 | 16.83 | 1683 | 2.2641 | 0.75 | 0.7054 | 0.6949 | 0.6974 | | 0.0017 | 17.82 | 1782 | 2.3361 | 0.75 | 0.7077 | 0.6968 | 0.7012 | | 0.0014 | 18.81 | 1881 | 2.3041 | 0.755 | 0.7131 | 0.7009 | 0.7051 | | 0.0083 | 19.8 | 1980 | 2.3369 | 0.75 | 0.7054 | 0.6949 | 0.6974 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.10.1+cu111 - Datasets 2.3.2 - Tokenizers 0.12.1
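The card above is a RoBERTa-large sequence classifier with no inference example. A minimal sketch follows, assuming the usual Transformers classification head; the repo id is hypothetical.

```python
# Minimal sketch (assumption): score one sentence with a fine-tuned RoBERTa
# sequence classifier like the one described above. MODEL_ID is hypothetical.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_ID = "roberta-large-finetuned-ours-DS"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID)

inputs = tokenizer("Example sentence to classify.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = logits.argmax(dim=-1).item()
print(model.config.id2label.get(pred, pred))  # label name if the config defines one
```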
ArtemisZealot/DialoGTP-small-Qkarin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: din0s/msmarco-nlgen model-index: - name: t5-base-msmarco-nlgen-ob results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base-msmarco-nlgen-ob This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on the [MS MARCO Natural Language Generation](https://huggingface.co/datasets/din0s/msmarco-nlgen) dataset. It achieves the following results on the evaluation set: - Loss: 0.3874 - Rougelsum: 14.4418 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rougelsum | |:-------------:|:-----:|:-----:|:---------------:|:---------:| | 0.5472 | 0.13 | 2500 | 0.4383 | 14.2497 | | 0.5238 | 0.26 | 5000 | 0.4252 | 14.4210 | | 0.501 | 0.39 | 7500 | 0.4093 | 14.3315 | | 0.4958 | 0.52 | 10000 | 0.4006 | 14.3347 | | 0.4913 | 0.66 | 12500 | 0.3944 | 14.5925 | | 0.4873 | 0.79 | 15000 | 0.3914 | 14.4193 | | 0.466 | 0.92 | 17500 | 0.3874 | 14.4418 | ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1+cu102 - Datasets 2.4.0 - Tokenizers 0.12.1
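The MS MARCO NLGen card above omits how inputs are formatted at inference time. The sketch below is an assumption-laden illustration: the question/context concatenation is not stated in the card, and the repo id is hypothetical.

```python
# Minimal sketch (assumption): generate an answer with a T5 model fine-tuned on
# MS MARCO NLGEN. The input format below is illustrative only; the card does not
# specify the prompt layout used during fine-tuning. MODEL_ID is hypothetical.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

MODEL_ID = "t5-base-msmarco-nlgen-ob"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)

question = "what is the boiling point of water"
passage = "At sea level, water boils at 100 degrees Celsius (212 degrees Fahrenheit)."
inputs = tokenizer(f"question: {question} context: {passage}",
                   return_tensors="pt", truncation=True)

outputs = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```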
ArthurBaia/bert-base-portuguese-cased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy - precision - recall - f1 model-index: - name: roberta-large-finetuned-TRAC-DS results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-finetuned-TRAC-DS This model is a fine-tuned version of [roberta-large](https://huggingface.co/roberta-large) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.8198 - Accuracy: 0.7190 - Precision: 0.6955 - Recall: 0.6979 - F1: 0.6963 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:---------:|:------:|:------:| | 0.9538 | 1.0 | 612 | 0.8083 | 0.6111 | 0.6192 | 0.6164 | 0.5994 | | 0.7924 | 2.0 | 1224 | 0.7594 | 0.6601 | 0.6688 | 0.6751 | 0.6424 | | 0.6844 | 3.0 | 1836 | 0.6986 | 0.7042 | 0.6860 | 0.6969 | 0.6858 | | 0.5715 | 3.99 | 2448 | 0.7216 | 0.7075 | 0.6957 | 0.6978 | 0.6925 | | 0.45 | 4.99 | 3060 | 0.7963 | 0.7288 | 0.7126 | 0.7074 | 0.7073 | | 0.352 | 5.99 | 3672 | 1.0824 | 0.7141 | 0.6999 | 0.6774 | 0.6818 | | 0.2546 | 6.99 | 4284 | 1.0884 | 0.7230 | 0.7006 | 0.7083 | 0.7028 | | 0.1975 | 7.99 | 4896 | 1.5338 | 0.7337 | 0.7090 | 0.7063 | 0.7074 | | 0.1656 | 8.99 | 5508 | 1.8182 | 0.7100 | 0.6882 | 0.6989 | 0.6896 | | 0.1358 | 9.98 | 6120 | 2.1623 | 0.7173 | 0.6917 | 0.6959 | 0.6934 | | 0.1235 | 10.98 | 6732 | 2.3249 | 0.7141 | 0.6881 | 0.6914 | 0.6888 | | 0.1003 | 11.98 | 7344 | 2.3474 | 0.7124 | 0.6866 | 0.6920 | 0.6887 | | 0.0826 | 12.98 | 7956 | 2.3574 | 0.7083 | 0.6853 | 0.6959 | 0.6874 | | 0.0727 | 13.98 | 8568 | 2.4989 | 0.7116 | 0.6858 | 0.6934 | 0.6883 | | 0.0553 | 14.98 | 9180 | 2.8090 | 0.7026 | 0.6747 | 0.6710 | 0.6725 | | 0.0433 | 15.97 | 9792 | 2.6647 | 0.7255 | 0.7010 | 0.7028 | 0.7018 | | 0.0449 | 16.97 | 10404 | 2.6568 | 0.7247 | 0.7053 | 0.6997 | 0.7010 | | 0.0373 | 17.97 | 11016 | 2.7632 | 0.7149 | 0.6888 | 0.6938 | 0.6909 | | 0.0278 | 18.97 | 11628 | 2.8245 | 0.7124 | 0.6866 | 0.6930 | 0.6889 | | 0.0288 | 19.97 | 12240 | 2.8198 | 0.7190 | 0.6955 | 0.6979 | 0.6963 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.10.1+cu111 - Datasets 2.3.2 - Tokenizers 0.12.1
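The card above reports accuracy, precision, recall, and F1 per epoch without saying how they are aggregated. As a point of reference, the sketch below shows how such metrics can be computed from predictions with scikit-learn; the "macro" averaging mode is an assumption, not something the card confirms.

```python
# Minimal sketch (assumption): compute accuracy / precision / recall / F1 of the
# kind reported above. The averaging mode ("macro") is illustrative only.
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

y_true = [0, 1, 2, 1, 0, 2]   # gold labels (toy data)
y_pred = [0, 1, 1, 1, 0, 2]   # model predictions (toy data)

accuracy = accuracy_score(y_true, y_pred)
precision, recall, f1, _ = precision_recall_fscore_support(
    y_true, y_pred, average="macro", zero_division=0
)
print(f"acc={accuracy:.4f} p={precision:.4f} r={recall:.4f} f1={f1:.4f}")
```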
ArvinZhuang/BiTAG-t5-large
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
4
null
--- license: mit --- ### chekos on Stable Diffusion via Dreambooth #### model by chekos This is the Stable Diffusion model fine-tuned on the chekos concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **chekos** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/chekos/chekos/resolve/main/concept_images/1.jpeg) ![image 1](https://huggingface.co/chekos/chekos/resolve/main/concept_images/4.jpeg) ![image 2](https://huggingface.co/chekos/chekos/resolve/main/concept_images/7.jpeg) ![image 3](https://huggingface.co/chekos/chekos/resolve/main/concept_images/2.jpeg) ![image 4](https://huggingface.co/chekos/chekos/resolve/main/concept_images/0.jpeg) ![image 5](https://huggingface.co/chekos/chekos/resolve/main/concept_images/3.jpeg) ![image 6](https://huggingface.co/chekos/chekos/resolve/main/concept_images/6.jpeg) ![image 7](https://huggingface.co/chekos/chekos/resolve/main/concept_images/5.jpeg) ![image 8](https://huggingface.co/chekos/chekos/resolve/main/concept_images/8.jpeg)
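The Dreambooth card above points to generic Colab notebooks for inference; a minimal sketch in code is given below, assuming the concept loads as a diffusers-format pipeline. The repo id `chekos/chekos` is inferred from the image URLs in the card and is not otherwise confirmed.

```python
# Minimal sketch (assumption): load this Dreambooth concept with diffusers and
# generate an image using the instance prompt "chekos". Whether "chekos/chekos"
# actually hosts diffusers-format weights is not confirmed by the card.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("chekos/chekos", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a photo of chekos riding a bicycle", num_inference_steps=30).images[0]
image.save("chekos.png")
```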
AshLukass/AshLukass
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - conversational --- # Mental Health Chatbot
Ashl3y/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ### Leone From Akame Ga Kill V2 on Stable Diffusion via Dreambooth #### model by Mrkimmon This is the Stable Diffusion model fine-tuned on the Leone From Akame Ga Kill V2 concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **an anime woman character of sks** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/277.jpeg) ![image 1](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/242.jpeg) ![image 2](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/660.jpeg) ![image 3](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/234.jpeg) ![image 4](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/265.jpeg) ![image 5](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/883.jpeg) ![image 6](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/899.jpeg) ![image 7](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/255.jpeg) ![image 8](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/573.jpeg) ![image 9](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/163.jpeg) ![image 10](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/188.jpeg) ![image 11](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/598.jpeg) ![image 12](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/990.jpeg) ![image 13](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/712.jpeg) ![image 14](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1002.jpeg) ![image 15](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/389.jpeg) ![image 16](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/316.jpeg) ![image 17](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/161.jpeg) ![image 18](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/947.jpeg) ![image 19](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/497.jpeg) ![image 20](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/731.jpeg) ![image 
21](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/200.jpeg) ![image 22](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/121.jpeg) ![image 23](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/27.jpeg) ![image 24](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/104.jpeg) ![image 25](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/600.jpeg) ![image 26](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/913.jpeg) ![image 27](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1067.jpeg) ![image 28](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/311.jpeg) ![image 29](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1009.jpeg) ![image 30](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/690.jpeg) ![image 31](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/819.jpeg) ![image 32](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1211.jpeg) ![image 33](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/806.jpeg) ![image 34](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/529.jpeg) ![image 35](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/610.jpeg) ![image 36](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/874.jpeg) ![image 37](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1059.jpeg) ![image 38](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/164.jpeg) ![image 39](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1004.jpeg) ![image 40](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1151.jpeg) ![image 41](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/693.jpeg) ![image 42](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/100.jpeg) ![image 43](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/222.jpeg) ![image 44](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/250.jpeg) ![image 45](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/666.jpeg) ![image 46](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1016.jpeg) ![image 47](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/638.jpeg) ![image 48](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1162.jpeg) ![image 
49](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/960.jpeg) ![image 50](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/348.jpeg) ![image 51](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/689.jpeg) ![image 52](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/664.jpeg) ![image 53](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/632.jpeg) ![image 54](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/433.jpeg) ![image 55](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1140.jpeg) ![image 56](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/759.jpeg) ![image 57](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1.jpeg) ![image 58](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1141.jpeg) ![image 59](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/215.jpeg) ![image 60](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/212.jpeg) ![image 61](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/82.jpeg) ![image 62](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/569.jpeg) ![image 63](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/128.jpeg) ![image 64](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/900.jpeg) ![image 65](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1049.jpeg) ![image 66](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/355.jpeg) ![image 67](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1066.jpeg) ![image 68](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/516.jpeg) ![image 69](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/378.jpeg) ![image 70](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/950.jpeg) ![image 71](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/527.jpeg) ![image 72](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/287.jpeg) ![image 73](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/214.jpeg) ![image 74](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/185.jpeg) ![image 75](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1148.jpeg) ![image 76](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/428.jpeg) ![image 
77](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/948.jpeg) ![image 78](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/86.jpeg) ![image 79](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/814.jpeg) ![image 80](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/28.jpeg) ![image 81](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/80.jpeg) ![image 82](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/758.jpeg) ![image 83](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/408.jpeg) ![image 84](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/918.jpeg) ![image 85](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/191.jpeg) ![image 86](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/450.jpeg) ![image 87](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/537.jpeg) ![image 88](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/596.jpeg) ![image 89](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/771.jpeg) ![image 90](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/54.jpeg) ![image 91](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/4.jpeg) ![image 92](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/490.jpeg) ![image 93](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/151.jpeg) ![image 94](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/374.jpeg) ![image 95](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/184.jpeg) ![image 96](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/373.jpeg) ![image 97](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1166.jpeg) ![image 98](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1124.jpeg) ![image 99](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/624.jpeg) ![image 100](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/461.jpeg) ![image 101](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/440.jpeg) ![image 102](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/681.jpeg) ![image 103](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/857.jpeg) ![image 104](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/865.jpeg) ![image 
105](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1045.jpeg) ![image 106](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/904.jpeg) ![image 107](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1035.jpeg) ![image 108](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/697.jpeg) ![image 109](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/520.jpeg) ![image 110](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/153.jpeg) ![image 111](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/499.jpeg) ![image 112](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/111.jpeg) ![image 113](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1112.jpeg) ![image 114](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/72.jpeg) ![image 115](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1052.jpeg) ![image 116](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/743.jpeg) ![image 117](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/51.jpeg) ![image 118](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/387.jpeg) ![image 119](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/578.jpeg) ![image 120](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1208.jpeg) ![image 121](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/710.jpeg) ![image 122](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/491.jpeg) ![image 123](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/217.jpeg) ![image 124](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/391.jpeg) ![image 125](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/648.jpeg) ![image 126](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/924.jpeg) ![image 127](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1178.jpeg) ![image 128](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/667.jpeg) ![image 129](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/854.jpeg) ![image 130](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/327.jpeg) ![image 131](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1145.jpeg) ![image 132](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/76.jpeg) ![image 
133](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/108.jpeg) ![image 134](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/354.jpeg) ![image 135](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/792.jpeg) ![image 136](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/928.jpeg) ![image 137](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/413.jpeg) ![image 138](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/318.jpeg) ![image 139](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/966.jpeg) ![image 140](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1098.jpeg) ![image 141](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1206.jpeg) ![image 142](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/684.jpeg) ![image 143](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/765.jpeg) ![image 144](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/68.jpeg) ![image 145](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/247.jpeg) ![image 146](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/657.jpeg) ![image 147](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/781.jpeg) ![image 148](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1061.jpeg) ![image 149](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/415.jpeg) ![image 150](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/183.jpeg) ![image 151](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/315.jpeg) ![image 152](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1101.jpeg) ![image 153](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/607.jpeg) ![image 154](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/418.jpeg) ![image 155](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/601.jpeg) ![image 156](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/237.jpeg) ![image 157](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/208.jpeg) ![image 158](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/399.jpeg) ![image 159](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/134.jpeg) ![image 160](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1109.jpeg) ![image 
161](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/898.jpeg) ![image 162](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/411.jpeg) ![image 163](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/768.jpeg) ![image 164](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/262.jpeg) ![image 165](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/304.jpeg) ![image 166](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/964.jpeg) ![image 167](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/547.jpeg) ![image 168](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/942.jpeg) ![image 169](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/724.jpeg) ![image 170](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/946.jpeg) ![image 171](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/879.jpeg) ![image 172](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/611.jpeg) ![image 173](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/526.jpeg) ![image 174](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/798.jpeg) ![image 175](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/486.jpeg) ![image 176](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/859.jpeg) ![image 177](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1204.jpeg) ![image 178](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/949.jpeg) ![image 179](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/839.jpeg) ![image 180](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/885.jpeg) ![image 181](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/146.jpeg) ![image 182](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1013.jpeg) ![image 183](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/653.jpeg) ![image 184](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/162.jpeg) ![image 185](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/202.jpeg) ![image 186](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/951.jpeg) ![image 187](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/73.jpeg) ![image 188](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/698.jpeg) ![image 
189](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/10.jpeg) ![image 190](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/502.jpeg) ![image 191](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/886.jpeg) ![image 192](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/353.jpeg) ![image 193](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/524.jpeg) ![image 194](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1048.jpeg) ![image 195](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1084.jpeg) ![image 196](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1087.jpeg) ![image 197](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/532.jpeg) ![image 198](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/589.jpeg) ![image 199](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/581.jpeg) ![image 200](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/987.jpeg) ![image 201](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1100.jpeg) ![image 202](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/98.jpeg) ![image 203](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/166.jpeg) ![image 204](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/241.jpeg) ![image 205](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/956.jpeg) ![image 206](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/560.jpeg) ![image 207](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/926.jpeg) ![image 208](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/734.jpeg) ![image 209](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/850.jpeg) ![image 210](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/789.jpeg) ![image 211](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/816.jpeg) ![image 212](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/176.jpeg) ![image 213](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1080.jpeg) ![image 214](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/154.jpeg) ![image 215](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/496.jpeg) ![image 216](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/254.jpeg) ![image 
217](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/856.jpeg) ![image 218](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/168.jpeg) ![image 219](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/453.jpeg) ![image 220](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/36.jpeg) ![image 221](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1189.jpeg) ![image 222](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/362.jpeg) ![image 223](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1003.jpeg) ![image 224](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/649.jpeg) ![image 225](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/825.jpeg) ![image 226](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/305.jpeg) ![image 227](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1007.jpeg) ![image 228](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/776.jpeg) ![image 229](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/194.jpeg) ![image 230](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/489.jpeg) ![image 231](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/438.jpeg) ![image 232](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1077.jpeg) ![image 233](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/263.jpeg) ![image 234](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/791.jpeg) ![image 235](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/89.jpeg) ![image 236](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1040.jpeg) ![image 237](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/967.jpeg) ![image 238](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/172.jpeg) ![image 239](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1169.jpeg) ![image 240](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/273.jpeg) ![image 241](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/713.jpeg) ![image 242](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1213.jpeg) ![image 243](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1005.jpeg) ![image 244](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/169.jpeg) ![image 
245](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/274.jpeg) ![image 246](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/213.jpeg) ![image 247](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/139.jpeg) ![image 248](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/509.jpeg) ![image 249](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/363.jpeg) ![image 250](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/996.jpeg) ![image 251](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/553.jpeg) ![image 252](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/310.jpeg) ![image 253](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/385.jpeg) ![image 254](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/224.jpeg) ![image 255](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/682.jpeg) ![image 256](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/50.jpeg) ![image 257](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/115.jpeg) ![image 258](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/300.jpeg) ![image 259](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/585.jpeg) ![image 260](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/920.jpeg) ![image 261](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/444.jpeg) ![image 262](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/171.jpeg) ![image 263](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/437.jpeg) ![image 264](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/29.jpeg) ![image 265](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/748.jpeg) ![image 266](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/729.jpeg) ![image 267](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1102.jpeg) ![image 268](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1024.jpeg) ![image 269](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/976.jpeg) ![image 270](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/7.jpeg) ![image 271](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/938.jpeg) ![image 272](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/635.jpeg) ![image 
273](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/970.jpeg) ![image 274](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/818.jpeg) ![image 275](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/400.jpeg) ![image 276](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/442.jpeg) ![image 277](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/405.jpeg) ![image 278](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/832.jpeg) ![image 279](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1175.jpeg) ![image 280](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/646.jpeg) ![image 281](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/827.jpeg) ![image 282](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1075.jpeg) ![image 283](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1070.jpeg) ![image 284](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1159.jpeg) ![image 285](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/165.jpeg) ![image 286](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/142.jpeg) ![image 287](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/617.jpeg) ![image 288](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/284.jpeg) ![image 289](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/472.jpeg) ![image 290](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/811.jpeg) ![image 291](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/173.jpeg) ![image 292](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/341.jpeg) ![image 293](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/159.jpeg) ![image 294](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/414.jpeg) ![image 295](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/366.jpeg) ![image 296](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/174.jpeg) ![image 297](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/621.jpeg) ![image 298](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/257.jpeg) ![image 299](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/512.jpeg) ![image 300](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/545.jpeg) ![image 
301](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/384.jpeg) ![image 302](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/244.jpeg) ![image 303](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/292.jpeg) ![image 304](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1022.jpeg) ![image 305](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/102.jpeg) ![image 306](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1187.jpeg) ![image 307](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1127.jpeg) ![image 308](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/2.jpeg) ![image 309](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/0.jpeg) ![image 310](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/882.jpeg) ![image 311](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/770.jpeg) ![image 312](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/993.jpeg) ![image 313](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/622.jpeg) ![image 314](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/253.jpeg) ![image 315](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/590.jpeg) ![image 316](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1212.jpeg) ![image 317](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/269.jpeg) ![image 318](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/178.jpeg) ![image 319](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/568.jpeg) ![image 320](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/368.jpeg) ![image 321](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/84.jpeg) ![image 322](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/186.jpeg) ![image 323](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/903.jpeg) ![image 324](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/979.jpeg) ![image 325](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/986.jpeg) ![image 326](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/359.jpeg) ![image 327](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/571.jpeg) ![image 328](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/989.jpeg) ![image 
329](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/978.jpeg) ![image 330](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/673.jpeg) ![image 331](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/243.jpeg) ![image 332](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/427.jpeg) ![image 333](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1020.jpeg) ![image 334](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1180.jpeg) ![image 335](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/443.jpeg) ![image 336](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/594.jpeg) ![image 337](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/488.jpeg) ![image 338](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1118.jpeg) ![image 339](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/802.jpeg) ![image 340](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/921.jpeg) ![image 341](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/691.jpeg) ![image 342](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/246.jpeg) ![image 343](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/367.jpeg) ![image 344](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1120.jpeg) ![image 345](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/376.jpeg) ![image 346](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/994.jpeg) ![image 347](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/66.jpeg) ![image 348](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/705.jpeg) ![image 349](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/937.jpeg) ![image 350](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/804.jpeg) ![image 351](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/821.jpeg) ![image 352](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/772.jpeg) ![image 353](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/637.jpeg) ![image 354](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1199.jpeg) ![image 355](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/722.jpeg) ![image 356](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/468.jpeg) ![image 
357](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/30.jpeg) ![image 358](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/210.jpeg) ![image 359](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/402.jpeg) ![image 360](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1055.jpeg) ![image 361](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/549.jpeg) ![image 362](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/787.jpeg) ![image 363](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/459.jpeg) ![image 364](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1074.jpeg) ![image 365](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/915.jpeg) ![image 366](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/686.jpeg) ![image 367](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1188.jpeg) ![image 368](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1163.jpeg) ![image 369](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/40.jpeg) ![image 370](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/477.jpeg) ![image 371](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/44.jpeg) ![image 372](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/914.jpeg) ![image 373](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/674.jpeg) ![image 374](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/895.jpeg) ![image 375](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/256.jpeg) ![image 376](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/91.jpeg) ![image 377](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/127.jpeg) ![image 378](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/170.jpeg) ![image 379](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/618.jpeg) ![image 380](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/672.jpeg) ![image 381](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/116.jpeg) ![image 382](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/401.jpeg) ![image 383](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/631.jpeg) ![image 384](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1064.jpeg) ![image 
385](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1072.jpeg) ![image 386](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/866.jpeg) ![image 387](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/193.jpeg) ![image 388](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/869.jpeg) ![image 389](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/198.jpeg) ![image 390](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/546.jpeg) ![image 391](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/847.jpeg) ![image 392](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/160.jpeg) ![image 393](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1047.jpeg) ![image 394](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/980.jpeg) ![image 395](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1096.jpeg) ![image 396](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/43.jpeg) ![image 397](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/167.jpeg) ![image 398](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/790.jpeg) ![image 399](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/784.jpeg) ![image 400](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/293.jpeg) ![image 401](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/322.jpeg) ![image 402](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/279.jpeg) ![image 403](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/494.jpeg) ![image 404](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/268.jpeg) ![image 405](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1192.jpeg) ![image 406](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/678.jpeg) ![image 407](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/995.jpeg) ![image 408](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/406.jpeg) ![image 409](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/953.jpeg) ![image 410](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1156.jpeg) ![image 411](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/464.jpeg) ![image 412](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/101.jpeg) ![image 
413](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/528.jpeg) ![image 414](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/261.jpeg) ![image 415](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/118.jpeg) ![image 416](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/845.jpeg) ![image 417](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/696.jpeg) ![image 418](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1116.jpeg) ![image 419](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/977.jpeg) ![image 420](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1215.jpeg) ![image 421](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/955.jpeg) ![image 422](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/726.jpeg) ![image 423](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/992.jpeg) ![image 424](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/397.jpeg) ![image 425](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/965.jpeg) ![image 426](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/534.jpeg) ![image 427](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/510.jpeg) ![image 428](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/78.jpeg) ![image 429](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1042.jpeg) ![image 430](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/218.jpeg) ![image 431](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1186.jpeg) ![image 432](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/777.jpeg) ![image 433](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/763.jpeg) ![image 434](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/959.jpeg) ![image 435](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1193.jpeg) ![image 436](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1008.jpeg) ![image 437](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/897.jpeg) ![image 438](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/286.jpeg) ![image 439](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/535.jpeg) ![image 440](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/894.jpeg) ![image 
441](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/732.jpeg) ![image 442](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/24.jpeg) ![image 443](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1194.jpeg) ![image 444](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/333.jpeg) ![image 445](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/479.jpeg) ![image 446](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/688.jpeg) ![image 447](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/264.jpeg) ![image 448](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1185.jpeg) ![image 449](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/891.jpeg) ![image 450](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/888.jpeg) ![image 451](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/911.jpeg) ![image 452](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/863.jpeg) ![image 453](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/473.jpeg) ![image 454](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/484.jpeg) ![image 455](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/272.jpeg) ![image 456](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1172.jpeg) ![image 457](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1114.jpeg) ![image 458](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/662.jpeg) ![image 459](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/985.jpeg) ![image 460](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/794.jpeg) ![image 461](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/79.jpeg) ![image 462](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/361.jpeg) ![image 463](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/864.jpeg) ![image 464](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/687.jpeg) ![image 465](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/810.jpeg) ![image 466](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/313.jpeg) ![image 467](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/963.jpeg) ![image 468](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/326.jpeg) ![image 
469](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/745.jpeg) ![image 470](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/944.jpeg) ![image 471](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/905.jpeg) ![image 472](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/58.jpeg) ![image 473](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1165.jpeg) ![image 474](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1062.jpeg) ![image 475](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1133.jpeg) ![image 476](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/650.jpeg) ![image 477](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/390.jpeg) ![image 478](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/727.jpeg) ![image 479](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/584.jpeg) ![image 480](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/126.jpeg) ![image 481](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/843.jpeg) ![image 482](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1108.jpeg) ![image 483](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/642.jpeg) ![image 484](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1203.jpeg) ![image 485](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/189.jpeg) ![image 486](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1095.jpeg) ![image 487](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/9.jpeg) ![image 488](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/753.jpeg) ![image 489](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/699.jpeg) ![image 490](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/403.jpeg) ![image 491](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/785.jpeg) ![image 492](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1011.jpeg) ![image 493](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/829.jpeg) ![image 494](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/350.jpeg) ![image 495](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/844.jpeg) ![image 496](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1106.jpeg) ![image 
497](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/25.jpeg) ![image 498](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/917.jpeg) ![image 499](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/733.jpeg) ![image 500](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/144.jpeg) ![image 501](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/271.jpeg) ![image 502](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1121.jpeg) ![image 503](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1029.jpeg) ![image 504](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/692.jpeg) ![image 505](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/141.jpeg) ![image 506](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/357.jpeg) ![image 507](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/503.jpeg) ![image 508](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1000.jpeg) ![image 509](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/474.jpeg) ![image 510](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/561.jpeg) ![image 511](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1030.jpeg) ![image 512](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1205.jpeg) ![image 513](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/429.jpeg) ![image 514](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/586.jpeg) ![image 515](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/69.jpeg) ![image 516](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/812.jpeg) ![image 517](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/940.jpeg) ![image 518](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/380.jpeg) ![image 519](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/325.jpeg) ![image 520](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/248.jpeg) ![image 521](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/715.jpeg) ![image 522](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1197.jpeg) ![image 523](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/774.jpeg) ![image 524](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/93.jpeg) ![image 
525](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1006.jpeg) ![image 526](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/23.jpeg) ![image 527](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/501.jpeg) ![image 528](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1034.jpeg) ![image 529](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1110.jpeg) ![image 530](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1097.jpeg) ![image 531](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1218.jpeg) ![image 532](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/75.jpeg) ![image 533](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/129.jpeg) ![image 534](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/837.jpeg) ![image 535](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/150.jpeg) ![image 536](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/278.jpeg) ![image 537](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/602.jpeg) ![image 538](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/613.jpeg) ![image 539](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/424.jpeg) ![image 540](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/495.jpeg) ![image 541](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/383.jpeg) ![image 542](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1217.jpeg) ![image 543](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/927.jpeg) ![image 544](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1032.jpeg) ![image 545](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/908.jpeg) ![image 546](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/749.jpeg) ![image 547](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/475.jpeg) ![image 548](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/404.jpeg) ![image 549](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/365.jpeg) ![image 550](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/393.jpeg) ![image 551](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/332.jpeg) ![image 552](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/294.jpeg) ![image 
553](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/281.jpeg) ![image 554](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/375.jpeg) ![image 555](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/912.jpeg) ![image 556](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/504.jpeg) ![image 557](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/229.jpeg) ![image 558](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/46.jpeg) ![image 559](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/119.jpeg) ![image 560](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/458.jpeg) ![image 561](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/430.jpeg) ![image 562](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/974.jpeg) ![image 563](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/55.jpeg) ![image 564](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1051.jpeg) ![image 565](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/935.jpeg) ![image 566](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/449.jpeg) ![image 567](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/454.jpeg) ![image 568](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/893.jpeg) ![image 569](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/199.jpeg) ![image 570](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/371.jpeg) ![image 571](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/220.jpeg) ![image 572](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/417.jpeg) ![image 573](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/324.jpeg) ![image 574](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/470.jpeg) ![image 575](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/3.jpeg) ![image 576](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1107.jpeg) ![image 577](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/728.jpeg) ![image 578](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/280.jpeg) ![image 579](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/572.jpeg) ![image 580](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/563.jpeg) ![image 
581](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/701.jpeg) ![image 582](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/849.jpeg) ![image 583](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/559.jpeg) ![image 584](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/289.jpeg) ![image 585](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/762.jpeg) ![image 586](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/330.jpeg) ![image 587](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1209.jpeg) ![image 588](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/426.jpeg) ![image 589](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1117.jpeg) ![image 590](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/592.jpeg) ![image 591](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/114.jpeg) ![image 592](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/409.jpeg) ![image 593](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/182.jpeg) ![image 594](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/652.jpeg) ![image 595](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/973.jpeg) ![image 596](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/297.jpeg) ![image 597](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/872.jpeg) ![image 598](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/575.jpeg) ![image 599](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1184.jpeg) ![image 600](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/916.jpeg) ![image 601](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/110.jpeg) ![image 602](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/730.jpeg) ![image 603](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/6.jpeg) ![image 604](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1122.jpeg) ![image 605](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/991.jpeg) ![image 606](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1056.jpeg) ![image 607](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/492.jpeg) ![image 608](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/983.jpeg) ![image 
609](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/57.jpeg) ![image 610](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/878.jpeg) ![image 611](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/35.jpeg) ![image 612](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/906.jpeg) ![image 613](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/462.jpeg) ![image 614](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/232.jpeg) ![image 615](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/668.jpeg) ![image 616](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/634.jpeg) ![image 617](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/420.jpeg) ![image 618](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/566.jpeg) ![image 619](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/52.jpeg) ![image 620](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/296.jpeg) ![image 621](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/270.jpeg) ![image 622](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1125.jpeg) ![image 623](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/531.jpeg) ![image 624](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/485.jpeg) ![image 625](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/860.jpeg) ![image 626](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1161.jpeg) ![image 627](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/515.jpeg) ![image 628](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/831.jpeg) ![image 629](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/335.jpeg) ![image 630](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1093.jpeg) ![image 631](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/291.jpeg) ![image 632](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/786.jpeg) ![image 633](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/227.jpeg) ![image 634](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1170.jpeg) ![image 635](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1039.jpeg) ![image 636](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/736.jpeg) ![image 
637](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/225.jpeg) ![image 638](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/764.jpeg) ![image 639](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/961.jpeg) ![image 640](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/506.jpeg) ![image 641](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/18.jpeg) ![image 642](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/436.jpeg) ![image 643](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/285.jpeg) ![image 644](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/175.jpeg) ![image 645](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/434.jpeg) ![image 646](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1021.jpeg) ![image 647](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/754.jpeg) ![image 648](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/63.jpeg) ![image 649](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1182.jpeg) ![image 650](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/203.jpeg) ![image 651](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/862.jpeg) ![image 652](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/943.jpeg) ![image 653](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/228.jpeg) ![image 654](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1119.jpeg) ![image 655](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/466.jpeg) ![image 656](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/38.jpeg) ![image 657](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/654.jpeg) ![image 658](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/260.jpeg) ![image 659](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1050.jpeg) ![image 660](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1043.jpeg) ![image 661](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/629.jpeg) ![image 662](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1069.jpeg) ![image 663](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/140.jpeg) ![image 664](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/562.jpeg) ![image 
665](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/61.jpeg) ![image 666](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/542.jpeg) ![image 667](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/518.jpeg) ![image 668](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1053.jpeg) ![image 669](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/514.jpeg) ![image 670](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/550.jpeg) ![image 671](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/64.jpeg) ![image 672](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/661.jpeg) ![image 673](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/820.jpeg) ![image 674](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1158.jpeg) ![image 675](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/83.jpeg) ![image 676](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/815.jpeg) ![image 677](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/533.jpeg) ![image 678](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/809.jpeg) ![image 679](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/481.jpeg) ![image 680](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1076.jpeg) ![image 681](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/663.jpeg) ![image 682](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/725.jpeg) ![image 683](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1153.jpeg) ![image 684](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/251.jpeg) ![image 685](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1168.jpeg) ![image 686](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/737.jpeg) ![image 687](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/685.jpeg) ![image 688](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/583.jpeg) ![image 689](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/541.jpeg) ![image 690](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/603.jpeg) ![image 691](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1196.jpeg) ![image 692](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/334.jpeg) ![image 
693](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/565.jpeg) ![image 694](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/703.jpeg) ![image 695](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1026.jpeg) ![image 696](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1017.jpeg) ![image 697](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/954.jpeg) ![image 698](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1134.jpeg) ![image 699](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/838.jpeg) ![image 700](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/382.jpeg) ![image 701](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/828.jpeg) ![image 702](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1167.jpeg) ![image 703](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/744.jpeg) ![image 704](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/707.jpeg) ![image 705](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/187.jpeg) ![image 706](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/934.jpeg) ![image 707](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1037.jpeg) ![image 708](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/17.jpeg) ![image 709](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/343.jpeg) ![image 710](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/669.jpeg) ![image 711](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/498.jpeg) ![image 712](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/522.jpeg) ![image 713](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/42.jpeg) ![image 714](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1123.jpeg) ![image 715](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/392.jpeg) ![image 716](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/614.jpeg) ![image 717](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/647.jpeg) ![image 718](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/999.jpeg) ![image 719](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1177.jpeg) ![image 720](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/870.jpeg) ![image 
721](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/49.jpeg) ![image 722](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/719.jpeg) ![image 723](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/738.jpeg) ![image 724](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/207.jpeg) ![image 725](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/608.jpeg) ![image 726](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/56.jpeg) ![image 727](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/755.jpeg) ![image 728](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/435.jpeg) ![image 729](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/301.jpeg) ![image 730](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/95.jpeg) ![image 731](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/846.jpeg) ![image 732](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/147.jpeg) ![image 733](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1198.jpeg) ![image 734](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/972.jpeg) ![image 735](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1111.jpeg) ![image 736](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1152.jpeg) ![image 737](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/67.jpeg) ![image 738](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/190.jpeg) ![image 739](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/945.jpeg) ![image 740](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/871.jpeg) ![image 741](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1025.jpeg) ![image 742](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/19.jpeg) ![image 743](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/782.jpeg) ![image 744](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/312.jpeg) ![image 745](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/303.jpeg) ![image 746](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/677.jpeg) ![image 747](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1041.jpeg) ![image 748](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/456.jpeg) ![image 
749](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1103.jpeg) ![image 750](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/939.jpeg) ![image 751](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/968.jpeg) ![image 752](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1081.jpeg) ![image 753](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/817.jpeg) ![image 754](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1071.jpeg) ![image 755](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/181.jpeg) ![image 756](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/395.jpeg) ![image 757](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/381.jpeg) ![image 758](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/379.jpeg) ![image 759](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/314.jpeg) ![image 760](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/833.jpeg) ![image 761](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/655.jpeg) ![image 762](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/106.jpeg) ![image 763](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/192.jpeg) ![image 764](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1105.jpeg) ![image 765](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/740.jpeg) ![image 766](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/591.jpeg) ![image 767](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/867.jpeg) ![image 768](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/801.jpeg) ![image 769](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/328.jpeg) ![image 770](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/779.jpeg) ![image 771](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/471.jpeg) ![image 772](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/482.jpeg) ![image 773](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/62.jpeg) ![image 774](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/861.jpeg) ![image 775](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/807.jpeg) ![image 776](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/39.jpeg) ![image 
777](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/540.jpeg) ![image 778](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/416.jpeg) ![image 779](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/41.jpeg) ![image 780](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/204.jpeg) ![image 781](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1149.jpeg) ![image 782](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/760.jpeg) ![image 783](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/896.jpeg) ![image 784](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1171.jpeg) ![image 785](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/299.jpeg) ![image 786](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/633.jpeg) ![image 787](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/971.jpeg) ![image 788](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/507.jpeg) ![image 789](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/233.jpeg) ![image 790](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1028.jpeg) ![image 791](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1173.jpeg) ![image 792](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/604.jpeg) ![image 793](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/88.jpeg) ![image 794](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/59.jpeg) ![image 795](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1036.jpeg) ![image 796](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/508.jpeg) ![image 797](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/858.jpeg) ![image 798](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1144.jpeg) ![image 799](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/423.jpeg) ![image 800](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1131.jpeg) ![image 801](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/336.jpeg) ![image 802](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/455.jpeg) ![image 803](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/445.jpeg) ![image 804](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/85.jpeg) ![image 
805](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/751.jpeg) ![image 806](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/984.jpeg) ![image 807](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/478.jpeg) ![image 808](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/641.jpeg) ![image 809](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/580.jpeg) ![image 810](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/136.jpeg) ![image 811](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1063.jpeg) ![image 812](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/671.jpeg) ![image 813](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/543.jpeg) ![image 814](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/276.jpeg) ![image 815](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/386.jpeg) ![image 816](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1126.jpeg) ![image 817](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/889.jpeg) ![image 818](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/975.jpeg) ![image 819](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1046.jpeg) ![image 820](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/329.jpeg) ![image 821](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/644.jpeg) ![image 822](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/757.jpeg) ![image 823](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1058.jpeg) ![image 824](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/923.jpeg) ![image 825](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/513.jpeg) ![image 826](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1083.jpeg) ![image 827](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/836.jpeg) ![image 828](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/799.jpeg) ![image 829](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/656.jpeg) ![image 830](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/196.jpeg) ![image 831](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/981.jpeg) ![image 832](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1060.jpeg) ![image 
833](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/26.jpeg) ![image 834](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/675.jpeg) ![image 835](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/283.jpeg) ![image 836](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1147.jpeg) ![image 837](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/851.jpeg) ![image 838](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/588.jpeg) ![image 839](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/750.jpeg) ![image 840](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/742.jpeg) ![image 841](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/211.jpeg) ![image 842](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1094.jpeg) ![image 843](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/388.jpeg) ![image 844](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1190.jpeg) ![image 845](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/746.jpeg) ![image 846](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1150.jpeg) ![image 847](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/398.jpeg) ![image 848](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/887.jpeg) ![image 849](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/962.jpeg) ![image 850](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/493.jpeg) ![image 851](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/521.jpeg) ![image 852](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/793.jpeg) ![image 853](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/718.jpeg) ![image 854](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/231.jpeg) ![image 855](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/109.jpeg) ![image 856](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/96.jpeg) ![image 857](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/796.jpeg) ![image 858](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/290.jpeg) ![image 859](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/929.jpeg) ![image 860](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/156.jpeg) ![image 
861](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/941.jpeg) ![image 862](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/803.jpeg) ![image 863](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1146.jpeg) ![image 864](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/558.jpeg) ![image 865](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/370.jpeg) ![image 866](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/45.jpeg) ![image 867](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/125.jpeg) ![image 868](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/741.jpeg) ![image 869](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/616.jpeg) ![image 870](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/756.jpeg) ![image 871](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/868.jpeg) ![image 872](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/834.jpeg) ![image 873](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/773.jpeg) ![image 874](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/135.jpeg) ![image 875](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/152.jpeg) ![image 876](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/149.jpeg) ![image 877](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/853.jpeg) ![image 878](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/130.jpeg) ![image 879](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/892.jpeg) ![image 880](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/822.jpeg) ![image 881](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1088.jpeg) ![image 882](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/717.jpeg) ![image 883](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/321.jpeg) ![image 884](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/517.jpeg) ![image 885](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/500.jpeg) ![image 886](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/628.jpeg) ![image 887](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1207.jpeg) ![image 888](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/352.jpeg) ![image 
889](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/930.jpeg) ![image 890](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/554.jpeg) ![image 891](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/797.jpeg) ![image 892](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1115.jpeg) ![image 893](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1157.jpeg) ![image 894](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/747.jpeg) ![image 895](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1183.jpeg) ![image 896](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/523.jpeg) ![image 897](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/369.jpeg) ![image 898](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/659.jpeg) ![image 899](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/65.jpeg) ![image 900](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/775.jpeg) ![image 901](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/448.jpeg) ![image 902](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/582.jpeg) ![image 903](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/548.jpeg) ![image 904](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/778.jpeg) ![image 905](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/824.jpeg) ![image 906](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/636.jpeg) ![image 907](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1091.jpeg) ![image 908](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/932.jpeg) ![image 909](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/536.jpeg) ![image 910](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/709.jpeg) ![image 911](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/306.jpeg) ![image 912](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/53.jpeg) ![image 913](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/922.jpeg) ![image 914](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/319.jpeg) ![image 915](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/205.jpeg) ![image 916](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1079.jpeg) ![image 
917](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/407.jpeg) ![image 918](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/615.jpeg) ![image 919](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1142.jpeg) ![image 920](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/556.jpeg) ![image 921](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1200.jpeg) ![image 922](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/340.jpeg) ![image 923](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/219.jpeg) ![image 924](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/564.jpeg) ![image 925](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/593.jpeg) ![image 926](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/982.jpeg) ![image 927](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/623.jpeg) ![image 928](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/769.jpeg) ![image 929](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1216.jpeg) ![image 930](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1085.jpeg) ![image 931](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1065.jpeg) ![image 932](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/626.jpeg) ![image 933](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/239.jpeg) ![image 934](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/288.jpeg) ![image 935](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/511.jpeg) ![image 936](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/339.jpeg) ![image 937](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/658.jpeg) ![image 938](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/783.jpeg) ![image 939](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/323.jpeg) ![image 940](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/577.jpeg) ![image 941](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/925.jpeg) ![image 942](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/716.jpeg) ![image 943](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1104.jpeg) ![image 944](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/117.jpeg) ![image 
945](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/97.jpeg) ![image 946](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/988.jpeg) ![image 947](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/840.jpeg) ![image 948](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/830.jpeg) ![image 949](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/735.jpeg) ![image 950](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1202.jpeg) ![image 951](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1128.jpeg) ![image 952](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/625.jpeg) ![image 953](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/609.jpeg) ![image 954](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/881.jpeg) ![image 955](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/576.jpeg) ![image 956](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/216.jpeg) ![image 957](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1099.jpeg) ![image 958](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/309.jpeg) ![image 959](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/275.jpeg) ![image 960](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/177.jpeg) ![image 961](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/795.jpeg) ![image 962](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/358.jpeg) ![image 963](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/552.jpeg) ![image 964](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1160.jpeg) ![image 965](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/143.jpeg) ![image 966](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/364.jpeg) ![image 967](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/519.jpeg) ![image 968](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/702.jpeg) ![image 969](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1073.jpeg) ![image 970](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/179.jpeg) ![image 971](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/551.jpeg) ![image 972](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1181.jpeg) ![image 
973](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/105.jpeg) ![image 974](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/238.jpeg) ![image 975](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/721.jpeg) ![image 976](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/567.jpeg) ![image 977](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/331.jpeg) ![image 978](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1078.jpeg) ![image 979](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/530.jpeg) ![image 980](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/422.jpeg) ![image 981](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/425.jpeg) ![image 982](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1033.jpeg) ![image 983](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1191.jpeg) ![image 984](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/446.jpeg) ![image 985](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1086.jpeg) ![image 986](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/15.jpeg) ![image 987](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/931.jpeg) ![image 988](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/451.jpeg) ![image 989](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/643.jpeg) ![image 990](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/505.jpeg) ![image 991](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1068.jpeg) ![image 992](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1010.jpeg) ![image 993](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/77.jpeg) ![image 994](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/94.jpeg) ![image 995](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1214.jpeg) ![image 996](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1219.jpeg) ![image 997](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/145.jpeg) ![image 998](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/706.jpeg) ![image 999](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/421.jpeg) ![image 1000](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/544.jpeg) ![image 
1001](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/875.jpeg) ![image 1002](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/761.jpeg) ![image 1003](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/337.jpeg) ![image 1004](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/805.jpeg) ![image 1005](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/645.jpeg) ![image 1006](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/630.jpeg) ![image 1007](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/694.jpeg) ![image 1008](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/958.jpeg) ![image 1009](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/901.jpeg) ![image 1010](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/587.jpeg) ![image 1011](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1210.jpeg) ![image 1012](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/714.jpeg) ![image 1013](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/157.jpeg) ![image 1014](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/720.jpeg) ![image 1015](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/267.jpeg) ![image 1016](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/579.jpeg) ![image 1017](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/752.jpeg) ![image 1018](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/209.jpeg) ![image 1019](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/808.jpeg) ![image 1020](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/236.jpeg) ![image 1021](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1001.jpeg) ![image 1022](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/813.jpeg) ![image 1023](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/11.jpeg) ![image 1024](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/823.jpeg) ![image 1025](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1038.jpeg) ![image 1026](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/848.jpeg) ![image 1027](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1138.jpeg) ![image 1028](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/302.jpeg) ![image 
1029](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1179.jpeg) ![image 1030](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/842.jpeg) ![image 1031](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/137.jpeg) ![image 1032](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1027.jpeg) ![image 1033](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/14.jpeg) ![image 1034](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1130.jpeg) ![image 1035](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/670.jpeg) ![image 1036](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/32.jpeg) ![image 1037](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/223.jpeg) ![image 1038](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/676.jpeg) ![image 1039](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/307.jpeg) ![image 1040](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/457.jpeg) ![image 1041](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1089.jpeg) ![image 1042](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/396.jpeg) ![image 1043](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1195.jpeg) ![image 1044](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1044.jpeg) ![image 1045](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/12.jpeg) ![image 1046](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/788.jpeg) ![image 1047](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/48.jpeg) ![image 1048](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/342.jpeg) ![image 1049](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/13.jpeg) ![image 1050](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1155.jpeg) ![image 1051](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/356.jpeg) ![image 1052](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/467.jpeg) ![image 1053](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/957.jpeg) ![image 1054](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1129.jpeg) ![image 1055](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/452.jpeg) ![image 1056](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1023.jpeg) ![image 
1057](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/574.jpeg) ![image 1058](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/90.jpeg) ![image 1059](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/432.jpeg) ![image 1060](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/5.jpeg) ![image 1061](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/180.jpeg) ![image 1062](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/909.jpeg) ![image 1063](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/71.jpeg) ![image 1064](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/910.jpeg) ![image 1065](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/936.jpeg) ![image 1066](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/539.jpeg) ![image 1067](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/998.jpeg) ![image 1068](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/206.jpeg) ![image 1069](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/195.jpeg) ![image 1070](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/907.jpeg) ![image 1071](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/884.jpeg) ![image 1072](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1014.jpeg) ![image 1073](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/595.jpeg) ![image 1074](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/107.jpeg) ![image 1075](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/739.jpeg) ![image 1076](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/20.jpeg) ![image 1077](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/877.jpeg) ![image 1078](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1018.jpeg) ![image 1079](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/87.jpeg) ![image 1080](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/597.jpeg) ![image 1081](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/60.jpeg) ![image 1082](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/704.jpeg) ![image 1083](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/525.jpeg) ![image 1084](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/240.jpeg) ![image 
1085](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/8.jpeg) ![image 1086](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/259.jpeg) ![image 1087](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/640.jpeg) ![image 1088](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/282.jpeg) ![image 1089](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/476.jpeg) ![image 1090](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/463.jpeg) ![image 1091](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/120.jpeg) ![image 1092](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/103.jpeg) ![image 1093](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1019.jpeg) ![image 1094](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/723.jpeg) ![image 1095](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/441.jpeg) ![image 1096](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1057.jpeg) ![image 1097](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/606.jpeg) ![image 1098](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/766.jpeg) ![image 1099](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/612.jpeg) ![image 1100](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/92.jpeg) ![image 1101](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/394.jpeg) ![image 1102](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/605.jpeg) ![image 1103](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/133.jpeg) ![image 1104](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/780.jpeg) ![image 1105](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/295.jpeg) ![image 1106](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/841.jpeg) ![image 1107](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1092.jpeg) ![image 1108](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/876.jpeg) ![image 1109](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/113.jpeg) ![image 1110](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/465.jpeg) ![image 1111](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/320.jpeg) ![image 1112](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/619.jpeg) ![image 
1113](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/22.jpeg) ![image 1114](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/680.jpeg) ![image 1115](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/969.jpeg) ![image 1116](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1135.jpeg) ![image 1117](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/317.jpeg) ![image 1118](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1137.jpeg) ![image 1119](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/70.jpeg) ![image 1120](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/852.jpeg) ![image 1121](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/377.jpeg) ![image 1122](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/131.jpeg) ![image 1123](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/902.jpeg) ![image 1124](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/700.jpeg) ![image 1125](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/74.jpeg) ![image 1126](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/599.jpeg) ![image 1127](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/997.jpeg) ![image 1128](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/412.jpeg) ![image 1129](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/338.jpeg) ![image 1130](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/665.jpeg) ![image 1131](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/555.jpeg) ![image 1132](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1174.jpeg) ![image 1133](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/439.jpeg) ![image 1134](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/557.jpeg) ![image 1135](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/570.jpeg) ![image 1136](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1176.jpeg) ![image 1137](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/31.jpeg) ![image 1138](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1154.jpeg) ![image 1139](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/112.jpeg) ![image 1140](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1031.jpeg) ![image 
1141](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/890.jpeg) ![image 1142](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/431.jpeg) ![image 1143](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1201.jpeg) ![image 1144](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/679.jpeg) ![image 1145](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/873.jpeg) ![image 1146](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1164.jpeg) ![image 1147](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/372.jpeg) ![image 1148](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/347.jpeg) ![image 1149](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/122.jpeg) ![image 1150](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/138.jpeg) ![image 1151](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/419.jpeg) ![image 1152](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/538.jpeg) ![image 1153](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/683.jpeg) ![image 1154](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1143.jpeg) ![image 1155](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1015.jpeg) ![image 1156](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/351.jpeg) ![image 1157](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/767.jpeg) ![image 1158](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/21.jpeg) ![image 1159](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/123.jpeg) ![image 1160](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/308.jpeg) ![image 1161](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/952.jpeg) ![image 1162](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/197.jpeg) ![image 1163](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/447.jpeg) ![image 1164](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/620.jpeg) ![image 1165](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/37.jpeg) ![image 1166](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/266.jpeg) ![image 1167](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/826.jpeg) ![image 1168](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/711.jpeg) ![image 
1169](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/480.jpeg) ![image 1170](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1113.jpeg) ![image 1171](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/345.jpeg) ![image 1172](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/880.jpeg) ![image 1173](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/360.jpeg) ![image 1174](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1012.jpeg) ![image 1175](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/346.jpeg) ![image 1176](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/800.jpeg) ![image 1177](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/47.jpeg) ![image 1178](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/410.jpeg) ![image 1179](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/16.jpeg) ![image 1180](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/933.jpeg) ![image 1181](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1054.jpeg) ![image 1182](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/245.jpeg) ![image 1183](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/627.jpeg) ![image 1184](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/221.jpeg) ![image 1185](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1136.jpeg) ![image 1186](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/835.jpeg) ![image 1187](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/148.jpeg) ![image 1188](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/230.jpeg) ![image 1189](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/158.jpeg) ![image 1190](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/201.jpeg) ![image 1191](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/99.jpeg) ![image 1192](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/226.jpeg) ![image 1193](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/487.jpeg) ![image 1194](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/258.jpeg) ![image 1195](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/132.jpeg) ![image 1196](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/298.jpeg) ![image 
1197](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/33.jpeg) ![image 1198](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/855.jpeg) ![image 1199](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/344.jpeg) ![image 1200](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/460.jpeg) ![image 1201](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/34.jpeg) ![image 1202](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/349.jpeg) ![image 1203](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/708.jpeg) ![image 1204](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/124.jpeg) ![image 1205](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1139.jpeg) ![image 1206](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/919.jpeg) ![image 1207](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/651.jpeg) ![image 1208](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/695.jpeg) ![image 1209](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1132.jpeg) ![image 1210](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/483.jpeg) ![image 1211](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/249.jpeg) ![image 1212](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/639.jpeg) ![image 1213](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/155.jpeg) ![image 1214](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1090.jpeg) ![image 1215](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/235.jpeg) ![image 1216](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/469.jpeg) ![image 1217](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/81.jpeg) ![image 1218](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/1082.jpeg) ![image 1219](https://huggingface.co/sd-dreambooth-library/leone-from-akame-ga-kill-v2/resolve/main/concept_images/252.jpeg)
Aspect11/DialoGPT-Medium-LiSBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- language: - en tags: - stable-diffusion - text-to-image license: unknown datasets: - Danbooru2021 inference: true --- ## Repo Overview A version of the Waifu Diffusion v1.4 Float32 Booru 110k model and VAE, converted to work with the Diffusers library. ![](https://files.catbox.moe/hqfckc.png) ^ Quality/Style test ![](https://files.catbox.moe/eac57m.png) ^ Unnatural resolution test ![](https://files.catbox.moe/ulrm28.png) ^ Landscape test ![](https://files.catbox.moe/odvq06.png) ^ Extremely long resolution test ## Model Description The model was fine-tuned from Stable Diffusion 2.1 on 110k anime-styled images using a technique known as aspect ratio bucketing, which allows Waifu Diffusion v1.4 to handle varied resolutions much better than its earlier versions. ## Source WD v1.4 Model: https://huggingface.co/hakurei/waifu-diffusion-v1-4/blob/9fa4a42a9c4a0948472fa909e6c1a39be0dda699/models/wd-1-4-float32-booru-110k.ckpt WD v1.4 VAE: https://huggingface.co/hakurei/waifu-diffusion-v1-4/blob/main/vae/kl-f8-anime2.ckpt
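Since the checkpoint is distributed in Diffusers format, it can presumably be loaded with the standard `StableDiffusionPipeline`. The sketch below is illustrative only: the repository id and the prompt are placeholders, not values taken from this card, and `diffusers`/`torch` are assumed to be installed.

```python
# Minimal sketch of loading a Diffusers-format checkpoint like this one.
# The repository id below is a placeholder -- substitute the repo this
# card actually belongs to.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "your-namespace/wd-1-4-booru-110k-diffusers",  # hypothetical repo id
    torch_dtype=torch.float32,                     # the card describes a float32 model
)
pipe = pipe.to("cuda")  # or "cpu" if no GPU is available

image = pipe(
    "1girl, solo, outdoors, looking at viewer",  # example Danbooru-style prompt
    num_inference_steps=30,
    guidance_scale=7.5,
).images[0]
image.save("sample.png")
```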
Atampy26/GPT-Glacier
[ "pytorch", "gpt_neo", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPTNeoForCausalLM" ], "model_type": "gpt_neo", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: en thumbnail: http://www.huggingtweets.com/luisbetx9-microversoslt/1664650577553/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1572306079282872326/rX5Nbrid_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1296918435469897732/ctOlkbD3_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">BetaP̾e̾t̾r̾a̾ 🅼²🆙 & MicroversosLT</div> <div style="text-align: center; font-size: 14px;">@luisbetx9-microversoslt</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from BetaP̾e̾t̾r̾a̾ 🅼²🆙 & MicroversosLT. | Data | BetaP̾e̾t̾r̾a̾ 🅼²🆙 | MicroversosLT | | --- | --- | --- | | Tweets downloaded | 3248 | 1105 | | Retweets | 1892 | 709 | | Short tweets | 644 | 185 | | Tweets kept | 712 | 211 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/zzml0e6d/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @luisbetx9-microversoslt's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/ewz96ki7) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/ewz96ki7/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/luisbetx9-microversoslt') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. 
[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Atarax/rick
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ### <852style-girl> on Stable Diffusion This is the `<852style-girl>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<852style-girl> 0](https://huggingface.co/sd-concepts-library/852style-girl/resolve/main/concept_images/1.jpeg) ![<852style-girl> 1](https://huggingface.co/sd-concepts-library/852style-girl/resolve/main/concept_images/2.jpeg) ![<852style-girl> 2](https://huggingface.co/sd-concepts-library/852style-girl/resolve/main/concept_images/0.jpeg) ![<852style-girl> 3](https://huggingface.co/sd-concepts-library/852style-girl/resolve/main/concept_images/3.jpeg)
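If you prefer to work in `diffusers` directly, the following is a minimal sketch of loading this concept, assuming a recent `diffusers` release that ships `load_textual_inversion` and a Stable Diffusion v1.5 base checkpoint; the concept repository id is inferred from the image links above.

```python
# Minimal sketch under the assumptions stated above; not an official usage example for this concept.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load the learned <852style-girl> embedding from the concept repository.
pipe.load_textual_inversion("sd-concepts-library/852style-girl")

# Use the placeholder token in the prompt to apply the style.
image = pipe("a portrait of a girl in the style of <852style-girl>").images[0]
image.save("852style-girl-sample.png")
```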
Atchuth/MBOT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ### juanma on Stable Diffusion via Dreambooth #### model by juanmapath This is the Stable Diffusion model fine-tuned on the juanma concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **juanmapath** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/1.jpeg) ![image 1](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/4.jpeg) ![image 2](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/10.jpeg) ![image 3](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/7.jpeg) ![image 4](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/2.jpeg) ![image 5](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/0.jpeg) ![image 6](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/9.jpeg) ![image 7](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/3.jpeg) ![image 8](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/6.jpeg) ![image 9](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/5.jpeg) ![image 10](https://huggingface.co/juanmapath/juanma/resolve/main/concept_images/8.jpeg)
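As a rough illustration (not part of the original card), the fine-tuned weights could be run with `diffusers` along these lines; the repository id is inferred from the image links above, and the prompt simply reuses the `instance_prompt`.

```python
# Hedged sketch: the repository id and prompt wording are assumptions, not documented usage.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "juanmapath/juanma", torch_dtype=torch.float16
).to("cuda")

# The instance prompt "juanmapath" activates the learned concept.
image = pipe("a photo of juanmapath, studio lighting, high detail").images[0]
image.save("juanma-sample.png")
```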
Ateeb/QA
[ "pytorch", "distilbert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - generated_from_trainer model-index: - name: nl3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nl3 This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 25 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
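For readers who want to reproduce a comparable run, the hyperparameters listed above map roughly onto the following `TrainingArguments`; this is an illustrative sketch only, since the card does not state the base model or the dataset.

```python
# Illustrative only: the model and datasets are omitted because the card does not name them.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="nl3",
    learning_rate=5e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=25,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=1,
)
```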
Ateeb/SquadQA
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - generated_from_trainer model-index: - name: du_ge_all_2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # du_ge_all_2 This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 20 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
Augustvember/WokkaBot2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ### Nomad on Stable Diffusion This is the `<nomad>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<nomad> 0](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/1.jpeg) ![<nomad> 1](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/4.jpeg) ![<nomad> 2](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/10.jpeg) ![<nomad> 3](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/7.jpeg) ![<nomad> 4](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/2.jpeg) ![<nomad> 5](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/0.jpeg) ![<nomad> 6](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/9.jpeg) ![<nomad> 7](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/3.jpeg) ![<nomad> 8](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/6.jpeg) ![<nomad> 9](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/5.jpeg) ![<nomad> 10](https://huggingface.co/sd-concepts-library/nomad/resolve/main/concept_images/8.jpeg)
Augustvember/wokka
[ "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: mit --- ### kaltsit_v2 on Stable Diffusion via Dreambooth This is the Stable Diffusion model fine-tuned on the kaltsit_v2 concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **kaltsit** v2 update: 1. Increased sample size for more stable results. 2. Prompt update: kaltsit. 3. Prior update: cat girl Use the model in Google Colab: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/11yzVX9rNEkzMBq6rj1HyQxkDjllI4P1-) Here is an example output: prompt = "detailed wallpaper of kaltsit on beach, green animal ears, white hair, green eyes, cleavage breasts and thigh, by ilya kuvshinov and alphonse mucha, strong rim light, splash particles, intense shadows, by Canon EOS, SIGMA Art Lens" ![<kaltsit> 0](https://i.imgur.com/IuuyzOj.jpg) You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts)
Aviora/news2vec
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en tags: - stable-diffusion - text-to-image license: creativeml-openrail-m inference: false --- Stable Diffusion model trained on E621 data, specializing in the kinkier side. The model is also live in my Discord server via a free-to-use bot. [The Gooey Pack](https://discord.gg/WBjvffyJZf) ## License This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the model to deliberately produce or share illegal or harmful outputs or content 2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Axon/resnet50-v1
[ "dataset:ImageNet", "arxiv:1512.03385", "Axon", "Elixir", "license:apache-2.0" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- # Lumine (Genshin Impact) on Stable Diffusion This is the Lumine concept taught to Stable Diffusion via Textual Inversion. You can load this concept into a Stable Diffusion fork such as this [repo](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (Instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui-feature-showcase#textual-inversion)). You can invoke the concept with the keyword lumine_genshin. Note that this should be used in conjunction with the [Waifu-Diffusion](https://huggingface.co/hakurei/waifu-diffusion#model-description) model. # Example Outputs Here are several example outputs: ![Example1](examples/ex1.png) ![Example2](examples/ex2.png) ![Example3](examples/ex3.png)
Ayham/albert_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 --- This repository is for an eventual web demo of Lyra v2 (SoundStream). Currently it just contains a copy of the model files from the official Lyra repo as of October 2nd, 2022: https://github.com/google/lyra/tree/main/model_coeffs I'm aiming to produce ONNX versions of the models too. WIP demo here: https://github.com/josephrocca/lyra-v2-soundstream-web
Ayham/bert_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - conversational --- # DialoGPT model trained on testimony given during the trial of Brazil's ex-president.
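Since the card gives no usage snippet, here is a hedged sketch of the usual DialoGPT-style chat loop with `transformers`; the repository id below is a placeholder, not the real one.

```python
# Hypothetical usage sketch; replace the placeholder repository id with the actual one.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "your-username/dialogpt-testimony-bot"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

chat_history_ids = None
for _ in range(3):
    user_input = input(">> User: ")
    new_ids = tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    reply = tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)
    print("Bot:", reply)
```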
Ayham/bert_gpt2_summarization_xsum
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:xsum", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- language: en license: apache-2.0 library_name: diffusers tags: [] datasets: yuntian-deng/im2latex-100k metrics: [] --- <!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # latex2im_ss_100 ## Model description This diffusion model is trained with the [🤗 Diffusers](https://github.com/huggingface/diffusers) library on the `yuntian-deng/im2latex-100k` dataset. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training data [TODO: describe the data used to train the model] ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 16 - gradient_accumulation_steps: 1 - optimizer: AdamW with betas=(None, None), weight_decay=None and epsilon=None - lr_scheduler: None - lr_warmup_steps: 500 - ema_inv_gamma: None - ema_inv_gamma: None - ema_inv_gamma: None - mixed_precision: no ### Training results 📈 [TensorBoard logs](https://huggingface.co/yuntian-deng/latex2im_ss_100/tensorboard?#scalars)
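Until the TODO above is filled in by the author, a minimal sketch could look like the following, assuming the repository (taken from the TensorBoard link) hosts a standard unconditional `diffusers` pipeline; if the model actually conditions on LaTeX input, the call will differ.

```python
# Minimal sketch under the assumptions stated above; not the author's official snippet.
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("yuntian-deng/latex2im_ss_100")
image = pipeline().images[0]  # unconditional sampling, assuming a DDPM-style pipeline
image.save("latex2im_sample.png")
```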
Ayham/bert_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - en tags: - text-classification - claim-detection license: "mit" datasets: - claimbuster metrics: - accuracy value: "0.83" widget: - text: "This is the best cast iron skillet you will ever buy." - text: "Barack Obama nominated Hilary Clinton as his secretary of state on Monday." - text: "On a shelf, there are five books: a gray book, a red book, a purple book, a blue book, and a black book" --- This is the mDeBERTa model fine-tuned on the ClaimBuster dataset. It is used for claim detection and has an accuracy of 83%.
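A hedged usage sketch with the `transformers` pipeline is shown below; the model repository id is a placeholder because the card does not state it, and the example sentences are taken from the widget above.

```python
# Hypothetical sketch; replace the placeholder with this model's actual repository id.
from transformers import pipeline

claim_detector = pipeline("text-classification", model="your-username/claimbuster-mdeberta")

print(claim_detector("Barack Obama nominated Hilary Clinton as his secretary of state on Monday."))
print(claim_detector("This is the best cast iron skillet you will ever buy."))
```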
Ayham/distilbert_gpt2_summarization_cnndm
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - CartPole-v1 - reinforce - reinforcement-learning - custom-implementation - deep-rl-class model-index: - name: Reinforce-CartPole-v1 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: CartPole-v1 type: CartPole-v1 metrics: - type: mean_reward value: 127.50 +/- 35.97 name: mean_reward verified: false --- # **Reinforce** Agent playing **CartPole-v1** This is a trained model of a **Reinforce** agent playing **CartPole-v1** . To learn to use this model and train yours check Unit 5 of the Deep Reinforcement Learning Class: https://github.com/huggingface/deep-rl-class/tree/main/unit5
Ayham/roberta_bert_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
Access to model pigletpeak/dreambooth is restricted and you are not in the authorized list. Visit https://huggingface.co/pigletpeak/dreambooth to ask for access.
Ayham/xlnet_roberta_summarization_cnn_dailymail
[ "pytorch", "tensorboard", "encoder-decoder", "text2text-generation", "dataset:cnn_dailymail", "transformers", "generated_from_trainer", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "EncoderDecoderModel" ], "model_type": "encoder-decoder", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - generated_from_trainer model-index: - name: greek_legal_bert_v2-finetuned-re-V1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # greek_legal_bert_v2-finetuned-re-V1 This model is a fine-tuned version of [alexaapo/greek_legal_bert_v2](https://huggingface.co/alexaapo/greek_legal_bert_v2) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
BAHIJA/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
36
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-finetuned-1.1.0 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-1.1.0 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 4.5550 - Rouge1: 18.5458 - Rouge2: 5.7454 - Rougel: 15.5515 - Rougelsum: 15.7806 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 15.4799 | 1.0 | 97 | 6.9041 | 16.3755 | 5.6407 | 13.8081 | 13.8801 | | 9.8046 | 2.0 | 194 | 4.5550 | 18.5458 | 5.7454 | 15.5515 | 15.7806 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
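As an illustration only, summaries could be generated from the fine-tuned checkpoint along these lines; the checkpoint path is assumed to be the local output directory named above.

```python
# Illustrative sketch: "mt5-small-finetuned-1.1.0" is assumed to be a local checkpoint directory.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

ckpt = "mt5-small-finetuned-1.1.0"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSeq2SeqLM.from_pretrained(ckpt)

text = "Your document to summarize goes here."
inputs = tokenizer(text, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```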
BSC-LT/roberta-large-bne-capitel-ner
[ "pytorch", "roberta", "token-classification", "es", "dataset:bne", "dataset:capitel", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "capitel", "ner", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
# ReadMe This is a pretrained model based on [gpt2](https://huggingface.co/gpt2) that has been trained on [copenlu/answerable_tydiqa](https://huggingface.co/datasets/copenlu/answerable_tydiqa), specifically the text field of the English samples for 2 epochs. To use the pretrained head, use: `AutoModelForCausalLM.from_pretrained`. ```python from transformers import AutoModelForCausalLM, AutoTokenizer model = AutoModelForCausalLM.from_pretrained("PartiallyTyped/answerable_tydiqa_lm_pretrained_japenese") tokenizer = AutoTokenizer.from_pretrained("rinna/japanese-gpt2-small") ```
BSC-LT/roberta-large-bne-sqac
[ "pytorch", "roberta", "question-answering", "es", "dataset:BSC-TeMU/SQAC", "arxiv:1907.11692", "arxiv:2107.07253", "transformers", "national library of spain", "spanish", "bne", "qa", "question answering", "license:apache-2.0", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
--- tags: - text2text-generation - Trinidadian Creole - Caribbean dialect license: apache-2.0 --- # Standard English to Trinidad English Creole Translator This model utilises the pre-trained T5-base model. It was fine-tuned on a custom dataset for translating English to Trinidad English Creole. This model will be updated periodically as more data is compiled. For more on Caribbean English Creole, check out the library [Caribe](https://pypi.org/project/Caribe/). ___ # Usage with Transformers ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("KES/ENG-TEC") model = AutoModelForSeq2SeqLM.from_pretrained("KES/ENG-TEC") text = "Where are you going now?" inputs = tokenizer("eng:"+text, truncation=True, return_tensors='pt') output = model.generate(inputs['input_ids'], num_beams=4, max_length=512, early_stopping=True) translation=tokenizer.batch_decode(output, skip_special_tokens=True) print("".join(translation)) #translation: Weh yuh going now. ``` ___
Banshee/dialoGPT-luke-small
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-02T17:06:34Z
--- language: sw license: cc-by-4.0 datasets: - kenyacorpus_v2 model-index: - name: innocent-charles/Swahili-question-answer-latest-cased results: - task: type: question-answering name: Question Answering dataset: name: kenyacorpus type: kenyacorpus config: kenyacorpus split: validation metrics: - type: exact_match value: 51.9309 name: Exact Match verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiOTIyN2VhODRhMTQzOGYwNGU0NjM4NmMyOWQ1ZmM4ODliNGRlNjdjMTY3MWU5YzVkYWJmODhiNTMyZDE4NGQ5ZSIsInZlcnNpb24iOjF9.oVd4HFhao0K7AwV0sZTCy2Sa4mG2LP-BX0ImCynZQJ-zReQtgoK1x0LRn31chEKF_CHOQ4ZZ5SBrOuCwK5KNCQ - type: f1 value: 63.9501 name: F1 verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiM2E3YWU0YTljNjI4YmEyNjRkZWFlZTZlZmMzNjc2NzhiMmEzNmNlZDQ1YjEwZGY1MTEzYTUyZWNjMWJiMzBlMiIsInZlcnNpb24iOjF9.x_DxEhpVLb_JRhk0z12lJhVV_ugvUdK_axOe7Cb6oyH7ir7Ky0TJpIDfmk6w7IgNKiYAZ_yObNbjyov6QNoeCw - type: total value: 445 name: total verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiNTFkYzExMDZiZmUwOTA3ZDYyZjhhZjZmZmFhNWU1NDI4NjY4ZTY1NjQxMjhkNjNiMzBmMGY0YTlhNzVjY2NjNyIsInZlcnNpb24iOjF9.RexL6OXVW3eQRdd7tk9RQPNACCFSwXi3DHz0cd77vZ2Jai7ESLTf8vFIM6j7V2nBGcON4-bJ7MQeRrRg16qyCg --- # SWAHILI QUESTION - ANSWER MODEL This is the [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) model, fine-tuned using the [KenyaCorpus](https://github.com/Neurotech-HQ/Swahili-QA-dataset) dataset. It's been trained on question-answer pairs, including unanswerable questions, for the task of Question Answering in Swahili Language. Question answering (QA) is a computer science discipline within the fields of information retrieval and NLP that help in the development of systems in such a way that, given a question in natural language, can extract relevant information from provided data and present it in the form of natural language answers. ## Overview **Language model used:** bert-base-multilingual-cased **Language:** Kiswahili **Downstream-task:** Extractive Swahili QA **Training data:** KenyaCorpus **Eval data:** KenyaCorpus **Code:** See [an example QA pipeline on Haystack](https://blog.neurotech.africa/building-swahili-question-and-answering-with-haystack/) **Infrastructure**: AWS NVIDIA A100 Tensor Core GPU ## Hyperparameters ``` batch_size = 16 n_epochs = 10 base_LM_model = "bert-base-multilingual-cased" max_seq_len = 386 learning_rate = 3e-5 lr_schedule = LinearWarmup warmup_proportion = 0.2 doc_stride=128 max_query_length=64 ``` ## Usage ### In Haystack Haystack is an NLP framework by deepset. You can use this model in a Haystack pipeline to do question answering at scale (over many documents). 
To load the model in [Haystack](https://github.com/deepset-ai/haystack/): ```python reader = FARMReader(model_name_or_path="innocent-charles/Swahili-question-answer-latest-cased") # or reader = TransformersReader(model_name_or_path="innocent-charles/Swahili-question-answer-latest-cased",tokenizer="innocent-charles/Swahili-question-answer-latest-cased") ``` For a complete example of ``Swahili-question-answer-latest-cased`` being used for Swahili Question Answering, check out the [Tutorials in Haystack Documentation](https://haystack.deepset.ai) ### In Transformers ```python from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline model_name = "innocent-charles/Swahili-question-answer-latest-cased" # a) Get predictions nlp = pipeline('question-answering', model=model_name, tokenizer=model_name) QA_input = { 'question': 'Asubuhi ilitupata pambajioi pa hospitali gani?', 'context': 'Asubuhi hiyo ilitupata pambajioni pa hospitali ya Uguzwa.' } res = nlp(QA_input) # b) Load model & tokenizer model = AutoModelForQuestionAnswering.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) ``` ## Performance ``` "exact": 51.87029394424324, "f1": 63.91251169582613, "total": 445, "HasAns_exact": 50.93522267206478, "HasAns_f1": 62.02838248389763, "HasAns_total": 386, "NoAns_exact": 49.79983179142137, "NoAns_f1": 60.79983179142137, "NoAns_total": 59 ``` ## Special consideration The project is still ongoing, so the model will continue to be updated as it is trained on more data; pull requests that help improve the model's performance are therefore welcome. ## Author **Innocent Charles:** [email protected] ## About Me <P> I build good things using Artificial Intelligence, Data and Analytics, with over 3 years of experience as an Applied AI Engineer and Data Scientist, a strong background in Software Engineering, and a passion for and extensive experience in data and business. </P> [Linkedin](https://www.linkedin.com/in/innocent-charles/) | [GitHub](https://github.com/innocent-charles) | [Website](innocentcharles.com)
Banshee/dialoGPT-small-luke
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-02T17:06:56Z
--- license: mit --- Aqua_anime_girl_blk_reg_2000.ckpt # Dataset >Training: 16 images >Regularization: 400 images - black reg images # Info >Model Used: Waifu Diffusion 1.3 epoch 5 >Steps: 2000 >Keyword: Aqua (Use this in the prompt) >Class Phrase: Anime girl Aqua_animegirl_wrd_reg_2000.ckpt # Dataset >Training: 16 images >Regularization: 400 images - Waifu Research Department reg images # Info >Model Used: Waifu Diffusion 1.3 epoch 5 >Steps: 2000 >Keyword: Aqua (Use this in the prompt) >Class Phrase: Anime girl Aqua_CxRwMwCYViVz2JUH_blk_reg_2000.ckpt # Dataset >Training: 16 images >Regularization: 400 images - black reg images # Info >Model Used: Waifu Diffusion 1.3 epoch 5 >Steps: 2000 >Keyword: Aqua (Use this in the prompt) >Class Phrase: CxRwMwCYViVz2JUH Aqua_CxRwMwCYViVz2JUH_wrd_reg_2000.ckpt # Dataset >Training: 16 images >Regularization: 400 images - Waifu Research Department reg images # Info >Model Used: Waifu Diffusion 1.3 epoch 5 >Steps: 2000 >Keyword: Aqua (Use this in the prompt) >Class Phrase: CxRwMwCYViVz2JUH
Barleysack/AERoberta
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-10-02T17:58:34Z
"Question generation model conversational capabilities"
BatuhanYilmaz/mt5-small-finetuned-amazonbooks-en-es
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: openrail --- # <span style="color:blue">FishNet: AI For Fish Stock Estimation</span> The attached model was trained on a dataset of 63,000 fish images belonging to 163 species. First, we trained a detectron2 model to detect and segment fish and fiducial markers on a board. The detectron2 model was written in PyTorch, and the final classifier is a ResNet50 Keras model. Below is an example of how to use the two models. ## Packages ## Load models ## Load an image and transform ## Run the segmentation model ## Visualize ## Classify the fish
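Because the sections above are still empty, here is a hedged sketch of the two-stage workflow they describe; every file name (config, weights, test image) is an assumption, not something documented by this repository.

```python
# Illustrative sketch only: all paths below are assumptions, not documented artifacts.
import cv2
import numpy as np
import tensorflow as tf
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer

# Load models
cfg = get_cfg()
cfg.merge_from_file("fishnet_mask_rcnn_config.yaml")            # assumed config file
cfg.MODEL.WEIGHTS = "fishnet_segmentation.pth"                   # assumed detectron2 weights
segmenter = DefaultPredictor(cfg)
classifier = tf.keras.models.load_model("fishnet_resnet50.h5")   # assumed Keras classifier

# Load an image and run the segmentation model
image = cv2.imread("fish_on_board.jpg")
outputs = segmenter(image)
instances = outputs["instances"].to("cpu")

# Visualize the detections
vis = Visualizer(image[:, :, ::-1]).draw_instance_predictions(instances)
cv2.imwrite("detections.jpg", vis.get_image()[:, :, ::-1])

# Classify each detected fish crop
for box in instances.pred_boxes.tensor.numpy().astype(int):
    x1, y1, x2, y2 = box
    crop = cv2.resize(image[y1:y2, x1:x2], (224, 224)) / 255.0
    probs = classifier.predict(np.expand_dims(crop, axis=0))
    print("Predicted species index:", int(np.argmax(probs)))
```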
Baybars/debateGPT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-02T02:17:04Z
--- library_name: pytorch tags: - style-transfer - face-stylization --- ## Model Details This system provides a web demo for the following paper: **VToonify: Controllable High-Resolution Portrait Video Style Transfer (TOG/SIGGRAPH Asia 2022)** - Developed by: Shuai Yang, Liming Jiang, Ziwei Liu and Chen Change Loy - Resources for more information: - [Project Page](https://www.mmlab-ntu.com/project/vtoonify/) - [Research Paper](https://arxiv.org/abs/2209.11224) - [GitHub Repo](https://github.com/williamyang1991/VToonify) **Abstract** > Generating high-quality artistic portrait videos is an important and desirable task in computer graphics and vision. Although a series of successful portrait image toonification models built upon the powerful StyleGAN have been proposed, these image-oriented methods have obvious limitations when applied to videos, such as the fixed frame size, the requirement of face alignment, missing non-facial details and temporal inconsistency. In this work, we investigate the challenging controllable high-resolution portrait video style transfer by introducing a novel **VToonify** framework. Specifically, VToonify leverages the mid- and high-resolution layers of StyleGAN to render high-quality artistic portraits based on the multi-scale content features extracted by an encoder to better preserve the frame details. The resulting fully convolutional architecture accepts non-aligned faces in videos of variable size as input, contributing to complete face regions with natural motions in the output. Our framework is compatible with existing StyleGAN-based image toonification models to extend them to video toonification, and inherits appealing features of these models for flexible style control on color and intensity. This work presents two instantiations of VToonify built upon Toonify and DualStyleGAN for collection-based and exemplar-based portrait video style transfer, respectively. Extensive experimental results demonstrate the effectiveness of our proposed VToonify framework over existing methods in generating high-quality and temporally-coherent artistic portrait videos with flexible style controls. ## Citation Information ```bibtex @article{yang2022Vtoonify, title={VToonify: Controllable High-Resolution Portrait Video Style Transfer}, author={Yang, Shuai and Jiang, Liming and Liu, Ziwei and Loy, Chen Change}, journal={ACM Transactions on Graphics (TOG)}, volume={41}, number={6}, articleno={203}, pages={1--15}, year={2022}, publisher={ACM New York, NY, USA}, doi={10.1145/3550454.3555437}, } ``` ## License [S-Lab License 1.0](https://github.com/williamyang1991/VToonify/blob/main/LICENSE.md)
Baybars/wav2vec2-xls-r-1b-turkish
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "tr", "dataset:common_voice", "transformers", "common_voice", "generated_from_trainer" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
2022-10-02T21:06:00Z
--- license: wtfpl language: es tags: - gpt-j - spanish - gpt-j-6b - LLM - Spanish GPT-J --- # BERTIN-GPT-J-6B with 8-bit weights (Quantized) This model (and model card) is an adaptation of [hivemind/gpt-j-6B-8bit](https://huggingface.co/hivemind/gpt-j-6B-8bit), so all credits to him/her. This is a version of the **latest checkpoint (1M steps)** **[bertin-project/bertin-gpt-j-6B](https://huggingface.co/bertin-project/bertin-gpt-j-6B)** that is modified so you can generate **and fine-tune the model in Colab or equivalent desktop GPU**. Here's how to run it: [![colab](https://camo.githubusercontent.com/84f0493939e0c4de4e6dbe113251b4bfb5353e57134ffd9fcab6b8714514d4d1/68747470733a2f2f636f6c61622e72657365617263682e676f6f676c652e636f6d2f6173736574732f636f6c61622d62616467652e737667)](https://colab.research.google.com/drive/1ft6wQU0BhqG5PRlwgaZJv2VukKKjU4Es) __The [original GPT-J](https://huggingface.co/EleutherAI/gpt-j-6B/tree/main)__ takes 22+ GB memory for float32 parameters alone, and that's before you account for gradients & optimizer. Even if you cast everything to 16-bit, it will still not fit onto most single-GPU setups short of A6000 and A100. You can inference it [on TPU](https://colab.research.google.com/github/kingoflolz/mesh-transformer-jax/blob/master/colab_demo.ipynb) or CPUs, but fine-tuning is way more expensive. Here, we apply several techniques to make GPT-J usable and fine-tunable on a single GPU with ~11 GB memory: - large weight tensors are quantized using dynamic 8-bit quantization and de-quantized just-in-time for multiplication - using gradient checkpoints to store one only activation per layer: using dramatically less memory at the cost of 30% slower training - scalable fine-tuning with [LoRA](https://arxiv.org/abs/2106.09685) and [8-bit Adam](https://arxiv.org/abs/2110.02861) In other words, all of the large weight-matrices are frozen in 8-bit, and you only train small adapters and optionally 1d tensors (layernorm scales, biases). ![img](https://i.imgur.com/n4XXo1x.png) __Does 8-bit affect model quality?__ Technically yes, but the effect is negligible in practice. [This notebook measures wikitext test perplexity](https://nbviewer.org/urls/huggingface.co/hivemind/gpt-j-6B-8bit/raw/main/check_perplexity.ipynb) and it is nigh indistinguishable from the original GPT-J. Quantized model is even slightly better, but that is not statistically significant. Our code differs from other 8-bit methods in that we use **8-bit only for storage, and all computations are performed in float16 or float32**. As a result, we can take advantage of nonlinear quantization that fits to each individual weight distribution. Such nonlinear quantization does not accelerate inference, but it allows for much smaller error. __What about performance?__ Both checkpointing and de-quantization has some overhead, but it's surprisingly manageable. Depending on GPU and batch size, the quantized model is 1-10% slower than the original model on top of using gradient checkpoints (which is 30% overhead). In short, this is because block-wise quantization from bitsandbytes is really fast on GPU. ### How should I fine-tune the model? We recommend starting with the original hyperparameters from [the LoRA paper](https://arxiv.org/pdf/2106.09685.pdf). On top of that, there is one more trick to consider: the overhead from de-quantizing weights does not depend on batch size. As a result, the larger batch size you can fit, the more efficient you will train. ### Can I use this technique with other models? 
The model was converted using [this notebook](https://nbviewer.org/urls/huggingface.co/hivemind/gpt-j-6B-8bit/raw/main/convert-gpt-j.ipynb). It can be adapted to work with other model types. However, please bear in mind that some models replace Linear and Embedding with custom alternatives that require their own BNBWhateverWithAdapters. ### How to use ```sh wget https://huggingface.co/mrm8488/bertin-gpt-j-6B-ES-v1-8bit/resolve/main/utils.py -O Utils.py pip install transformers pip install bitsandbytes-cuda111==0.26.0 ``` ```py import transformers import torch from Utils import GPTJBlock, GPTJForCausalLM device = "cuda" if torch.cuda.is_available() else "cpu" transformers.models.gptj.modeling_gptj.GPTJBlock = GPTJBlock # monkey-patch GPT-J ckpt = "mrm8488/bertin-gpt-j-6B-ES-v1-8bit" tokenizer = transformers.AutoTokenizer.from_pretrained(ckpt) model = GPTJForCausalLM.from_pretrained(ckpt, pad_token_id=tokenizer.eos_token_id, low_cpu_mem_usage=True).to(device) prompt = tokenizer("El sentido de la vida es", return_tensors='pt') feats = {key: value.to(device) for key, value in prompt.items()} out = model.generate(**feats, max_length=64, do_sample=True) print(tokenizer.decode(out[0])) ```
BeIR/query-gen-msmarco-t5-base-v1
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
1,816
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: din0s/asqa model-index: - name: t5-base-pt-asqa-ob results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base-pt-asqa-ob This model is a fine-tuned version of [din0s/t5-base-msmarco-nlgen-ob](https://huggingface.co/din0s/t5-base-msmarco-nlgen-ob) on the [ASQA](https://huggingface.co/datasets/din0s/asqa) dataset. It achieves the following results on the evaluation set: - Loss: 1.7481 - Rougelsum: 12.3722 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:---------:| | No log | 1.0 | 355 | 1.8760 | 11.5138 | | 2.1344 | 2.0 | 710 | 1.8322 | 11.6843 | | 1.979 | 3.0 | 1065 | 1.8109 | 11.8592 | | 1.979 | 4.0 | 1420 | 1.7967 | 11.9466 | | 1.9493 | 5.0 | 1775 | 1.7871 | 12.0333 | | 1.9099 | 6.0 | 2130 | 1.7778 | 12.0805 | | 1.9099 | 7.0 | 2485 | 1.7720 | 12.1659 | | 1.8748 | 8.0 | 2840 | 1.7668 | 12.2039 | | 1.8584 | 9.0 | 3195 | 1.7628 | 12.2506 | | 1.8362 | 10.0 | 3550 | 1.7601 | 12.2557 | | 1.8362 | 11.0 | 3905 | 1.7575 | 12.2718 | | 1.8134 | 12.0 | 4260 | 1.7562 | 12.2789 | | 1.7996 | 13.0 | 4615 | 1.7538 | 12.3179 | | 1.7996 | 14.0 | 4970 | 1.7529 | 12.3035 | | 1.8049 | 15.0 | 5325 | 1.7519 | 12.3317 | | 1.7898 | 16.0 | 5680 | 1.7510 | 12.3717 | | 1.7872 | 17.0 | 6035 | 1.7497 | 12.3750 | | 1.7872 | 18.0 | 6390 | 1.7486 | 12.3580 | | 1.7759 | 19.0 | 6745 | 1.7483 | 12.3698 | | 1.785 | 20.0 | 7100 | 1.7481 | 12.3722 | ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1+cu102 - Datasets 2.4.0 - Tokenizers 0.12.1
BenQLange/HF_bot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-02T22:34:58Z
--- tags: - generated_from_trainer model-index: - name: OscarRoBERTo_pt results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # OscarRoBERTo_pt This model is a fine-tuned version of [](https://huggingface.co/) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
Bharathdamu/wav2vec2-large-xls-r-300m-hindi2-colab
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-03T00:44:12Z
--- license: apache-2.0 --- This model receives scrambled or incoherent sentences as input and returns a meaningful sentence using the same words as the input, a form of grammar correction if you will. It was trained on a dataset of permuted sentences derived from Wikipedia pages as input, with the correct arrangement of words as labels. It is an encoder-decoder model that uses BERT's weights in both its encoder and decoder.
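A hedged usage sketch for such an encoder-decoder corrector is shown below; the repository id is a placeholder, since the card does not name one.

```python
# Hypothetical sketch; replace the placeholder with the model's actual repository id.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "your-username/sentence-unscrambler"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

scrambled = "ball the chased dog the"
inputs = tokenizer(scrambled, return_tensors="pt")
output_ids = model.generate(**inputs, max_length=32, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```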
Bhuvana/t5-base-spellchecker
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
93
2022-10-03T01:07:42Z
--- license: apache-2.0 --- A simple text generation model trained on more than 17,000 Elon Musk tweets, achieving an accuracy of 92%.
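The card above describes the model in one sentence and omits any inference example. A minimal sketch with the `text-generation` pipeline is given below; the repo id is hypothetical (the card does not state where the model is published) and the sampling settings are arbitrary choices, not values from the card.

```python
from transformers import pipeline

# Placeholder repo id; the card does not state where the model is published.
generator = pipeline("text-generation", model="your-username/elon-tweets-generator")

prompt = "The future of space travel"
samples = generator(prompt, max_new_tokens=40, do_sample=True, top_p=0.95, num_return_sequences=2)
for s in samples:
    print(s["generated_text"])
```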
Biasface/DDDC
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
# Description Trainer: naotsue Ayanokouji Kiyotaka from Classroom of the Elite # Dataset >Training: 20 images >Regularization: 300 images # Info >Model Used: Waifu Diffusion 1.3 (Epoch 6) >Steps: 3000 >Keyword: AYANOKOUJI (Use this in the prompt) >Class Phrase: 1boy ![Sak](https://animevania.com/wp-content/uploads/2022/07/classroom-of-the-elite-kiyotaka-ayanokoji-4k-wallpaper-uhdpaper.com-557@[email protected])
BigSalmon/BlankSlots
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
4
null
--- pipeline_tag: text-generation widget: text: You could not prevent a thunderstorm, but you could use --- Levanter GPT is trained on OpenWebText2. A more complete model card will be added in the future.
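The Levanter GPT card above only states the training corpus and a widget prompt. Below is a minimal causal-LM sketch that reuses the card's own widget text as the prompt; the repo id is a placeholder, since the card does not give one, and the decoding settings are assumptions.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Placeholder repo id; the card does not say where Levanter GPT is hosted.
model_id = "your-org/levanter-gpt"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Prompt reused from the card's widget text.
prompt = "You could not prevent a thunderstorm, but you could use"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=30, do_sample=True, top_k=50)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```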
BigSalmon/FormalRobertaa
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit --- ### New priests on Stable Diffusion This is the `<new-priest>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<new-priest> 0](https://huggingface.co/sd-concepts-library/new-priests/resolve/main/concept_images/1.jpeg) ![<new-priest> 1](https://huggingface.co/sd-concepts-library/new-priests/resolve/main/concept_images/2.jpeg) ![<new-priest> 2](https://huggingface.co/sd-concepts-library/new-priests/resolve/main/concept_images/0.jpeg) ![<new-priest> 3](https://huggingface.co/sd-concepts-library/new-priests/resolve/main/concept_images/3.jpeg)
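The concept card above links the learned `<new-priest>` embedding and the training notebooks but shows no inference code. As a rough sketch (not the notebook workflow the card links to), recent `diffusers` versions can load such a concept directly; the base checkpoint choice and the prompt are assumptions.

```python
import torch
from diffusers import StableDiffusionPipeline

# Base checkpoint choice is an assumption; any Stable Diffusion 1.x pipeline should work similarly.
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

# Load the learned embedding from the concept repo referenced in the card's image links.
pipe.load_textual_inversion("sd-concepts-library/new-priests")

image = pipe("a portrait in the style of <new-priest>", num_inference_steps=30).images[0]
image.save("new_priest.png")
```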
BigSalmon/FormalRobertaaa
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: mit --- ### Liminalspaces on Stable Diffusion This is the `<liminal image>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<liminal image> 0](https://huggingface.co/sd-concepts-library/liminalspaces/resolve/main/concept_images/1.jpeg) ![<liminal image> 1](https://huggingface.co/sd-concepts-library/liminalspaces/resolve/main/concept_images/4.jpeg) ![<liminal image> 2](https://huggingface.co/sd-concepts-library/liminalspaces/resolve/main/concept_images/2.jpeg) ![<liminal image> 3](https://huggingface.co/sd-concepts-library/liminalspaces/resolve/main/concept_images/0.jpeg) ![<liminal image> 4](https://huggingface.co/sd-concepts-library/liminalspaces/resolve/main/concept_images/3.jpeg) ![<liminal image> 5](https://huggingface.co/sd-concepts-library/liminalspaces/resolve/main/concept_images/5.jpeg)
BigSalmon/FroBurta
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text metrics: - name: Accuracy type: accuracy value: 0.6566666666666666 - name: F1 type: f1 value: 0.6555183946488293 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6751 - Accuracy: 0.6567 - F1: 0.6555 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
BigSalmon/GoodMaskResults
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- tags: - generated_from_trainer datasets: - invoice metrics: - precision - recall - f1 - accuracy model-index: - name: layoutlmv3-finetuned-invoice results: - task: name: Token Classification type: token-classification dataset: name: Invoice type: invoice args: invoice metrics: - name: Precision type: precision value: 1.0 - name: Recall type: recall value: 1.0 - name: F1 type: f1 value: 1.0 - name: Accuracy type: accuracy value: 1.0 --- # LayoutLM-v3 model fine-tuned on invoice dataset This model is a fine-tuned version of [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) on the Rhenus dataset. We use Microsoft’s LayoutLMv3 trained on Eurocorporation Dataset to predict the labels. To use it, simply upload an image. Results will show up in a few seconds. It achieves the following results on the evaluation set: - Loss: 0.0012 - Precision: 1.0 - Recall: 1.0 - F1: 1.0 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data All the training codes are available from the below GitHub link. The model can be evaluated at the HuggingFace Spaces link: https://huggingface.co/DataIntelligenceTeam/Rhenus ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 5 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 1000 ### Training results | Step | Training Loss | Validation Loss| Precision | Recall | F1 | Accuracy | |:---:|:------------:|:--------------:|:---------:|:------:|:------:|:---------:| |100 | No log | 0.57765 |0.568773 | 0.600000 |0.583969| 0.895848| |200 | No log | 0.181364 | 0.933594| 0.937255| 0.935421| 0.988037| |300 | No log | 0.091626| 0.945312| 0.949020| 0.947162| 0.991555| |400 | No log | 0.060504| 0.964981| 0.972549| 0.968750| 0.995074| |500 | 0.360900 | 0.046041| 0.988327| 0.996078| 0.992188| 0.999296| |600 | 0.360900 | 0.036889| 0.988327| 0.996078| 0.992188| 0.999296| |700 | 0.360900 | 0.032077| 0.988327| 0.996078| 0.992188| 0.999296| |800 | 0.360900 | 0.028109| 0.988327| 0.996078| 0.992188| 0.999296| |900 | 0.360900 | 0.027945| 0.988327| 0.996078| 0.992188| 0.999296| |1000| 0.037800 | 0.027469| 0.988327| 0.996078| 0.992188| 0.999296 ### Framework versions - Transformers 4.20.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.2.2 - Tokenizers 0.12.1
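The card above reports token-classification metrics for a fine-tuned LayoutLMv3 but gives no inference example. The sketch below is one plausible way to run such a checkpoint on an invoice image; the checkpoint path is hypothetical, the processor is taken from the base model, and OCR via Tesseract/pytesseract is assumed to be installed.

```python
from PIL import Image
from transformers import AutoProcessor, AutoModelForTokenClassification

ckpt = "your-org/layoutlmv3-finetuned-invoice"  # hypothetical repo id, not from the card
processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base")  # runs Tesseract OCR by default
model = AutoModelForTokenClassification.from_pretrained(ckpt)

image = Image.open("invoice.png").convert("RGB")
encoding = processor(image, return_tensors="pt", truncation=True)
logits = model(**encoding).logits
pred_ids = logits.argmax(-1).squeeze().tolist()
print([model.config.id2label[i] for i in pred_ids])
```

Token predictions would still need to be aligned back to the OCR words (for example via the encoding's word-id mapping); that step is omitted here for brevity.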
BigSalmon/InformalToFormalLincoln15
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: krm/my_exercice_mrpc results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # krm/my_exercice_mrpc This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.6942 - Train Accuracy: 0.6200 - Validation Loss: 0.6486 - Validation Accuracy: 0.6838 - Epoch: 2 ## Model description This model is not meant to be used. It is only a small test run. ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 0.001, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Train Accuracy | Validation Loss | Validation Accuracy | Epoch | |:----------:|:--------------:|:---------------:|:-------------------:|:-----:| | 0.6860 | 0.6314 | 0.7727 | 0.6838 | 0 | | 0.6862 | 0.6347 | 0.6326 | 0.6838 | 1 | | 0.6942 | 0.6200 | 0.6486 | 0.6838 | 2 | ### Framework versions - Transformers 4.22.2 - TensorFlow 2.8.2 - Datasets 2.5.1 - Tokenizers 0.12.1
BigSalmon/MrLincoln10
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en tags: - esb datasets: - esb/datasets - librispeech_asr --- To reproduce this run, first install Whisper from the Transformers compatible repo [patrickvonplaten/whisper](https://github.com/patrickvonplaten/whisper): ``` pip install git+https://github.com/openai/whisper.git ``` Then execute the command: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_whisper.py \ --model_name_or_path="medium.en" \ --dataset_name="esb/datasets" \ --dataset_config_name="librispeech" \ --max_steps="5000" \ --output_dir="./" \ --run_name="whisper-librispeech" \ --wandb_project="whisper" \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="16" \ --logging_steps="25" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --report_to="wandb" \ --preprocessing_num_workers="16" \ --evaluation_strategy="steps" \ --eval_steps="1000" \ --save_strategy="steps" \ --save_steps="1000" \ --generation_max_length="224" \ --length_column_name="input_lengths" \ --gradient_checkpointing \ --group_by_length \ --freeze_encoder \ --fp16 \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --predict_with_generate \ --use_auth_token ```
BigSalmon/MrLincoln13
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - en tags: - esb datasets: - esb/datasets - LIUM/tedlium --- To reproduce this run, first install Whisper from the Transformers compatible repo [patrickvonplaten/whisper](https://github.com/patrickvonplaten/whisper): ``` pip install git+https://github.com/openai/whisper.git ``` Then execute the command: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_whisper.py \ --model_name_or_path="medium.en" \ --dataset_name="esb/datasets" \ --dataset_config_name="tedlium" \ --max_steps="2500" \ --output_dir="./" \ --run_name="whisper-tedlium" \ --wandb_project="whisper" \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="16" \ --logging_steps="25" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --report_to="wandb" \ --preprocessing_num_workers="16" \ --evaluation_strategy="steps" \ --eval_steps="500" \ --save_strategy="steps" \ --save_steps="500" \ --generation_max_length="224" \ --length_column_name="input_lengths" \ --gradient_checkpointing \ --group_by_length \ --freeze_encoder \ --fp16 \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --predict_with_generate \ --use_auth_token ```
BigSalmon/MrLincoln2
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - en tags: - esb datasets: - esb/datasets - speechcolab/gigaspeech --- To reproduce this run, first install Whisper from the Transformers compatible repo [patrickvonplaten/whisper](https://github.com/patrickvonplaten/whisper): ``` pip install git+https://github.com/openai/whisper.git ``` Then execute the command: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_whisper.py \ --model_name_or_path="medium.en" \ --dataset_name="esb/datasets" \ --dataset_config_name="gigaspeech" \ --max_steps="5000" \ --output_dir="./" \ --run_name="whisper-gigaspeech" \ --wandb_project="whisper" \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="16" \ --logging_steps="25" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --report_to="wandb" \ --preprocessing_num_workers="16" \ --evaluation_strategy="steps" \ --eval_steps="1000" \ --save_strategy="steps" \ --save_steps="1000" \ --generation_max_length="224" \ --length_column_name="input_lengths" \ --gradient_checkpointing \ --group_by_length \ --freeze_encoder \ --fp16 \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --predict_with_generate \ --use_auth_token ```
BigSalmon/MrLincoln6
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- language: - en tags: - esb datasets: - esb/datasets - ldc/chime-4 --- To reproduce this run, first install Whisper from the Transformers compatible repo [patrickvonplaten/whisper](https://github.com/patrickvonplaten/whisper): ``` pip install git+https://github.com/openai/whisper.git ``` Then execute the command: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_whisper.py \ --model_name_or_path="medium.en" \ --dataset_name="esb/datasets" \ --dataset_config_name="chime4" \ --max_steps="2500" \ --output_dir="./" \ --run_name="whisper-chime4" \ --dropout_rate="0.1" \ --wandb_project="whisper" \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="16" \ --logging_steps="25" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --report_to="wandb" \ --preprocessing_num_workers="16" \ --evaluation_strategy="steps" \ --eval_steps="500" \ --save_strategy="steps" \ --save_steps="500" \ --generation_max_length="224" \ --length_column_name="input_lengths" \ --gradient_checkpointing \ --group_by_length \ --freeze_encoder \ --fp16 \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --predict_with_generate \ --use_auth_token ```
BigSalmon/ParaphraseParentheses2.0
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: - en tags: - esb datasets: - esb/datasets - mozilla-foundation/common_voice_9_0 --- To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation), then execute: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \ --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \ --model_name_or_path="stt_en_conformer_transducer_xlarge" \ --dataset_name="esb/datasets" \ --tokenizer_path="tokenizer" \ --vocab_size="1024" \ --max_steps="100000" \ --dataset_config_name="common_voice" \ --output_dir="./" \ --run_name="conformer-rnnt-common-voice" \ --wandb_project="rnnt" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="50" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --save_strategy="steps" \ --save_steps="20000" \ --evaluation_strategy="steps" \ --eval_steps="20000" \ --report_to="wandb" \ --preprocessing_num_workers="4" \ --fused_batch_size="4" \ --length_column_name="input_lengths" \ --max_eval_duration_in_seconds="20" \ --fuse_loss_wer \ --group_by_length \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --use_auth_token ```
BigSalmon/PhraseBerta
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: - en tags: - esb datasets: - esb/datasets - facebook/voxpopuli --- To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation), then execute: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \ --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \ --model_name_or_path="stt_en_conformer_transducer_xlarge" \ --dataset_name="esb/datasets" \ --tokenizer_path="tokenizer" \ --vocab_size="1024" \ --max_steps="100000" \ --dataset_config_name="voxpopuli" \ --output_dir="./" \ --run_name="conformer-rnnt-voxpopuli" \ --wandb_project="rnnt" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="50" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --save_strategy="steps" \ --save_steps="20000" \ --evaluation_strategy="steps" \ --eval_steps="20000" \ --report_to="wandb" \ --preprocessing_num_workers="4" \ --fused_batch_size="4" \ --length_column_name="input_lengths" \ --fuse_loss_wer \ --group_by_length \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --use_auth_token ```
BigSalmon/Points
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: - en tags: - esb datasets: - esb/datasets - LIUM/tedlium --- To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation), then execute: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \ --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \ --model_name_or_path="stt_en_conformer_transducer_xlarge" \ --dataset_name="esb/datasets" \ --tokenizer_path="tokenizer" \ --vocab_size="1024" \ --max_steps="100000" \ --dataset_config_name="tedlium" \ --output_dir="./" \ --run_name="rnnt-tedlium-baseline" \ --wandb_project="rnnt" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="50" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --save_strategy="steps" \ --save_steps="20000" \ --evaluation_strategy="steps" \ --eval_steps="20000" \ --report_to="wandb" \ --preprocessing_num_workers="4" \ --fused_batch_size="4" \ --length_column_name="input_lengths" \ --fuse_loss_wer \ --group_by_length \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --use_auth_token ```
BigSalmon/Points2
[ "pytorch", "gpt2", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- language: - en tags: - esb datasets: - esb/datasets - kensho/spgispeech --- To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation), then execute: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \ --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \ --model_name_or_path="stt_en_conformer_transducer_xlarge" \ --dataset_name="esb/datasets" \ --tokenizer_path="tokenizer" \ --vocab_size="1024" \ --max_steps="100000" \ --dataset_config_name="spgispeech" \ --output_dir="./" \ --run_name="conformer-rnnt-spgispeech" \ --wandb_project="rnnt" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="50" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --save_strategy="steps" \ --save_steps="20000" \ --evaluation_strategy="steps" \ --eval_steps="20000" \ --report_to="wandb" \ --preprocessing_num_workers="4" \ --fused_batch_size="4" \ --length_column_name="input_lengths" \ --fuse_loss_wer \ --group_by_length \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --use_auth_token ```
BigSalmon/SimplifyText
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 40 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 40, "warmup_steps": 4, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
BigSalmon/T52
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
8
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9245 - name: F1 type: f1 value: 0.9244334678544425 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2185 - Accuracy: 0.9245 - F1: 0.9244 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.847 | 1.0 | 250 | 0.3228 | 0.8985 | 0.8951 | | 0.2543 | 2.0 | 500 | 0.2185 | 0.9245 | 0.9244 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu116 - Datasets 2.5.1 - Tokenizers 0.12.1
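The emotion-classification card above lists metrics and hyperparameters but no usage code. A minimal sketch with the `text-classification` pipeline follows; the repo id is a placeholder for wherever this fine-tuned checkpoint is published, and the example sentence and printed label are illustrative only.

```python
from transformers import pipeline

# Placeholder repo id for wherever this fine-tuned checkpoint is published.
classifier = pipeline("text-classification", model="your-username/distilbert-base-uncased-finetuned-emotion")

print(classifier("I can't believe how well this turned out, I'm thrilled!"))
# e.g. [{'label': 'joy', 'score': 0.98}] -- actual labels depend on the emotion dataset's classes
```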
BigSalmon/T5F
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
--- language: - en tags: - esb datasets: - esb/datasets - ldc/switchboard --- To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation), then execute: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \ --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \ --model_name_or_path="stt_en_conformer_transducer_xlarge" \ --dataset_name="esb/datasets" \ --tokenizer_path="tokenizer" \ --vocab_size="1024" \ --max_steps="100000" \ --dataset_config_name="switchboard" \ --output_dir="./" \ --run_name="conformer-rnnt-switchboard" \ --wandb_project="rnnt" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="50" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --save_strategy="steps" \ --save_steps="20000" \ --evaluation_strategy="steps" \ --eval_steps="20000" \ --report_to="wandb" \ --preprocessing_num_workers="4" \ --fused_batch_size="4" \ --length_column_name="input_lengths" \ --fuse_loss_wer \ --group_by_length \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --use_auth_token ```
BigSalmon/T5Salmon
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
--- language: - en tags: - esb datasets: - esb/datasets - revdotcom/earnings22 --- To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation), then execute: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \ --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \ --model_name_or_path="stt_en_conformer_transducer_xlarge" \ --dataset_name="esb/datasets" \ --tokenizer_path="tokenizer" \ --vocab_size="1024" \ --max_steps="100000" \ --dataset_config_name="earnings22" \ --output_dir="./" \ --run_name="conformer-rnnt-earnings22" \ --wandb_project="rnnt" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="50" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --save_strategy="steps" \ --save_steps="20000" \ --evaluation_strategy="steps" \ --eval_steps="20000" \ --report_to="wandb" \ --preprocessing_num_workers="4" \ --fused_batch_size="4" \ --length_column_name="input_lengths" \ --fuse_loss_wer \ --group_by_length \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --use_auth_token ```
BigSalmon/T5Salmon2
[ "pytorch", "jax", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
13
null
--- language: - en tags: - esb datasets: - esb/datasets - ldc/chime-4 --- To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation), then execute: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \ --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \ --model_name_or_path="stt_en_conformer_transducer_xlarge" \ --dataset_name="esb/datasets" \ --dataset_config_name="chime4" \ --tokenizer_path="tokenizer" \ --vocab_size="1024" \ --max_steps="100000" \ --output_dir="./" \ --run_name="conformer-rnnt-chime4" \ --wandb_project="rnnt" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="50" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --save_strategy="steps" \ --save_steps="20000" \ --evaluation_strategy="steps" \ --eval_steps="20000" \ --report_to="wandb" \ --preprocessing_num_workers="4" \ --fused_batch_size="4" \ --length_column_name="input_lengths" \ --fuse_loss_wer \ --group_by_length \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --use_auth_token ```
BigSalmon/TS3
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible", "has_space" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 40 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 40, "warmup_steps": 4, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
BigTooth/DialoGPT-Megumin
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
--- language: - en tags: - esb datasets: - esb/datasets - edinburghcstr/ami --- To reproduce this run, first install NVIDIA NeMo according to the [official instructions](https://github.com/NVIDIA/NeMo#installation), then execute: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_rnnt.py \ --config_path="conf/conformer_transducer_bpe_xlarge.yaml" \ --model_name_or_path="stt_en_conformer_transducer_xlarge" \ --dataset_name="esb/datasets" \ --tokenizer_path="tokenizer" \ --vocab_size="1024" \ --max_steps="100000" \ --dataset_config_name="ami" \ --output_dir="./" \ --run_name="conformer-rnnt-ami" \ --wandb_project="rnnt" \ --per_device_train_batch_size="8" \ --per_device_eval_batch_size="4" \ --logging_steps="50" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --save_strategy="steps" \ --save_steps="20000" \ --evaluation_strategy="steps" \ --eval_steps="20000" \ --report_to="wandb" \ --preprocessing_num_workers="4" \ --fused_batch_size="4" \ --length_column_name="input_lengths" \ --fuse_loss_wer \ --group_by_length \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --use_auth_token ```
BigTooth/Megumin-v0.2
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: - en tags: - esb datasets: - esb/datasets - facebook/voxpopuli --- To reproduce this run, first install Whisper from the Transformers compatible repo [patrickvonplaten/whisper](https://github.com/patrickvonplaten/whisper): ``` pip install git+https://github.com/openai/whisper.git ``` Then execute the command: ```python #!/usr/bin/env bash CUDA_VISIBLE_DEVICES=0 python run_speech_recognition_whisper.py \ --model_name_or_path="medium.en" \ --dataset_name="esb/datasets" \ --dataset_config_name="voxpopuli" \ --max_steps="5000" \ --output_dir="./" \ --run_name="whisper-voxpopuli" \ --wandb_project="whisper" \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="16" \ --logging_steps="25" \ --learning_rate="1e-4" \ --warmup_steps="500" \ --report_to="wandb" \ --preprocessing_num_workers="16" \ --evaluation_strategy="steps" \ --eval_steps="500" \ --save_strategy="steps" \ --save_steps="500" \ --generation_max_length="224" \ --length_column_name="input_lengths" \ --gradient_checkpointing \ --group_by_length \ --freeze_encoder \ --fp16 \ --overwrite_output_dir \ --do_train \ --do_eval \ --do_predict \ --predict_with_generate \ --use_auth_token ```
BritishLibraryLabs/bl-books-genre
[ "pytorch", "distilbert", "text-classification", "multilingual", "dataset:blbooksgenre", "transformers", "genre", "books", "library", "historic", "glam ", "lam", "license:mit", "has_space" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
76
null
--- license: mit language: - en library_name: adapter-transformers ---
Brokette/projetCS
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "transformers" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: vit-base-tour-demo-v5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vit-base-tour-demo-v5 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.1467 - Accuracy: 0.4880 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 3.4573 | 0.13 | 100 | 3.2038 | 0.3334 | | 2.9547 | 0.27 | 200 | 2.8725 | 0.3672 | | 2.6093 | 0.4 | 300 | 2.7619 | 0.3954 | | 2.6212 | 0.54 | 400 | 2.6269 | 0.3942 | | 2.5063 | 0.67 | 500 | 2.5060 | 0.4211 | | 2.3113 | 0.81 | 600 | 2.5348 | 0.4201 | | 2.5702 | 0.94 | 700 | 2.3345 | 0.4502 | | 2.0479 | 1.08 | 800 | 2.3183 | 0.4484 | | 1.754 | 1.21 | 900 | 2.2546 | 0.4661 | | 1.7772 | 1.34 | 1000 | 2.1994 | 0.4794 | | 1.9276 | 1.48 | 1100 | 2.1672 | 0.4731 | | 1.6621 | 1.61 | 1200 | 2.1676 | 0.4845 | | 1.7063 | 1.75 | 1300 | 2.1446 | 0.4806 | | 1.8655 | 1.88 | 1400 | 2.1121 | 0.4933 | | 1.4577 | 2.02 | 1500 | 2.0934 | 0.4955 | | 1.1857 | 2.15 | 1600 | 2.1128 | 0.4906 | | 1.1684 | 2.28 | 1700 | 2.1218 | 0.4941 | | 1.3873 | 2.42 | 1800 | 2.1108 | 0.4957 | | 1.3545 | 2.55 | 1900 | 2.0985 | 0.4992 | | 0.9789 | 2.69 | 2000 | 2.0997 | 0.4961 | | 1.1772 | 2.82 | 2100 | 2.1141 | 0.4951 | | 1.0968 | 2.96 | 2200 | 2.1097 | 0.4922 | | 0.7883 | 3.09 | 2300 | 2.1170 | 0.5067 | | 0.7593 | 3.23 | 2400 | 2.1516 | 0.4847 | | 0.5671 | 3.36 | 2500 | 2.1414 | 0.4925 | | 0.6442 | 3.49 | 2600 | 2.1498 | 0.4880 | | 0.516 | 3.63 | 2700 | 2.1442 | 0.4878 | | 0.6283 | 3.76 | 2800 | 2.1518 | 0.4882 | | 0.5629 | 3.9 | 2900 | 2.1467 | 0.4880 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
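The ViT card above reports image-classification accuracy but does not show how to run inference. Below is a minimal, hedged sketch using the `image-classification` pipeline; the repo id and the input file name are assumptions, since the card does not provide them.

```python
from transformers import pipeline

# Placeholder repo id and file name; neither is given in the card.
classifier = pipeline("image-classification", model="your-username/vit-base-tour-demo-v5")

predictions = classifier("landmark_photo.jpg", top_k=3)
for p in predictions:
    print(f"{p['label']}: {p['score']:.3f}")
```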
Bryan190/Aguy190
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
You can use Stable Diffusion with Dream's face on it! The object name is dream-youtuber and the class name is male. You can use it in prompts like: photo of dream-youtuber male, digital painting. To use this model in AUTOMATIC1111's notebook (https://github.com/AUTOMATIC1111/stable-diffusion-webui), insert this block just below the Normal 1.4 Model block: ```` #@title Stable-Dreamfusion Model # get a token from https://huggingface.co/settings/tokens user_token = "" #@param {type:"string"} user_header = f"\"Authorization: Bearer {user_token}\"" !wget --header={user_header} https://huggingface.co/whyperr/stable-dreamfusion/resolve/main/model.ckpt -O models/sd-v1-4.ckpt ```` ![1.png](https://s3.amazonaws.com/moonup/production/uploads/1664870802717-6305531a99870e13d3de6019.png) Run the next steps normally, and you should be able to generate juicy dream faces! ![1.png](https://s3.amazonaws.com/moonup/production/uploads/1664871558024-6305531a99870e13d3de6019.png) ![2.png](https://s3.amazonaws.com/moonup/production/uploads/1664871567648-6305531a99870e13d3de6019.png) ![3.png](https://s3.amazonaws.com/moonup/production/uploads/1664871593792-6305531a99870e13d3de6019.png) ![4.png](https://s3.amazonaws.com/moonup/production/uploads/1664871605434-6305531a99870e13d3de6019.png) ![5.png](https://s3.amazonaws.com/moonup/production/uploads/1664871630536-6305531a99870e13d3de6019.png)
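If you would rather script generation outside the web UI, the same checkpoint can be loaded with diffusers. This is only a sketch under assumptions the card does not state: it presumes a recent diffusers release that provides `StableDiffusionPipeline.from_single_file` (older releases used `from_ckpt`) and that the checkpoint sits at `models/sd-v1-4.ckpt`, as written by the wget step above.

```python
# Hedged sketch: generating with diffusers instead of the AUTOMATIC1111 UI.
# Assumes a recent diffusers version with StableDiffusionPipeline.from_single_file.
import torch
from diffusers import StableDiffusionPipeline

# Load the single-file checkpoint downloaded by the wget step above.
pipe = StableDiffusionPipeline.from_single_file(
    "models/sd-v1-4.ckpt",
    torch_dtype=torch.float16,
).to("cuda")

# Prompt pattern recommended by the card: "photo of dream-youtuber male, ..."
image = pipe("photo of dream-youtuber male, digital painting").images[0]
image.save("dream.png")
```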
CAMeL-Lab/bert-base-arabic-camelbert-ca-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
42
null
--- tags: - generated_from_trainer datasets: - samsum model-index: - name: pegagus-samsum results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegagus-samsum This model is a fine-tuned version of [google/pegasus-cnn_dailymail](https://huggingface.co/google/pegasus-cnn_dailymail) on the samsum dataset. It achieves the following results on the evaluation set: - Loss: 1.4876 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.6985 | 0.54 | 500 | 1.4876 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.12.1+cu102 - Datasets 2.0.0 - Tokenizers 0.10.3
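Once the checkpoint is available on the Hub, dialogue summarization is a one-liner with the `summarization` pipeline. A minimal sketch follows; the model id is an assumption (the card only gives the output name `pegagus-samsum`), and the sample dialogue is made up in the style of the samsum dataset.

```python
# Minimal inference sketch for the fine-tuned PEGASUS model described above.
# Replace the model id with the actual Hub repo if it was pushed under a namespace.
from transformers import pipeline

summarizer = pipeline("summarization", model="pegagus-samsum")

dialogue = (
    "Hannah: Hey, do you have Betty's number?\n"
    "Amanda: Lemme check... Sorry, can't find it.\n"
    "Amanda: Ask Larry, he called her last time we were at the park together.\n"
    "Hannah: OK, thanks!"
)
print(summarizer(dialogue, max_length=60, min_length=10)[0]["summary_text"])
```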
CAMeL-Lab/bert-base-arabic-camelbert-ca-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
2022-10-03T17:16:25Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - metrics: - type: mean_reward value: 229.59 +/- 12.98 name: mean_reward task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
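The usage section above is still a TODO; a minimal version typically looks like the sketch below. The `repo_id` and `filename` are hypothetical placeholders, since the card does not state where the checkpoint was uploaded.

```python
# Hedged sketch of loading and evaluating the PPO agent with stable-baselines3.
# repo_id and filename are placeholders; the card leaves them unspecified.
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

checkpoint = load_from_hub(
    repo_id="<user>/ppo-LunarLander-v2",   # placeholder repo id
    filename="ppo-LunarLander-v2.zip",     # placeholder filename
)
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```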
CAMeL-Lab/bert-base-arabic-camelbert-ca
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
580
2022-10-03T17:35:27Z
--- language: - ru license: apache-2.0 --- # Model DmitryPogrebnoy/distilbert-base-russian-cased # Model Description This model is the Russian version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased). The code for the transforming process can be found [here](https://github.com/DmitryPogrebnoy/MedSpellChecker/blob/main/spellchecker/ml_ranging/models/distilbert_base_russian_cased/distilbert_from_multilang_to_ru.ipynb). This model gives exactly the same representations as the original model, which preserves the original accuracy. There is a similar model, [Geotrend/distilbert-base-ru-cased](https://huggingface.co/Geotrend/distilbert-base-ru-cased); however, our model was derived with a slightly different approach. Instead of using Wikipedia's Russian dataset to pick the necessary tokens, we used regular expressions to select only Russian tokens, punctuation marks, numbers and other service tokens. Thus, our model retains several hundred tokens that were filtered out in [Geotrend/distilbert-base-ru-cased](https://huggingface.co/Geotrend/distilbert-base-ru-cased). This model was created as part of a master's project to develop a method for correcting typos in medical histories, using BERT models to rank correction candidates. The project is open source and can be found [here](https://github.com/DmitryPogrebnoy/MedSpellChecker). # How to Get Started With the Model You can use the model directly with a pipeline for masked language modeling: ```python >>> from transformers import pipeline >>> fill_mask = pipeline('fill-mask', model='DmitryPogrebnoy/distilbert-base-russian-cased') >>> fill_mask("Я [MASK] на заводе.") [{'score': 0.11498937010765076, 'token': 1709, 'token_str': 'работал', 'sequence': 'Я работал на заводе.'}, {'score': 0.07212855666875839, 'token': 12375, 'token_str': '##росла', 'sequence': 'Яросла на заводе.'}, {'score': 0.03575785085558891, 'token': 4059, 'token_str': 'находился', 'sequence': 'Я находился на заводе.'}, {'score': 0.02496381290256977, 'token': 5075, 'token_str': 'работает', 'sequence': 'Я работает на заводе.'}, {'score': 0.020675526931881905, 'token': 5774, 'token_str': '##дро', 'sequence': 'Ядро на заводе.'}] ``` Or you can load the model and tokenizer and do what you need to do: ```python >>> from transformers import AutoTokenizer, AutoModelForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("DmitryPogrebnoy/distilbert-base-russian-cased") >>> model = AutoModelForMaskedLM.from_pretrained("DmitryPogrebnoy/distilbert-base-russian-cased") ```
CAMeL-Lab/bert-base-arabic-camelbert-mix-did-madar-corpus6
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-5000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text metrics: - name: Accuracy type: accuracy value: 0.9104 - name: F1 type: f1 value: 0.9115673114883537 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-5000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.5802 - Accuracy: 0.9104 - F1: 0.9116 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
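For inference, the fine-tuned checkpoint can be used through the `sentiment-analysis` pipeline. The sketch below assumes the model was pushed to the Hub under the output name from the card; substitute the real repo id if it lives under a user namespace.

```python
# Inference sketch; the model id is assumed from the card's output name.
from transformers import pipeline

classifier = pipeline(
    "sentiment-analysis",
    model="finetuning-sentiment-model-5000-samples",
)
print(classifier([
    "A genuinely moving film with a terrific cast.",
    "Two hours of my life I will never get back.",
]))
```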
CAMeL-Lab/bert-base-arabic-camelbert-mix-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1860
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text metrics: - name: Accuracy type: accuracy value: 0.8166666666666667 - name: F1 type: f1 value: 0.8307692307692307 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6069 - Accuracy: 0.8167 - F1: 0.8308 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
CAMeL-Lab/bert-base-arabic-camelbert-mix-poetry
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:1905.05700", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text metrics: - name: Accuracy type: accuracy value: 0.6633333333333333 - name: F1 type: f1 value: 0.7247956403269755 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6551 - Accuracy: 0.6633 - F1: 0.7248 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-egy
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
62
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.92 - name: F1 type: f1 value: 0.9202951757885023 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2245 - Accuracy: 0.92 - F1: 0.9203 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8171 | 1.0 | 250 | 0.3222 | 0.907 | 0.9055 | | 0.2546 | 2.0 | 500 | 0.2245 | 0.92 | 0.9203 | ### Framework versions - Transformers 4.21.1 - Pytorch 1.8.1 - Datasets 2.4.0 - Tokenizers 0.12.1
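The reported accuracy and F1 can be checked against the emotion validation split with the `evaluate` library. This is only a sketch: the model id is assumed from the card's output name, the label names are assumed to keep the default `LABEL_<id>` form, and weighted F1 is assumed to match the card's metric.

```python
# Hedged evaluation sketch for the emotion classifier described above.
# Only the dataset name and target metrics come from the card; the rest is assumed.
import evaluate
from datasets import load_dataset
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="distilbert-base-uncased-finetuned-emotion",  # assumed repo id
)
ds = load_dataset("emotion", split="validation")

# Assumes default label names of the form "LABEL_<class id>".
preds = [int(p["label"].split("_")[-1]) for p in classifier(ds["text"])]

accuracy = evaluate.load("accuracy").compute(predictions=preds, references=ds["label"])
f1 = evaluate.load("f1").compute(predictions=preds, references=ds["label"], average="weighted")
print(accuracy, f1)
```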
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1862
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuning-sentiment-model-3000-samples results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text metrics: - name: Accuracy type: accuracy value: 0.82 - name: F1 type: f1 value: 0.8235294117647058 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuning-sentiment-model-3000-samples This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6107 - Accuracy: 0.82 - F1: 0.8235 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.22.2 - Pytorch 1.12.1+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1