| Column | Type | Notes |
| --- | --- | --- |
| modelId | string | length 4 to 81 |
| tags | list | |
| pipeline_tag | string | 17 classes |
| config | dict | |
| downloads | int64 | 0 to 59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | length 51 to 438k |
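Each row below pairs a model's metadata with its model card. As a quick orientation, here is a minimal sketch of loading and filtering a dataset with this schema via the `datasets` library; the dataset ID `user/models-metadata` is a hypothetical placeholder, not the actual repository name.

```python
# Minimal sketch for a dataset with the schema above (hypothetical dataset ID).
from datasets import load_dataset

ds = load_dataset("user/models-metadata", split="train")

# Keep only fill-mask models with at least one million downloads.
popular_fill_mask = ds.filter(
    lambda row: row["pipeline_tag"] == "fill-mask" and row["downloads"] >= 1_000_000
)

for row in popular_fill_mask:
    print(row["modelId"], row["downloads"], row["first_commit"])
```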
bert-base-german-dbmdz-uncased
[ "pytorch", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68,305
2022-10-14T02:29:30Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: roberta-base.CEBaB_confounding.food_service_positive.sa.5-class.seed_42 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE type: OpenTable args: opentable metrics: - name: Accuracy type: accuracy value: 0.702928870292887 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.food_service_positive.sa.5-class.seed_42 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.7254 - Accuracy: 0.7029 - Macro-f1: 0.6833 - Weighted-macro-f1: 0.6951 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
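The auto-generated trainer cards in this dump, such as the one above, list their hyperparameters in prose. A rough sketch of the equivalent `transformers.TrainingArguments` is shown below; the mapping from the card's wording onto the standard argument names is an assumption, not the authors' actual training script.

```python
# Hyperparameters from the card above, reconstructed as TrainingArguments.
# The output_dir and the one-to-one mapping of names are assumptions.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="roberta-base.CEBaB_confounding.food_service_positive.sa.5-class.seed_42",
    learning_rate=2e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=5.0,
)
```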
bert-base-multilingual-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "mn", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "th", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4,749,504
2022-10-14T02:32:19Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: roberta-base.CEBaB_confounding.food_service_positive.sa.5-class.seed_43 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE type: OpenTable args: opentable metrics: - name: Accuracy type: accuracy value: 0.7112970711297071 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.food_service_positive.sa.5-class.seed_43 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.7180 - Accuracy: 0.7113 - Macro-f1: 0.6981 - Weighted-macro-f1: 0.7073 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
bert-base-uncased
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
59,663,489
2022-10-14T03:06:13Z
--- license: mit language: en tags: - bert - cloze - distractor - generation datasets: - dgen widget: - text: "The only known planet with large amounts of water is [MASK]. [SEP] earth" - text: "The products of photosynthesis are glucose and [MASK] else. [SEP] oxygen" --- # cdgp-csg-scibert-dgen ## Model description This model is a Candidate Set Generator in **"CDGP: Automatic Cloze Distractor Generation based on Pre-trained Language Model", Findings of EMNLP 2022**. Its inputs are a stem and an answer, and its output is a candidate set of distractors. It is fine-tuned on the [**DGen**](https://github.com/DRSY/DGen) dataset, based on the [**allenai/scibert_scivocab_uncased**](https://huggingface.co/allenai/scibert_scivocab_uncased) model. For more details, you can see our **paper** or [**GitHub**](https://github.com/AndyChiangSH/CDGP). ## How to use? 1. Download the model with Hugging Face Transformers. ```python from transformers import BertTokenizer, BertForMaskedLM, pipeline tokenizer = BertTokenizer.from_pretrained("AndyChiang/cdgp-csg-scibert-dgen") csg_model = BertForMaskedLM.from_pretrained("AndyChiang/cdgp-csg-scibert-dgen") ``` 2. Create an unmasker. ```python unmasker = pipeline("fill-mask", tokenizer=tokenizer, model=csg_model, top_k=10) ``` 3. Use the unmasker to generate the candidate set of distractors. ```python sent = "The only known planet with large amounts of water is [MASK]. [SEP] earth" cs = unmasker(sent) print(cs) ``` ## Dataset This model is fine-tuned on the [DGen](https://github.com/DRSY/DGen) dataset, which covers multiple domains including science, vocabulary, common sense, and trivia. It is compiled from a wide variety of datasets including SciQ, MCQL, AI2 Science Questions, etc. The details of the DGen dataset are shown below. | DGen dataset | Train | Valid | Test | Total | | ----------------------- | ----- | ----- | ---- | ----- | | **Number of questions** | 2321 | 300 | 259 | 2880 | You can also use the [dataset](https://huggingface.co/datasets/AndyChiang/dgen) we have already cleaned. ## Training We fine-tune the model with a special method called **"Answer-Relating Fine-Tune"**. More details are in our paper.
### Training hyperparameters The following hyperparameters were used during training: - Pre-trained language model: [allenai/scibert_scivocab_uncased](https://huggingface.co/allenai/scibert_scivocab_uncased) - Optimizer: adam - Learning rate: 0.0001 - Max length of input: 64 - Batch size: 64 - Epoch: 1 - Device: NVIDIA® Tesla T4 in Google Colab ## Testing The evaluation of this model as a Candidate Set Generator in CDGP is as follows: | P@1 | F1@3 | MRR | NDCG@10 | | ----- | ----- | ----- | ------- | | 13.13 | 12.23 | 25.12 | 34.17 | ## Other models ### Candidate Set Generator | Models | CLOTH | DGen | | ----------- | ----------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | | **BERT** | [cdgp-csg-bert-cloth](https://huggingface.co/AndyChiang/cdgp-csg-bert-cloth) | [cdgp-csg-bert-dgen](https://huggingface.co/AndyChiang/cdgp-csg-bert-dgen) | | **SciBERT** | [cdgp-csg-scibert-cloth](https://huggingface.co/AndyChiang/cdgp-csg-scibert-cloth) | [*cdgp-csg-scibert-dgen*](https://huggingface.co/AndyChiang/cdgp-csg-scibert-dgen) | | **RoBERTa** | [cdgp-csg-roberta-cloth](https://huggingface.co/AndyChiang/cdgp-csg-roberta-cloth) | [cdgp-csg-roberta-dgen](https://huggingface.co/AndyChiang/cdgp-csg-roberta-dgen) | | **BART** | [cdgp-csg-bart-cloth](https://huggingface.co/AndyChiang/cdgp-csg-bart-cloth) | [cdgp-csg-bart-dgen](https://huggingface.co/AndyChiang/cdgp-csg-bart-dgen) | ### Distractor Selector **fastText**: [cdgp-ds-fasttext](https://huggingface.co/AndyChiang/cdgp-ds-fasttext) ## Citation None
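A usage note on the cdgp-csg-scibert-dgen card above: the fill-mask pipeline returns scored predictions rather than a final distractor list, so a small post-processing step is usually needed. The sketch below assumes the standard `transformers` fill-mask output format (a list of dicts with `token_str` and `score` keys) and the `unmasker` created in the card's step 2; the top-3 cutoff is an arbitrary illustration.

```python
# Turn the raw fill-mask predictions from the card's example into a plain
# candidate list, dropping the gold answer itself.
sent = "The only known planet with large amounts of water is [MASK]. [SEP] earth"
predictions = unmasker(sent)  # `unmasker` as created in step 2 of the card

candidates = [p["token_str"].strip() for p in predictions]
answer = "earth"
distractors = [c for c in candidates if c.lower() != answer][:3]
print(distractors)
```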
bert-large-cased-whole-word-masking
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,316
2022-10-14T03:12:08Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: roberta-base.CEBaB_confounding.observational.absa.5-class.seed_42 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE-ABSA type: OpenTable args: opentable-absa metrics: - name: Accuracy type: accuracy value: 0.8867809057527539 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.observational.absa.5-class.seed_42 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 0.4927 - Accuracy: 0.8868 - Macro-f1: 0.8847 - Weighted-macro-f1: 0.8871 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
bert-large-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
388,769
2022-10-14T03:16:16Z
--- license: mit language: en tags: - roberta - cloze - distractor - generation datasets: - cloth widget: - text: "I feel <mask> now. </s> happy" - text: "The old man was waiting for a ride across the <mask>. </s> river" --- # cdgp-csg-roberta-cloth ## Model description This model is a Candidate Set Generator in **"CDGP: Automatic Cloze Distractor Generation based on Pre-trained Language Model", Findings of EMNLP 2022**. Its inputs are a stem and an answer, and its output is a candidate set of distractors. It is fine-tuned on the [**CLOTH**](https://www.cs.cmu.edu/~glai1/data/cloth/) dataset, based on the [**roberta-base**](https://huggingface.co/roberta-base) model. For more details, you can see our **paper** or [**GitHub**](https://github.com/AndyChiangSH/CDGP). ## How to use? 1. Download the model with Hugging Face Transformers. ```python from transformers import RobertaTokenizer, RobertaForMaskedLM, pipeline tokenizer = RobertaTokenizer.from_pretrained("AndyChiang/cdgp-csg-roberta-cloth") csg_model = RobertaForMaskedLM.from_pretrained("AndyChiang/cdgp-csg-roberta-cloth") ``` 2. Create an unmasker. ```python unmasker = pipeline("fill-mask", tokenizer=tokenizer, model=csg_model, top_k=10) ``` 3. Use the unmasker to generate the candidate set of distractors. ```python sent = "I feel <mask> now. </s> happy" cs = unmasker(sent) print(cs) ``` ## Dataset This model is fine-tuned on the [CLOTH](https://www.cs.cmu.edu/~glai1/data/cloth/) dataset, which is a collection of nearly 100,000 cloze questions from middle school and high school English exams. The details of the CLOTH dataset are shown below. | Number of questions | Train | Valid | Test | | ------------------- | ----- | ----- | ----- | | Middle school | 22056 | 3273 | 3198 | | High school | 54794 | 7794 | 8318 | | Total | 76850 | 11067 | 11516 | You can also use the [dataset](https://huggingface.co/datasets/AndyChiang/cloth) we have already cleaned. ## Training We fine-tune the model with a special method called **"Answer-Relating Fine-Tune"**. More details are in our paper.
### Training hyperparameters The following hyperparameters were used during training: - Pre-trained language model: [roberta-base](https://huggingface.co/roberta-base) - Optimizer: adam - Learning rate: 0.0001 - Max length of input: 64 - Batch size: 64 - Epoch: 1 - Device: NVIDIA® Tesla T4 in Google Colab ## Testing The evaluation of this model as a Candidate Set Generator in CDGP is as follows: | P@1 | F1@3 | F1@10 | MRR | NDCG@10 | | ----- | ---- | ----- | ----- | ------- | | 10.50 | 9.83 | 10.25 | 20.42 | 28.17 | ## Other models ### Candidate Set Generator | Models | CLOTH | DGen | | ----------- | ----------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | | **BERT** | [cdgp-csg-bert-cloth](https://huggingface.co/AndyChiang/cdgp-csg-bert-cloth) | [cdgp-csg-bert-dgen](https://huggingface.co/AndyChiang/cdgp-csg-bert-dgen) | | **SciBERT** | [cdgp-csg-scibert-cloth](https://huggingface.co/AndyChiang/cdgp-csg-scibert-cloth) | [cdgp-csg-scibert-dgen](https://huggingface.co/AndyChiang/cdgp-csg-scibert-dgen) | | **RoBERTa** | [*cdgp-csg-roberta-cloth*](https://huggingface.co/AndyChiang/cdgp-csg-roberta-cloth) | [cdgp-csg-roberta-dgen](https://huggingface.co/AndyChiang/cdgp-csg-roberta-dgen) | | **BART** | [cdgp-csg-bart-cloth](https://huggingface.co/AndyChiang/cdgp-csg-bart-cloth) | [cdgp-csg-bart-dgen](https://huggingface.co/AndyChiang/cdgp-csg-bart-dgen) | ### Distractor Selector **fastText**: [cdgp-ds-fasttext](https://huggingface.co/AndyChiang/cdgp-ds-fasttext) ## Citation None
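One detail worth noting across the two CDGP cards above: the mask and separator tokens differ between backbones ([MASK] and [SEP] for the BERT/SciBERT models, <mask> and </s> for RoBERTa). A small, hypothetical helper that reads them from the tokenizer instead of hard-coding them is sketched below; the underscore blank convention is an illustrative assumption.

```python
# Build the "stem with mask ... separator ... answer" input without hard-coding
# model-specific special tokens.
def build_csg_input(stem_with_blank: str, answer: str, tokenizer) -> str:
    # stem_with_blank marks the blank with "_" (illustrative convention).
    stem = stem_with_blank.replace("_", tokenizer.mask_token)
    return f"{stem} {tokenizer.sep_token} {answer}"

# With the RobertaTokenizer from the card above, this yields
# "I feel <mask> now. </s> happy".
print(build_csg_input("I feel _ now.", "happy", tokenizer))
```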
bert-large-uncased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
480,510
2022-10-14T03:17:00Z
--- tags: - generated_from_trainer metrics: - rouge model-index: - name: pegasus-newsroom-headline_writer_57k results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-newsroom-headline_writer_57k This model is a fine-tuned version of [google/pegasus-newsroom](https://huggingface.co/google/pegasus-newsroom) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.3599 - Rouge1: 42.2586 - Rouge2: 23.2731 - Rougel: 35.8685 - Rougelsum: 36.0581 - Gen Len: 34.3651 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.5213 | 1.0 | 5670 | 1.4040 | 41.8648 | 22.8205 | 35.3983 | 35.535 | 34.8817 | | 1.4171 | 2.0 | 11340 | 1.3672 | 42.26 | 23.2611 | 35.8016 | 35.9753 | 34.3492 | | 1.3722 | 3.0 | 17010 | 1.3599 | 42.2586 | 23.2731 | 35.8685 | 36.0581 | 34.3651 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.0 - Tokenizers 0.13.1
bert-large-uncased-whole-word-masking
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
76,685
2022-10-14T03:17:11Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: roberta-base.CEBaB_confounding.observational.absa.5-class.seed_43 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE-ABSA type: OpenTable args: opentable-absa metrics: - name: Accuracy type: accuracy value: 0.8857609139126887 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.observational.absa.5-class.seed_43 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 0.4706 - Accuracy: 0.8858 - Macro-f1: 0.8840 - Weighted-macro-f1: 0.8860 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
bert-large-uncased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,058,496
2022-10-14T03:20:05Z
--- tags: - flair - token-classification - sequence-tagger-model language: en widget: - text: >- SELECT shipping FROM users WHERE shipping = '201 Thayer St Providence RI 02912' license: mit datasets: - beki/privy --- | Feature | Description | | --- | --- | | **Name** | `en_spacy_pii_distilbert` | | **Version** | `0.0.0` | | **spaCy** | `>=3.4.1,<3.5.0` | | **Default Pipeline** | `transformer`, `ner` | | **Components** | `transformer`, `ner` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | Trained on a new [dataset for structured PII](https://huggingface.co/datasets/beki/privy) generated by [Privy](https://github.com/pixie-io/pixie/tree/main/src/datagen/pii/privy). For more details, see this [blog post](https://blog.px.dev/detect-pii/) | | **License** | MIT | | **Author** | [Benjamin Kilimnik](https://www.linkedin.com/in/benkilimnik/) | --- ## English PII in Flair This is the large 5-class NER model for English trained on protocol trace data generated by [Privy](https://github.com/pixie-io/pixie/tree/main/src/datagen/pii/privy/). F1-Score: **0.9522** Predicts 5 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | DATE_TIME | dates and times | | NRP | nationalities, religious and political groups | Uses distilbert embeddings. ---
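Since the card above describes a spaCy pipeline (`en_spacy_pii_distilbert`) for PII detection, a minimal usage sketch follows; it assumes the pipeline package has already been installed from its Hugging Face repository, which the card does not spell out here.

```python
# Minimal sketch: run the PII NER pipeline over a protocol-trace-like string.
import spacy

nlp = spacy.load("en_spacy_pii_distilbert")
doc = nlp("SELECT shipping FROM users WHERE shipping = '201 Thayer St Providence RI 02912'")
for ent in doc.ents:
    print(ent.text, ent.label_)
```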
camembert-base
[ "pytorch", "tf", "safetensors", "camembert", "fill-mask", "fr", "dataset:oscar", "arxiv:1911.03894", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "CamembertForMaskedLM" ], "model_type": "camembert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,440,898
2022-10-14T03:22:12Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: roberta-base.CEBaB_confounding.observational.absa.5-class.seed_44 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE-ABSA type: OpenTable args: opentable-absa metrics: - name: Accuracy type: accuracy value: 0.8824969400244798 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.observational.absa.5-class.seed_44 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 0.5273 - Accuracy: 0.8825 - Macro-f1: 0.8809 - Weighted-macro-f1: 0.8827 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 44 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
ctrl
[ "pytorch", "tf", "ctrl", "en", "arxiv:1909.05858", "arxiv:1910.09700", "transformers", "license:bsd-3-clause", "has_space" ]
null
{ "architectures": null, "model_type": "ctrl", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17,007
2022-10-14T03:25:26Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: bert-base-uncased.CEBaB_confounding.observational.sa.5-class.seed_42 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE type: OpenTable args: opentable metrics: - name: Accuracy type: accuracy value: 0.6604901374775852 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased.CEBaB_confounding.observational.sa.5-class.seed_42 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.8234 - Accuracy: 0.6605 - Macro-f1: 0.6242 - Weighted-macro-f1: 0.6524 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
distilbert-base-cased-distilled-squad
[ "pytorch", "tf", "rust", "safetensors", "openvino", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "model-index", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
257,745
2022-10-14T03:27:21Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: roberta-base.CEBaB_confounding.uniform.absa.5-class.seed_42 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE-ABSA type: OpenTable args: opentable-absa metrics: - name: Accuracy type: accuracy value: 0.9024887800897593 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.uniform.absa.5-class.seed_42 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 0.3315 - Accuracy: 0.9025 - Macro-f1: 0.9009 - Weighted-macro-f1: 0.9025 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
distilbert-base-cased
[ "pytorch", "tf", "onnx", "distilbert", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1910.01108", "transformers", "license:apache-2.0", "has_space" ]
null
{ "architectures": null, "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
574,859
2022-10-14T03:28:07Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: bert-base-uncased.CEBaB_confounding.observational.sa.5-class.seed_43 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE type: OpenTable args: opentable metrics: - name: Accuracy type: accuracy value: 0.6592946802151823 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased.CEBaB_confounding.observational.sa.5-class.seed_43 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.8422 - Accuracy: 0.6593 - Macro-f1: 0.6196 - Weighted-macro-f1: 0.6403 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
distilbert-base-german-cased
[ "pytorch", "safetensors", "distilbert", "fill-mask", "de", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
43,667
2022-10-14T03:30:54Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: bert-base-uncased.CEBaB_confounding.observational.sa.5-class.seed_44 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE type: OpenTable args: opentable metrics: - name: Accuracy type: accuracy value: 0.6467423789599522 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased.CEBaB_confounding.observational.sa.5-class.seed_44 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.8446 - Accuracy: 0.6467 - Macro-f1: 0.5863 - Weighted-macro-f1: 0.6098 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 44 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
distilbert-base-multilingual-cased
[ "pytorch", "tf", "onnx", "safetensors", "distilbert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "mn", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "th", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,339,633
2022-10-14T03:32:20Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: roberta-base.CEBaB_confounding.uniform.absa.5-class.seed_43 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE-ABSA type: OpenTable args: opentable-absa metrics: - name: Accuracy type: accuracy value: 0.8912688698490412 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.uniform.absa.5-class.seed_43 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 0.3790 - Accuracy: 0.8913 - Macro-f1: 0.8893 - Weighted-macro-f1: 0.8914 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
distilroberta-base
[ "pytorch", "tf", "jax", "rust", "safetensors", "roberta", "fill-mask", "en", "dataset:openwebtext", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,342,240
2022-10-14T03:37:30Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: roberta-base.CEBaB_confounding.uniform.absa.5-class.seed_44 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE-ABSA type: OpenTable args: opentable-absa metrics: - name: Accuracy type: accuracy value: 0.9047327621379029 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.uniform.absa.5-class.seed_44 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 0.3441 - Accuracy: 0.9047 - Macro-f1: 0.9031 - Weighted-macro-f1: 0.9047 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 44 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
gpt2-medium
[ "pytorch", "tf", "jax", "rust", "safetensors", "gpt2", "text-generation", "en", "arxiv:1910.09700", "transformers", "license:mit", "has_space" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
759,601
2022-10-14T03:39:54Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: gpt2.CEBaB_confounding.observational.sa.5-class.seed_44 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE type: OpenTable args: opentable metrics: - name: Accuracy type: accuracy value: 0.5636580992229527 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2.CEBaB_confounding.observational.sa.5-class.seed_44 This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.9660 - Accuracy: 0.5637 - Macro-f1: 0.4288 - Weighted-macro-f1: 0.4995 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 44 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
t5-11b
[ "pytorch", "tf", "t5", "text2text-generation", "en", "fr", "ro", "de", "multilingual", "dataset:c4", "arxiv:1805.12471", "arxiv:1708.00055", "arxiv:1704.05426", "arxiv:1606.05250", "arxiv:1808.09121", "arxiv:1810.12885", "arxiv:1905.10044", "arxiv:1910.09700", "transformers", "summarization", "translation", "license:apache-2.0", "autotrain_compatible", "has_space" ]
translation
{ "architectures": [ "T5WithLMHeadModel" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
37,600
2022-10-14T03:51:37Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: bert-base-uncased.CEBaB_confounding.food_service_positive.sa.5-class.seed_42 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE type: OpenTable args: opentable metrics: - name: Accuracy type: accuracy value: 0.6610878661087866 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-uncased.CEBaB_confounding.food_service_positive.sa.5-class.seed_42 This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.7885 - Accuracy: 0.6611 - Macro-f1: 0.6366 - Weighted-macro-f1: 0.6560 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
AdapterHub/bert-base-uncased-pf-race
[ "bert", "en", "dataset:race", "arxiv:2104.08247", "adapter-transformers", "adapterhub:rc/race" ]
null
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: - en tags: - generated_from_trainer datasets: - OpenTable metrics: - accuracy model-index: - name: lstm.CEBaB_confounding.food_service_positive.absa.5-class.seed_43 results: - task: name: Text Classification type: text-classification dataset: name: OpenTable OPENTABLE-ABSA type: OpenTable args: opentable-absa metrics: - name: Accuracy type: accuracy value: 0.7150142798857609 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # lstm.CEBaB_confounding.food_service_positive.absa.5-class.seed_43 This model is a fine-tuned version of [lstm](https://huggingface.co/lstm) on the OpenTable OPENTABLE-ABSA dataset. It achieves the following results on the evaluation set: - Loss: 1.1235 - Accuracy: 0.7150 - Macro-f1: 0.7127 - Weighted-macro-f1: 0.7174 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 43 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
AdapterHub/roberta-base-pf-quoref
[ "roberta", "en", "dataset:quoref", "arxiv:2104.08247", "adapter-transformers", "question-answering" ]
question-answering
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: other tags: - generated_from_trainer datasets: - scene_parse_150 model-index: - name: my_awesome_seg_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_seg_model This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the scene_parse_150 dataset. It achieves the following results on the evaluation set: - Loss: 2.6432 - Mean Iou: 0.0911 - Mean Accuracy: 0.1868 - Overall Accuracy: 0.4793 - Per Category Iou: [0.5426296173059788, 0.2947250971937308, 0.2593935859781869, 0.45173125506778267, 0.38272757070710234, 0.20843480268400397, 0.428475951976423, 0.5954193004635483, 0.0008033243744483343, 0.011841963612875444, 0.0, 0.0, 0.39798770262716604, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6574200718071466, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.050726353169051976, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] - Per Category Accuracy: [0.8164239683360996, 0.9948410129661233, 0.9814696886800068, 0.6506930635726559, 0.4434671330667499, 0.21132747906583407, 0.9948027668723126, 0.6222113597731216, 0.001242502799466184, 0.012012012012012012, nan, 0.0, 0.7683107773931028, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9100106496272631, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.06326497745329611, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 6e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Per Category Iou | Per Category Accuracy | 
|:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:| | 3.634 | 1.0 | 20 | 3.8881 | 0.0566 | 0.1433 | 0.4114 | [0.504504029524742, 0.5950223325366936, 0.2625995070347788, 0.33940870574657567, 0.3850212978355308, 0.074853871376798, 0.18299680111030245, 0.0, 0.0, 0.0, nan, 0.007002521846226028, 4.47888207103507e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.2535897038300512, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.9002304906157392, 0.9499009396025815, 0.99856493232759, 0.9708488580860635, 0.44038279641684747, 0.07532524407463058, 0.4042624789680314, 0.0, 0.0, 0.0, nan, 0.025626717032967032, 4.496200710399712e-05, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9675778014436162, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 3.9739 | 2.0 | 40 | 3.7982 | 0.0552 | 0.1408 | 0.3786 | [0.5223254280750929, 0.45182301380082057, 0.22738516550127968, 0.3274937868524926, 0.25070695043905344, 0.04562350119904077, 0.2036186860451179, 0.0038928668938973585, 0.0, 0.0, nan, 0.0, 0.004375586015984283, 0.0, 0.0, 0.0, 0.0, 0.0, nan, nan, 0.33718928451461205, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8678005281796617, 0.9504599933305871, 0.9992645278178899, 0.843322264908589, 0.28524671064567864, 0.045968332590853145, 0.6828192185455225, 0.0038928668938973585, 0.0, 0.0, nan, 0.0, 0.004406276696191718, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9502425748432138, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.955 | 3.0 | 60 | 3.5479 | 0.0554 | 0.1424 | 0.4044 | [0.49213043267600176, 0.30194482378590765, 0.2859892294581314, 0.38691602908496286, 0.3525017086564069, 0.015387721226531483, 0.2281319852061131, 0.0, 0.0, 0.0, nan, 3.3522174918708724e-05, 0.0013880182681113996, 0.0, 0.0, 0.0, 0.0, 0.0, nan, nan, 0.37159076248226036, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8531747898369094, 0.9888679652405893, 0.9902594781735177, 0.963841000791521, 0.3980052203909729, 0.015539229381073551, 0.632510749672836, 0.0, 0.0, 0.0, nan, 8.585164835164836e-05, 0.0013938222202239108, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8520293456395693, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, 
nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.9446 | 4.0 | 80 | 3.4800 | 0.0657 | 0.1517 | 0.4550 | [0.4449882423609476, 0.28648170853411653, 0.42453170028818443, 0.3914734837975144, 0.5349246204310152, 0.01874775283784478, 0.3501144731142875, 0.0, 3.067766972420775e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, nan, 0.43820729009713716, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8761465732160497, 0.9854253712312914, 0.9935960105118707, 0.9874032317226201, 0.6055605146850711, 0.01929190042208111, 0.7933538979248458, 0.0, 3.067908146830084e-05, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8087208614365163, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.7001 | 5.0 | 100 | 3.3922 | 0.0620 | 0.1535 | 0.4195 | [0.4945334266564037, 0.3275884067643566, 0.2505517841208164, 0.40500233820919723, 0.3143916334644628, 0.0932093595882812, 0.3893460732292237, 0.006088407373004099, 0.006371170075268, 0.0, 0.0, 0.0, 0.054395164874233404, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.45006988662835845, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8570017404393433, 0.9820808568233979, 0.9947530338227512, 0.9571902932489044, 0.3623019069667303, 0.09832451166197269, 0.9636661058141709, 0.0060903109889819534, 
0.006427267567609026, 0.0, nan, 0.0, 0.05503349669529248, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8572949946751863, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.0539 | 6.0 | 120 | 3.3807 | 0.0665 | 0.1539 | 0.4096 | [0.4557239761254338, 0.538077093295625, 0.21519181129706139, 0.4164958736117365, 0.2521352814191409, 0.1688438030919332, 0.3341010466167167, 0.002403697996918336, 0.006810133478616181, 0.0, 0.0, 0.0, 0.032154199011997175, 0.0, 0.0, 0.0, 0.0, 0.0, nan, nan, 0.5061900494540144, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8458132018036059, 0.9778143941623021, 0.9959818105172522, 0.955293538485299, 0.2924314007242748, 0.18084552133435014, 0.9641054402692092, 0.0024044177874071922, 0.006902793330367689, 0.0, nan, 0.0, 0.0327773031788139, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9023192521595077, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 3.4542 | 7.0 | 140 | 3.2846 | 0.0665 | 0.1558 | 0.4421 | [0.48285043920177817, 0.22899123035434077, 0.3979084730403262, 0.4324084024153106, 0.4183511256738188, 0.04914500055161255, 0.411052289475539, 0.009047203951713025, 0.0048163574403634985, 0.0, 0.0, 0.0, 0.07226666666666667, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.48484313794685396, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8663893611445237, 0.9918888169638479, 0.9845730225215933, 0.97879785324041, 0.47873984890379045, 0.05045341628976359, 0.9812768741820901, 0.00904959441963696, 0.0048779739534598335, 0.0, nan, 0.0, 0.07310822355109932, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8128623831499231, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.1068 | 8.0 | 160 | 3.1460 | 0.0779 | 0.1626 | 0.4936 | [0.5044978619794759, 0.28969572999232934, 0.4415237942637398, 0.368545316839182, 0.7113955562160957, 0.08962002394929758, 0.37304472487212614, 0.004740128396798875, 0.003987748665696264, 0.0, nan, 0.0, 0.046011181116336855, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.5186168330147943, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8138334688501676, 0.9890052766825556, 0.9886001811772936, 0.8806926774648159, 0.8337070823626744, 0.09267662848556694, 0.9851093662366798, 0.0047471838366757385, 0.00403429921308156, 0.0, nan, 0.0, 0.046625601366845015, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8669388238078334, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 3.0652 | 9.0 | 180 | 3.0503 | 0.0858 | 0.1720 | 0.5257 | [0.47798404514300696, 0.36283144684687263, 0.4367595082137244, 0.4594932038747587, 0.677201217621809, 0.16541877305642846, 0.43208867715615334, 0.1411433591035631, 0.013766786526748367, 0.0, 0.0, 0.0, 0.09402997932460372, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.6017965396698969, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.00016097875080489375, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.9004892045721812, 0.9947527413248592, 0.9819988698842079, 0.9857960578389544, 0.7873380118486909, 0.18283134122124148, 0.9885492615442139, 0.14222175249469354, 0.014388489208633094, 0.0, nan, 0.0, 0.09815206150802572, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8046385043190155, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.00016104788490444493, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.2965 | 10.0 | 200 | 3.1173 | 0.0811 | 0.1647 | 0.4896 | [0.5229488660985122, 0.392224879916029, 0.3463432976123331, 0.3697698922538323, 0.569269453753559, 0.14911380564455892, 0.36486779106416767, 0.22578360802974234, 0.006726111072105596, 0.0, 0.0, 0.0, 0.0580517070223143, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.5649560235616881, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.875820660829363, 0.9858863453578924, 0.9913806247925879, 0.7875875982161817, 0.6665142641498479, 0.1612893483037474, 0.9922882781828379, 0.2302648382522613, 0.007347640011658051, 0.0, nan, 0.0, 0.06047389955487613, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8284818364690569, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 3.2851 | 11.0 | 220 | 2.9625 | 0.0755 | 0.1612 | 0.4666 | [0.5053339820850222, 0.21792918454935623, 0.298880379395038, 0.5308182600288193, 0.3923031456182526, 0.11422445880356133, 0.43482414491541405, 0.364489404749228, 0.0030803769546035972, 0.0, 0.0, 0.0, 0.028203820656650846, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 
0.5090299381843157, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0005098070782688025, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.7921250159596271, 0.9960473920633987, 0.97903007363691, 0.9618525454159347, 0.45438205617605154, 0.11585711157589532, 0.9913161338567956, 0.3768374419813107, 0.003620131613259499, 0.0, nan, 0.0, 0.03232768310777393, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.7454147438172998, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0005099849688640756, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.5818 | 12.0 | 240 | 2.8716 | 0.1018 | 0.1892 | 0.5831 | [0.5015500596899881, 0.5378033377227164, 0.4605714428245389, 0.5088233520610543, 0.7301283494708399, 0.4526112448815791, 0.40433991971301025, 0.3821343893351621, 0.030590134681565195, 0.0, 0.0, 0.0, 0.030697825909942186, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5411111886647588, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0015802781289506952, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8963665808766708, 0.9813942996135664, 0.9936946714143489, 0.9367024459931659, 0.8785158413949427, 0.5024350833213781, 0.9914189568143579, 0.40242731700443013, 0.033133407985764904, 0.0, nan, 0.0, 0.03390135335641383, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9173470595195835, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0016104788490444492, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 
0.0, nan] | | 1.7639 | 13.0 | 260 | 2.9303 | 0.0831 | 0.1732 | 0.5094 | [0.523930247548338, 0.31197952983747357, 0.3666472883195613, 0.4425933170209121, 0.5795773707405771, 0.17544477717104104, 0.4208098777595474, 0.38291019244779795, 0.01638128131536703, 0.0, 0.0, 0.0, 0.11004442436563512, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4876567656765677, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0029476210350584306, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8165785247256624, 0.9961356637046627, 0.9786085225081396, 0.8354601440182243, 0.6823979931249531, 0.20305197109612727, 0.9901383436156291, 0.39577773667661903, 0.020662361368900616, 0.0, nan, 0.0, 0.1314239467649836, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.874216069104248, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0030330684990337127, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.0265 | 14.0 | 280 | 2.8281 | 0.0899 | 0.1808 | 0.5366 | [0.5275058077355107, 0.381172590224533, 0.3408257856285028, 0.4825318104924716, 0.5716022473487147, 0.4418003770565178, 0.4100251442208972, 0.4223755356049786, 0.0013206884439761152, 0.0, 0.0, 0.0, 0.036131455399061034, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5184656618747607, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0008532195707239035, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8567799856195359, 0.995184291571039, 0.9774335608513539, 0.8865808220235912, 0.6699203394463676, 0.4919019322103006, 0.989269022247149, 0.437564404047877, 0.0014419168290101395, 0.0, nan, 0.0, 0.043253450834045234, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8812566560170394, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0008589220528237063, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.7676 | 15.0 | 300 | 2.8422 | 0.0901 | 0.1738 | 0.4951 | [0.48959837596304334, 0.3398944056460107, 0.28756306092413275, 0.5226988669173221, 0.3886313301599894, 0.2592168569937975, 0.4213643115718376, 0.4193035936627882, 0.0026854928017718714, 0.0, 0.0, 0.0, 0.1318541284059818, 0.0, 0.0, 0.0, 0.0, 0.0, nan, nan, 0.6087148665370338, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0028904033305931958, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8702969498632512, 0.9938406010317974, 0.9831379548491833, 0.9829678179115426, 0.4550545625201087, 0.2701168084929666, 0.9890166386240419, 0.4300649104728688, 0.0029758709024251813, 0.0, nan, 0.0, 0.15817634099186187, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8149331440066264, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0029257032424307495, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.7863 | 16.0 | 320 | 2.8414 | 0.0875 | 0.1734 | 0.5091 | [0.5094849616669943, 0.2584284795828389, 0.3308251963054808, 0.5029563790510051, 0.5650627925374119, 0.20325673052703325, 0.41237609393203206, 0.4340576744807543, 0.00010351029280474077, 0.0, nan, 0.0, 0.14540260667737903, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.5767114788004136, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.00040119824542634, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.7663107389811374, 0.9974303144432021, 0.9775860367915474, 0.8875943551033804, 0.6635218528275505, 0.20715952249714964, 0.9694615816040382, 0.45775975198386487, 0.00012271632587320335, 0.0, nan, 0.0, 0.18308529292747627, nan, 0.0, 0.0, nan, 0.0, nan, nan, 
0.8248727961188025, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0004026197122611123, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.6209 | 17.0 | 340 | 2.8173 | 0.0959 | 0.1842 | 0.5253 | [0.5431304031887496, 0.3992027043512823, 0.3402036201206292, 0.4878314494718093, 0.5159305103326407, 0.48576308257277095, 0.39682067694619094, 0.42953145655250974, 0.0013527182324481763, 0.0, 0.0, 0.0, 0.1713117919670443, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.5365905170896547, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0073377725551638595, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8538501340608683, 0.9949292846073874, 0.9809494766487582, 0.8406967315971351, 0.6102317727799429, 0.5433369324745732, 0.9480557113479156, 0.44500224588474646, 0.0017026890214906967, 0.0, nan, 0.0, 0.2393327638145767, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.901905099988167, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.007542409276358171, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.6729 | 18.0 | 360 | 2.7700 | 0.0846 | 0.1711 | 0.4967 | [0.5262359321066772, 0.27363677671396036, 0.2891123001495629, 0.5101731608303705, 0.4474550698542896, 0.20675850002224166, 0.3914688252923547, 0.5548907120679374, 0.0006609385327164573, 0.0, 0.0, 0.0, 0.08405594870261471, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6086588947227908, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 2.668801708033093e-05, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8180468104265084, 0.9981953353341572, 0.9726529916676383, 0.810942296183324, 0.5194119681938366, 0.21057241447005792, 0.9963170686109554, 0.594190644789107, 0.0008283351996441227, 0.0, nan, 0.0, 0.11375387797311272, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8093125073955745, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 2.684131415074082e-05, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.3945 | 19.0 | 380 | 2.7868 | 0.0903 | 0.1810 | 0.5020 | [0.5326273132745795, 0.3266208695427605, 0.2673369763205829, 0.5252358070831109, 0.4040554791521483, 0.35154624683722235, 0.40917587085811385, 0.5218286498646433, 0.00315539124496668, 0.0, 0.0, 0.0, 0.22665204824080534, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.5763558422149684, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.007765783955890347, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8337645232607366, 0.992349791090449, 0.9872906819262196, 0.8546400509662349, 0.4754813379489524, 0.37766820951532404, 0.9903907272387362, 0.5424207995349698, 0.0041109969167523125, 0.0, nan, 0.0, 0.3067308124634684, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.867057152999645, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.008052394245222246, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.4481 | 20.0 | 400 | 2.6456 | 0.0987 | 0.1837 | 0.5156 | [0.5167163505504816, 0.35523302096280884, 0.28443662064344927, 0.5842090425978576, 0.3827271899396478, 0.36725726035886314, 0.43794539641573504, 0.5520829927078905, 0.0026167836878513875, 0.0, 0.0, 0.0, 0.20988903622025262, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.6458629893238435, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.004934004558318932, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8770806313964505, 0.9978912885698032, 0.9709847254984618, 0.9401774165524431, 0.4473376731764313, 0.3937812879891875, 0.9897737894933633, 0.5947895473881681, 0.003098587228298385, 0.0, nan, 0.0, 0.2704464727305427, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8590107679564548, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.005287738887695941, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.5418 | 21.0 | 420 | 2.6952 | 0.0977 | 0.1853 | 0.5180 | [0.5571456975491663, 0.3464053399958473, 0.270436971812536, 0.5237040212017121, 0.43342688449838607, 0.35333968455594367, 0.41582487636815335, 0.5807746830879159, 0.001005952874066535, 0.004368004368004368, 0.0, 0.0, 0.24892638503944203, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.642809334701709, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.016876519729397885, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8567631860119748, 0.9981364875733145, 0.9721327796363898, 0.8373713778258267, 0.507710841624756, 0.37806084310512766, 0.9950644980370162, 0.6242326560449529, 0.0013038609624027857, 0.004368004368004368, nan, 0.0, 0.3674744840609685, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8523251686190984, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.018815761219669314, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.0027 | 22.0 | 440 | 2.7743 | 0.0905 | 0.1790 | 0.4919 | [0.5209354156765703, 0.2843385806105753, 0.2771650237920328, 0.5116204200521233, 0.43072580560539764, 0.15486390567690778, 0.40958104661049705, 0.5248976895424556, 0.0, 
0.005238173187100998, 0.0, 0.0, 0.35612971757060735, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.5924460890646931, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.005864881192106323, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8544414802470214, 0.9988132368230056, 0.9711999856493233, 0.8053485588524875, 0.4965007898320911, 0.15749137338699326, 0.990605720695457, 0.5388405950273469, 0.0, 0.005241605241605242, nan, 0.0, 0.5125219189784632, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.824103656372027, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.006254026197122611, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.5391 | 23.0 | 460 | 2.6653 | 0.0957 | 0.1905 | 0.5165 | [0.5464265840726548, 0.37240272352152215, 0.2636569178032237, 0.5695783453295997, 0.3857329707060851, 0.39396181532780594, 0.42623454419783424, 0.577479073695366, 0.0016427666929053014, 0.0025061291201307545, 0.0, 0.0, 0.31766393555684297, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6386188825965683, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.003500875218804701, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8743254957564192, 0.9967045253928088, 0.9727068067053537, 0.8776569045734474, 0.4494567938864819, 0.4301754015055988, 0.985698261357263, 0.6143023225090496, 0.0022088938657176603, 0.0025116025116025116, nan, 0.0, 0.5177375118025269, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8940362087326944, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.003757783981103715, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.5185 | 24.0 | 480 | 2.7796 | 0.0936 | 0.1837 | 0.5071 | [0.5260587262466296, 0.2656685100463759, 0.3170013686992637, 0.4827944922200086, 0.5316860586346387, 0.13506039225065775, 0.41164867954505824, 0.5480136506086178, 0.0, 0.0011971486096751375, 0.0, 0.0, 0.4189878268046099, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.5699575538437353, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.006139315230224321, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8240106711107229, 0.9984209184173876, 0.9638811405200326, 0.7391745014382517, 0.6168334627545182, 0.1364401724567537, 0.9929706487193868, 0.5713839053733893, 0.0, 0.0012012012012012011, nan, 0.0, 0.6391349309833191, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8580049698260561, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.006280867511273352, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.8219 | 25.0 | 500 | 2.7019 | 0.1003 | 0.1960 | 0.5425 | [0.548914060620104, 0.31987655796805103, 0.3499450051759834, 0.569423006247703, 0.5458098043184606, 0.2991119082716704, 0.4190626027891272, 0.6132728937503336, 0.00034587490137161014, 0.008399718203002221, 0.0, 0.0, 0.3848656223412242, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6432202285267411, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.009856788141757167, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8277771431259365, 0.9973028109613763, 0.9702402841433991, 0.8599635128091276, 0.6392584045149851, 0.31101865764616166, 0.9884557861282482, 0.6577447794188883, 0.0004908653034928134, 0.008463008463008462, nan, 0.0, 0.6915156692594757, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8759318423855165, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.010548636461241143, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 
nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.2149 | 26.0 | 520 | 2.7042 | 0.0941 | 0.1886 | 0.5126 | [0.5261364911314423, 0.31230184088283724, 0.2957297258919843, 0.5183577831444995, 0.4828180433789907, 0.20093892555029008, 0.40764210834869674, 0.5712691604945633, 0.0, 0.00883170691817042, 0.0, 0.0, 0.38391178413537275, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.6073730862207897, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.01184122334483529, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8665909564352576, 0.9970085721571628, 0.9713434924165643, 0.7655456669047666, 0.5652633539681503, 0.20554368426218864, 0.9879510188820341, 0.5906676883240415, 0.0, 0.008845208845208846, nan, 0.0, 0.6793759273413965, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8919062832800851, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.012803306849903372, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.1734 | 27.0 | 540 | 2.7429 | 0.0909 | 0.1809 | 0.4723 | [0.5007271065314941, 0.31658009440845064, 0.2675489096205322, 0.4011362846273801, 0.43979827738306015, 0.214119192008617, 0.3767709747239444, 0.504293186546018, 6.58855567878595e-05, 0.04689699989169284, nan, 0.0, 0.3722172964942129, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.6361198407710036, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.013744252516465764, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8098418820936343, 0.995880656741011, 0.9753885894181697, 0.5847506708623719, 0.5149656852428401, 0.21914239763211743, 
0.960029912133109, 0.5188257986101936, 9.203724440490251e-05, 0.047283647283647284, nan, 0.0, 0.6983948563463873, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.898177730446101, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.014843246725359674, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.214 | 28.0 | 560 | 2.7531 | 0.0928 | 0.1851 | 0.4936 | [0.5302814836372616, 0.26518336274052434, 0.2915829264027335, 0.5174096994391785, 0.4487303632283268, 0.1517389144666118, 0.40914252281589086, 0.5410509793331589, 0.0, 0.017601819757365685, 0.0, 0.0, 0.44306130550672035, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.6381933576769258, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.016093943139678615, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8081115225148341, 0.997950136330646, 0.9720520570798167, 0.7832390586690863, 0.5223947031658599, 0.15453907081750845, 0.9935875864647598, 0.5646770770030209, 0.0, 0.017745017745017744, nan, 0.0, 0.7025313609999551, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.868595432493196, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.017473695512132275, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.1024 | 29.0 | 580 | 2.7427 | 0.0868 | 0.1782 | 0.4618 | [0.5189682292765813, 0.25443412690078654, 0.255594892642861, 0.4232316897469363, 0.3735208458508579, 0.12206265828016623, 0.415929203539823, 0.5751167916487175, 0.0, 0.0027697822190843424, 0.0, 0.0, 0.430233506401718, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6091623144302339, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.011941517185068, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 
nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8014655977636362, 0.9981070636928931, 0.9693523360210955, 0.6025550686306685, 0.4331110191857835, 0.12264514229192307, 0.992456533931576, 0.6055565830845245, 0.0, 0.0027846027846027844, nan, 0.0, 0.7116136864349625, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.876405159152763, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.012561735022546703, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.8114 | 30.0 | 600 | 2.7012 | 0.0879 | 0.1810 | 0.4640 | [0.5393170218014155, 0.27366295039586364, 0.2580850422267017, 0.39798111105215683, 0.38107597187474657, 0.22198954412992225, 0.4150158648486408, 0.5812779915473412, 0.0005007658772239896, 0.008717782109594976, 0.0, 0.0, 0.3947216196673897, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6439848978891368, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.015381250911211546, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.7640091927452575, 0.9967045253928088, 0.9749490999434942, 0.5792196760555223, 0.4432736060612658, 0.2289204841474188, 0.9952234062441578, 0.6087008217295955, 0.0007823165774416714, 0.00879060879060879, nan, 0.0, 0.7363427903421609, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8880605845462075, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.016990551857418938, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 0.4153 | 31.0 | 620 | 2.7978 | 0.0882 | 0.1748 | 0.4462 | [0.4950783564419678, 0.23200182606710795, 0.24911886408993536, 0.4170619362267979, 0.3466308929981021, 0.11445873655106088, 0.40740032800296655, 0.5688606418666905, 0.0, 0.006177522488349409, 0.0, 0.0, 0.45343178860408534, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.6694511303034594, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.009842421832281156, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 
nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.7552196380692547, 0.9968810686753369, 0.976285506713426, 0.5873279406938358, 0.39941554844343813, 0.11502654052054154, 0.9961581604038138, 0.5876863864154799, 0.0, 0.0062244062244062245, nan, 0.0, 0.7206060878557619, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8392497929239143, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.010629160403693366, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.3527 | 32.0 | 640 | 2.6521 | 0.0930 | 0.1879 | 0.4978 | [0.5426346443295595, 0.28262893057295996, 0.28540268145848924, 0.4787677072402938, 0.45752147615799155, 0.19507991225321217, 0.4333711282393441, 0.5954184749433477, 0.0006034406006707094, 0.007879150138564365, 0.0, 0.0, 0.4249690594059406, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6391808475141808, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.028665046920625013, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8079536062037591, 0.9976558975264325, 0.968195312710215, 0.7046709395934284, 0.5337789292634604, 0.19741163856567928, 0.9896522714526079, 0.6271346914330506, 0.0009357119847831756, 0.007917007917007918, nan, 0.0, 0.7410638010880806, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9066974322565376, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.03451792999785269, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.5527 | 33.0 | 660 | 2.6693 | 0.0952 | 0.1853 | 0.4907 | 
[0.5244283455941845, 0.2912685828119365, 0.2799003837758997, 0.46493261137256886, 0.4101885523432751, 0.17309985985627815, 0.4272512969362637, 0.6021660185233593, 0.0, 0.001086307099016892, 0.0, 0.0, 0.4009935077710014, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6782052488006772, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.028634059460200027, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.849341119391451, 0.9980972557327527, 0.9707604961746478, 0.6651334967856523, 0.475057997624456, 0.17533354978518412, 0.996176855487007, 0.6593829541751438, 0.0, 0.001092001092001092, nan, 0.0, 0.733150487837777, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8531534729617797, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.03365900794502899, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.2602 | 34.0 | 680 | 2.5582 | 0.1038 | 0.2014 | 0.5522 | [0.5493494197294155, 0.38296844330363883, 0.34949930396515333, 0.5264035187790749, 0.5627245166515873, 0.38334045818645623, 0.42658811946964637, 0.6061831614185239, 0.016146090389903407, 0.003898846591216765, 0.0, 0.0, 0.41177211047053197, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6268644484510736, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.033320435100391246, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8558156881455249, 0.9950764040094942, 0.9772990232570654, 0.8017239715052414, 0.6605052506295676, 0.4130203338895643, 0.9963731538605347, 0.6475061871922918, 0.024589283796843123, 0.003931203931203931, nan, 0.0, 0.735398588192977, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9050999881670808, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.041604036933648274, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.1385 | 35.0 | 700 | 2.6847 | 0.0911 | 0.1849 | 0.4870 | [0.5096079796442675, 0.29571685718689206, 0.3120902953756411, 0.4167750106098473, 0.4573808387302527, 0.20275773062060065, 0.4216022623160183, 0.5573729654415335, 0.0, 0.011100882655547734, 0.0, 0.0, 0.4020941383697035, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6677984038096918, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.029623079473858737, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8337578034177122, 0.9973910826026403, 0.9731642345259344, 0.5972026486997819, 0.5308155469919855, 0.20640445790137346, 0.9936343241727426, 0.5813362573871993, 0.0, 0.011193011193011193, nan, 0.0, 0.7389955487612967, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8960478049934919, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0359673609619927, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.8222 | 36.0 | 720 | 2.6160 | 0.0961 | 0.1909 | 0.5088 | [0.5445573540126991, 0.32197014260958046, 0.2953386999439631, 0.5002989031553776, 0.47283026712399956, 0.26728574708047304, 0.42482238134516, 0.550042322668021, 0.0015064465523152526, 0.0028617710583153347, 0.0, 0.0, 0.41462011704096435, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6831566012833933, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.035599764413323735, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8437266905445089, 0.993134427901685, 0.9832455849246141, 0.7431996756694145, 0.5502335629047436, 0.27253301519945033, 0.9965694522340625, 0.572321892532213, 0.002224233406451811, 0.0028938028938028937, nan, 0.0, 0.7454251157771683, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8881197491421133, nan, 0.0, 0.0, nan, 
0.0, 0.0, nan, 0.0, 0.0, 0.04380502469400902, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.9946 | 37.0 | 740 | 2.6883 | 0.0936 | 0.1832 | 0.4754 | [0.523952335629541, 0.256518001618123, 0.271209255398528, 0.44599788167650173, 0.4198386513745557, 0.19830876135152029, 0.4343064056402582, 0.591619100420431, 0.0, 0.003463390876129661, 0.0, 0.0, 0.42011730057643415, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7030220844633863, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.036647036931631606, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.7472499042422369, 0.995096019929775, 0.9812723668750505, 0.6401764512828433, 0.48606968423649966, 0.20363337083487493, 0.9955879603664236, 0.6202957521952422, 0.0, 0.0034944034944034944, nan, 0.0, 0.750415898565712, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8588332741687374, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.044932359888340136, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.4272 | 38.0 | 760 | 2.6940 | 0.0915 | 0.1851 | 0.4818 | [0.5365798495647949, 0.2795723190818896, 0.2722143653145567, 0.47573354635077736, 0.4045011249675892, 0.16031738909330223, 0.4327805480208553, 0.5764036508418531, 0.0, 0.004315460135936994, 0.0, 0.0, 0.4179419132418959, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7027767283717415, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.039500636063735745, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, 0.0, nan] | [0.8051077526828974, 0.9963318229074717, 0.9771375781439193, 0.7070841135929266, 0.4679652328734648, 0.16186319739653726, 0.9962609833613759, 0.6018222492315551, 0.0, 0.004368004368004368, nan, 0.0, 0.7576547817094555, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8804875162702639, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.049173287524157186, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 2.03 | 39.0 | 780 | 2.7420 | 0.0870 | 0.1789 | 0.4526 | [0.5150003121349136, 0.2558886132510781, 0.2532053075230316, 0.41002147848073733, 0.3533176436937534, 0.16357870387643175, 0.4309126666181077, 0.554386069027494, 0.0, 0.006875436429070204, 0.0, 0.0, 0.42182699273099694, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6873865064953206, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.03571994476763927, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.7761082701108102, 0.9975283940446066, 0.9742225969343367, 0.5822892333828839, 0.40638977790357034, 0.1655252606860517, 0.9954010095344924, 0.5759285192133238, 0.0, 0.006988806988806989, nan, 0.0, 0.7592734139651994, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8734469293574725, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.042355593729869016, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.5451 | 40.0 | 800 | 2.6236 | 0.0925 | 0.1896 | 0.4961 | [0.5397304802137207, 0.3099527745703586, 0.27648153069359555, 0.49770820111416686, 0.41362692113171745, 0.23739839807947363, 0.43439442579296034, 0.5815334886552064, 0.0011047931806894315, 0.00754554274010995, 0.0, 0.0, 0.4212725259236887, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6787200716043857, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.03848703200838162, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8267926861228522, 0.9951940995311795, 0.9798103916837828, 0.7494160118921215, 0.47878097339245584, 0.24192269648668444, 0.9941858291269396, 0.6100439488819017, 0.0016720099400223958, 0.007644007644007644, nan, 0.0, 0.7525740749067038, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8972902615075139, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.04831436547133348, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.3932 | 41.0 | 820 | 2.6526 | 0.0933 | 0.1891 | 0.4936 | [0.54554711905106, 0.30732112942775985, 0.27692412636174385, 0.4499308962636904, 0.4430142615421803, 0.2579748284603433, 0.42352641365936794, 0.573622870217321, 0.001638901935703079, 0.006574338524545993, 0.0, 0.0, 0.38766371123812954, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6615084525357607, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.04885641402568205, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8244172216137031, 0.9932128915828086, 0.9831200165032782, 0.6457653622656808, 0.5142955979863515, 0.26202251602624604, 0.994241914376519, 0.6005936181643635, 0.0025156846804006688, 0.006661206661206661, nan, 0.0, 0.7745604963805585, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9029108981185658, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.06066136998067426, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 0.6266 | 42.0 | 840 | 2.6529 | 0.0912 | 0.1858 | 0.4800 | [0.5477434495152561, 0.2868179814398264, 0.26925989044731136, 0.4187395393518442, 0.4151601908657123, 0.22271465663891127, 0.42543191209353864, 0.5776530607974354, 0.001426700803252096, 0.0036639905167304273, 0.0, 
0.0, 0.41212282269949024, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6652623349548298, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.04257293284999137, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8069657892791624, 0.995488338335393, 0.9792184262689138, 0.6121981119326628, 0.47882935514382685, 0.22538678183918634, 0.9941577865021499, 0.6078200826133291, 0.002239572947185961, 0.003712803712803713, nan, 0.0, 0.7670068791870869, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9062241154892912, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.05295791281941164, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.0177 | 43.0 | 860 | 2.7506 | 0.0875 | 0.1779 | 0.4459 | [0.5163039857760789, 0.24346056710594036, 0.25311422838218606, 0.3679840279933403, 0.3595412662032054, 0.1484688521416712, 0.4277504996521536, 0.5740373197269206, 0.0, 0.005929599482507681, 0.0, 0.0, 0.4145561310958871, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6681873071246478, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.04662992793556592, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.7551725991680834, 0.9974891622040448, 0.9741059976859534, 0.5034942759512732, 0.4121157581783303, 0.1505674310437258, 0.9942979996260983, 0.6024431703085229, 0.0, 0.006006006006006006, nan, 0.0, 0.780855177375118, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8839190628328009, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.056098346575048315, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 0.609 | 44.0 | 880 | 2.6707 | 0.0897 | 0.1844 | 0.4664 | [0.5445890222461796, 0.2902385385119155, 0.25097692369542496, 0.41202005837886385, 0.35468876143702904, 0.22410797119590029, 0.4324328715268047, 0.5940284212869811, 0.0005990106662544443, 0.0030174039549544693, 0.0, 0.0, 0.3892848365691816, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.665252383868428, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.05379035881635535, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8044996068891831, 0.9953902587339885, 0.9798731758944508, 0.58451900615842, 0.4081170064275157, 0.22981901101639246, 0.9952140587025612, 0.6227486106340441, 0.000951051525517326, 0.0030576030576030576, nan, 0.0, 0.7834180117800459, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8998343391314637, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.06747906377496242, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.4351 | 45.0 | 900 | 2.7574 | 0.0886 | 0.1796 | 0.4525 | [0.533761735484459, 0.2542889782695101, 0.24370287974292423, 0.41214598854253515, 0.3268665938735261, 0.1376453934704944, 0.4251993046814122, 0.5993604212107277, 0.0, 0.00021572645885017797, 0.0, 0.0, 0.4221449851042701, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.671957671957672, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.048716594021045216, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.78701457533952, 0.9977147452872751, 0.9726978375324011, 0.5916716538929323, 0.3736716185389195, 0.1387657714117443, 0.9946158160403814, 0.6256198201530725, 0.0, 0.0002184002184002184, nan, 0.0, 0.764533968796367, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8791267305644302, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.05939982821558944, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, 
nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 0.5252 | 46.0 | 920 | 2.6873 | 0.0887 | 0.1830 | 0.4661 | [0.5318961888559552, 0.27711845229818777, 0.2562796540857526, 0.4344791419968197, 0.36295156632794806, 0.16399268356414948, 0.4289097998775418, 0.5756206623503657, 0.0, 0.0018867924528301887, 0.0, 0.0, 0.4050966815045102, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6763522574877068, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.05280046028900839, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8151740775335489, 0.9969203005158987, 0.9752271443050237, 0.6197947836830827, 0.41716439393389604, 0.1658574891081932, 0.9952888390353337, 0.5986295699350895, 0.0, 0.001911001911001911, nan, 0.0, 0.7733465221887506, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8951603360549047, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0640433755636676, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 0.7922 | 47.0 | 940 | 2.6797 | 0.0889 | 0.1843 | 0.4739 | [0.5320148055654655, 0.2737466556849319, 0.2728960894980851, 0.42917801905846314, 0.42130162114966463, 0.16687488311202545, 0.43271389399316323, 0.5535587218758471, 0.00015798256267464479, 0.012011203274803404, 0.0, 0.0, 0.3931828001000523, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.634682224428997, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.05688327078807032, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8016705529758825, 0.9975087781243257, 0.9714242149731374, 0.6112424950288615, 0.48558344763522093, 0.16843225937978995, 0.9927463077210693, 0.5755321866110039, 
0.0002454326517464067, 0.012175812175812175, nan, 0.0, 0.7774380648352143, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.907525736599219, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.06957268627872021, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.1004 | 48.0 | 960 | 2.6838 | 0.0896 | 0.1841 | 0.4649 | [0.540574302214772, 0.27993981033641724, 0.25434366985311263, 0.42256487427120587, 0.3654801073118221, 0.18682454796313788, 0.43272728750797695, 0.5849922720898206, 0.0008400347340204628, 0.010767160161507403, 0.0, 0.0, 0.38559592096876993, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6759535359829097, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.06882144599266188, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.7909658430379066, 0.9962631671864886, 0.9784291390490882, 0.5936070194405297, 0.4221598097629536, 0.18965712516705804, 0.9951392783697888, 0.6117041421160638, 0.0013652191253393873, 0.01092001092001092, nan, 0.0, 0.7888584146396295, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8985918826174417, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.08710006441915397, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.1432 | 49.0 | 980 | 2.6985 | 0.0890 | 0.1834 | 0.4671 | [0.5381121001574045, 0.27250627835982705, 0.25872411330635564, 0.4274567807419871, 0.379984097045785, 0.16504215750762424, 0.43210422712421614, 0.5772880627756226, 0.0, 0.014348667239896818, 0.0, 0.0, 0.3868839303626758, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.678830146231721, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.05335195530726257, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 
nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.796009085227769, 0.9972145393201122, 0.9748594082139685, 0.608139153265507, 0.4381354640656637, 0.16671826274737803, 0.995176668536175, 0.6035617089861812, 0.0, 0.014578214578214578, nan, 0.0, 0.7803605952969741, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.8926162584309549, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.06664698303628945, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | | 1.2524 | 50.0 | 1000 | 2.6432 | 0.0911 | 0.1868 | 0.4793 | [0.5426296173059788, 0.2947250971937308, 0.2593935859781869, 0.45173125506778267, 0.38272757070710234, 0.20843480268400397, 0.428475951976423, 0.5954193004635483, 0.0008033243744483343, 0.011841963612875444, 0.0, 0.0, 0.39798770262716604, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6574200718071466, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.050726353169051976, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | [0.8164239683360996, 0.9948410129661233, 0.9814696886800068, 0.6506930635726559, 0.4434671330667499, 0.21132747906583407, 0.9948027668723126, 0.6222113597731216, 0.001242502799466184, 0.012012012012012012, nan, 0.0, 0.7683107773931028, nan, 0.0, 0.0, nan, 0.0, nan, nan, 0.9100106496272631, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.06326497745329611, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan] | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
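The per-category columns in the table above match the output format of the mean_iou metric from the Hugging Face evaluate library (mean IoU, mean accuracy, overall accuracy, plus one IoU and one accuracy entry per label class). As a minimal, hypothetical sketch — assuming this card was produced by the usual semantic-segmentation fine-tuning setup, which is not stated here — this is how such arrays, including the nan entries for classes absent from the references, are computed:

```python
import numpy as np
import evaluate

metric = evaluate.load("mean_iou")

# Tiny made-up 2x2 label maps over a label space of 5 classes (class 4 never occurs).
predictions = [np.array([[0, 1], [2, 3]])]
references = [np.array([[0, 1], [2, 2]])]

results = metric.compute(
    predictions=predictions,
    references=references,
    num_labels=5,
    ignore_index=255,      # pixels labelled 255 are excluded from the statistics
    reduce_labels=False,
)

# per_category_iou / per_category_accuracy have one entry per class; a class that
# never appears in the references yields nan, while a class that is predicted but
# never matched yields 0.0 -- the same nan / 0.0 pattern seen in the table above.
print(results["mean_iou"], results["per_category_iou"], results["per_category_accuracy"])
```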
AdapterHub/roberta-base-pf-ud_en_ewt
[ "roberta", "en", "dataset:universal_dependencies", "adapter-transformers", "adapterhub:dp/ud_ewt" ]
null
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - conversational --- # Gamer Bot DialoGPT Model
AdapterHub/roberta-base-pf-wnut_17
[ "roberta", "en", "dataset:wnut_17", "arxiv:2104.08247", "adapter-transformers", "token-classification" ]
token-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-10-15T01:20:02Z
--- license: mit library_name: sklearn tags: - sklearn - skops - tabular-classification widget: structuredData: Contract: - Two year - Month-to-month - One year Dependents: - 'Yes' - 'No' - 'No' DeviceProtection: - 'No' - 'No' - 'Yes' InternetService: - Fiber optic - Fiber optic - DSL MonthlyCharges: - 79.05 - 84.95 - 68.8 MultipleLines: - 'Yes' - 'Yes' - 'Yes' OnlineBackup: - 'No' - 'No' - 'Yes' OnlineSecurity: - 'Yes' - 'No' - 'Yes' PaperlessBilling: - 'No' - 'Yes' - 'No' Partner: - 'Yes' - 'Yes' - 'No' PaymentMethod: - Bank transfer (automatic) - Electronic check - Bank transfer (automatic) PhoneService: - 'Yes' - 'Yes' - 'Yes' SeniorCitizen: - 0 - 0 - 0 StreamingMovies: - 'No' - 'No' - 'No' StreamingTV: - 'No' - 'Yes' - 'No' TechSupport: - 'No' - 'No' - 'Yes' TotalCharges: - 5730.7 - 1378.25 - 4111.35 gender: - Female - Female - Male tenure: - 72 - 16 - 63 --- # Model description This is a Logistic Regression model trained on churn dataset. ## Intended uses & limitations This model is not ready to be used in production. ## Training Procedure ### Hyperparameters The model is trained with below hyperparameters. <details> <summary> Click to expand </summary> | Hyperparameter | Value | |--------------------------------------------|-----------------------------------------------------------------------------------| | memory | | | steps | [('preprocessor', ColumnTransformer(transformers=[('num', Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]), ['MonthlyCharges', 'TotalCharges', 'tenure']), ('cat', OneHotEncoder(), ['SeniorCitizen', 'gender', 'Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod'])])), ('classifier', LogisticRegression(class_weight='balanced', max_iter=300))] | | verbose | False | | preprocessor | ColumnTransformer(transformers=[('num', Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]), ['MonthlyCharges', 'TotalCharges', 'tenure']), ('cat', OneHotEncoder(), ['SeniorCitizen', 'gender', 'Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod'])]) | | classifier | LogisticRegression(class_weight='balanced', max_iter=300) | | preprocessor__n_jobs | | | preprocessor__remainder | drop | | preprocessor__sparse_threshold | 0.3 | | preprocessor__transformer_weights | | | preprocessor__transformers | [('num', Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]), ['MonthlyCharges', 'TotalCharges', 'tenure']), ('cat', OneHotEncoder(), ['SeniorCitizen', 'gender', 'Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod'])] | | preprocessor__verbose | False | | preprocessor__verbose_feature_names_out | True | | preprocessor__num | Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]) | | preprocessor__cat | OneHotEncoder() | | preprocessor__num__memory | | | preprocessor__num__steps | [('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())] | | preprocessor__num__verbose | 
False | | preprocessor__num__imputer | SimpleImputer(strategy='median') | | preprocessor__num__std_scaler | StandardScaler() | | preprocessor__num__imputer__add_indicator | False | | preprocessor__num__imputer__copy | True | | preprocessor__num__imputer__fill_value | | | preprocessor__num__imputer__missing_values | nan | | preprocessor__num__imputer__strategy | median | | preprocessor__num__imputer__verbose | deprecated | | preprocessor__num__std_scaler__copy | True | | preprocessor__num__std_scaler__with_mean | True | | preprocessor__num__std_scaler__with_std | True | | preprocessor__cat__categories | auto | | preprocessor__cat__drop | | | preprocessor__cat__dtype | <class 'numpy.float64'> | | preprocessor__cat__handle_unknown | error | | preprocessor__cat__max_categories | | | preprocessor__cat__min_frequency | | | preprocessor__cat__sparse | True | | classifier__C | 1.0 | | classifier__class_weight | balanced | | classifier__dual | False | | classifier__fit_intercept | True | | classifier__intercept_scaling | 1 | | classifier__l1_ratio | | | classifier__max_iter | 300 | | classifier__multi_class | auto | | classifier__n_jobs | | | classifier__penalty | l2 | | classifier__random_state | | | classifier__solver | lbfgs | | classifier__tol | 0.0001 | | classifier__verbose | 0 | | classifier__warm_start | False | </details> ### Model Plot The fitted pipeline is: Pipeline(steps=[('preprocessor', ColumnTransformer(transformers=[('num', Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]), ['MonthlyCharges', 'TotalCharges', 'tenure']), ('cat', OneHotEncoder(), ['SeniorCitizen', 'gender', 'Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod'])])), ('classifier', LogisticRegression(class_weight='balanced', max_iter=300))]) ## Evaluation Results You can find the details about the evaluation process and the evaluation results below. | Metric | Value | |----------|----------| | accuracy | 0.730305 | | f1 score | 0.730305 | # How to Get Started with the Model Use the code below to get started with the model. <details> <summary> Click to expand </summary> ```python import pickle with open(dtc_pkl_filename, 'rb') as file: clf = pickle.load(file) ``` </details> # Model Card Authors This model card is written by the following authors: skops_user # Model Card Contact You can contact the model card authors through the following channels: [More Information Needed] # Citation Below you can find information related to citation. **BibTeX:** ``` bibtex @inproceedings{...,year={2020}} ``` # Additional Content ## confusion_matrix ![confusion_matrix](confusion_matrix.png)
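The hyperparameters table above fully specifies the preprocessing and the classifier, so the pipeline can be reconstructed directly from it. The following is a sketch for illustration only, not the authors' original training script; it assumes scikit-learn and a pandas DataFrame with the columns listed in the widget example (the name of the churn label column is an assumption):

```python
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder, StandardScaler

num_cols = ["MonthlyCharges", "TotalCharges", "tenure"]
cat_cols = ["SeniorCitizen", "gender", "Partner", "Dependents", "PhoneService",
            "MultipleLines", "InternetService", "OnlineSecurity", "OnlineBackup",
            "DeviceProtection", "TechSupport", "StreamingTV", "StreamingMovies",
            "Contract", "PaperlessBilling", "PaymentMethod"]

# Numeric features: median imputation + standardisation; categorical features: one-hot encoding.
preprocessor = ColumnTransformer(transformers=[
    ("num", Pipeline(steps=[("imputer", SimpleImputer(strategy="median")),
                            ("std_scaler", StandardScaler())]), num_cols),
    ("cat", OneHotEncoder(), cat_cols),
])

clf = Pipeline(steps=[
    ("preprocessor", preprocessor),
    ("classifier", LogisticRegression(class_weight="balanced", max_iter=300)),
])

# With a churn DataFrame `df` containing the columns above and a target column
# (e.g. "Churn" -- the target name is an assumption), training would look like:
# clf.fit(df[num_cols + cat_cols], df["Churn"])
```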
AdapterHub/roberta-base-pf-yelp_polarity
[ "roberta", "en", "dataset:yelp_polarity", "arxiv:2104.08247", "adapter-transformers", "text-classification" ]
text-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- inference: false tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image license: creativeml-openrail-m --- ## Note A newer version of this model has been released: https://huggingface.co/naclbit/trinart_derrida_characters_v2_stable_diffusion ## Stable Diffusion TrinArt Characters model v1 trinart_characters_19.2m_stable_diffusion_v1 is a stable diffusion v1-based model trained on roughly 19.2M anime/manga style images (pre-rolled augmented images included), plus final finetuning on about 50,000 images. This model seeks a sweet spot between artistic style versatility and anatomical quality within the given model spec of SDv1. This is the same version 1 model that was released in the AI Novelist/TrinArt service from early September through Oct 14. We are currently experimenting with the new Derrida model on the TrinArt service for further improvement and anatomical stabilization. In the meantime, please enjoy this real-service-tested Characters v1! #### Hardware - 8xNVIDIA A100 40GB #### Custom autoencoder *Note: There was a wrong checkpoint uploaded before 5 Nov 2022. The file has been replaced with the latest checkpoint.* We also provide a separate checkpoint for the custom KL autoencoder. As suggested by the Latent Diffusion paper, we found that training the autoencoder and the latent diffusion model separately improves the result. Since the official stable diffusion script does not support loading a separate VAE, to use it in your own script you'll need to override the state_dict of first_stage_model (see the sketch at the end of this card). The popular WebUI has a script to load separate first_stage_model parameters. #### Safety The dataset is filtered to exclude NSFW or unsafe content. After our extensive experimentation and testing with 10M+ user-generated images, we decided that this model is safe enough and less likely to spit out questionable (nudity/overly sexual/realistic gore) content than the stock SD v1.4 model or other anime/manga models. However, if the user tortures this model enough until it talks, it may still be possible to force this model to generate obnoxious material. We do not consider this model to be 100% risk-free. *This statement does not necessarily restrict third parties from training a derivative of this model that includes NSFW. #### Examples The images below are generated directly by the native TrinArt service with its idiosyncratic upscaler, parser and processes. Your mileage may vary.
![examples](https://pbs.twimg.com/media/FeLNpeuVIAEUqKe?format=jpg&name=4096x4096) (assorted random examples) ![examples](https://ai-novel.com/images/trinart_samples/trinart_sample_20221005_c.webp) ![examples](https://ai-novel.com/images/trinart_samples/trinart_sample_20221005_d.webp) wide shot, high quality, htgngg animal arm rest brown hair merry chair cup dress flower from above jacket on shoulders long hair sitting solo sugar bowl fantasy adventurer's inn table teacup teapot landscape miniature (2022 Artstyle preset) ![examples](https://ai-novel.com/images/trinart_samples/trinart_sample_20221016_a.webp) highres wide shot bangs bare shoulders water bird cage terrarium detached sleeves frilled frilled legwear frills hair ornament hair ribbon hood long hair medium breasts ribbon thighhighs (2019 Artstyle preset) ![examples](https://ai-novel.com/images/trinart_samples/trinart_sample_20221006_a.webp) 1girl standing holding sword hizzrd arm up bangs bare shoulders boots bow breasts bright pupils choker detached sleeves diamond (shape) floating floating hair footwear bow from side full body gloves leg up long hair looking at viewer open mouth outstretched arm solo streaked hair swept bangs two tone hair very long hair::4 angry::1 (2022 Artstyle preset) ![examples](https://ai-novel.com/images/trinart_samples/trinart_sample_20221006_c.webp) 1boy male focus standing hizzrd holding sword arm up bow bright pupils cape coat diamond (shape) floating floating hair fold-over boots footwear bow from side full body gloves leg up long sleeves looking at viewer open mouth outstretched arm open coat open clothes solo swept two tone hair thigh boots::4 angry::1.25 (2022 Artstyle preset) ![examples](https://pbs.twimg.com/media/FeNv2QeUAAIvDO1?format=jpg&name=large) cathedral 1girl schoolgirl momoko school uniform cats particles beautiful shooting stars detailed cathedral jacket open mouth glasses cats (2022 Artstyle preset) ![examples](https://ai-novel.com/images/trinart_samples/trinart_sample_20221005_a.webp) highres 2girls yuri wide shot bangs bare shoulders water bird cage terrarium detached sleeves frilled frilled legwear frills hair ornament hair ribbon hood long hair medium breasts ribbon thighhighs (More Details preset) ![examples](https://ai-novel.com/images/trinart_samples/trinart_sample_20221016_d.webp) wide shot, best quality lapis erebcir highres 1boy bangs black gloves brown hair closed mouth gloves hair between eyes looking at viewer male focus flowers green eyes (More Details preset) TrinArt 2022 Artstyle preset negative prompts: **retro style, 1980s, 1990s, 2000s, 2005 2006 2007 2008 2009 2010 2011 2012 2013 2014 2015 2016 2017 2018 2019** TrinArt More Details preset negative prompts: **flat color, flat shading** We recommend to add known sets of negative prompts in order to stabilize the anatomy such as: bad hands, fewer digits, etc. #### Credits - Sta, AI Novelist Dev (https://ai-novel.com/) @ Bit192, Inc. Twitter https://twitter.com/naclbbr (Japanese) https://twitter.com/naclbbre (English) - Stable Diffusion - Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bjorn #### License CreativeML OpenRAIL-M
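Below is a minimal sketch of the first_stage_model override described in the Custom autoencoder section above. It is an illustration under stated assumptions, not the authors' procedure: it assumes a CompVis-style LatentDiffusion model object that exposes first_stage_model, and a separate VAE checkpoint whose weights live under a 'state_dict' key (possibly prefixed with 'first_stage_model.'); the checkpoint filename is hypothetical.

```python
import torch

def override_first_stage(model, vae_ckpt_path="trinart_characters_vae.ckpt"):
    """Swap the VAE weights of an already-loaded latent diffusion model.

    `model` is assumed to be a CompVis LatentDiffusion instance with a
    `first_stage_model` attribute; `vae_ckpt_path` is a hypothetical filename.
    """
    ckpt = torch.load(vae_ckpt_path, map_location="cpu")
    state_dict = ckpt.get("state_dict", ckpt)  # some checkpoints nest weights under "state_dict"
    # Strip a possible "first_stage_model." prefix so the keys line up with the autoencoder.
    state_dict = {k.replace("first_stage_model.", ""): v for k, v in state_dict.items()}
    missing, unexpected = model.first_stage_model.load_state_dict(state_dict, strict=False)
    return missing, unexpected
```

Loading with strict=False and inspecting the returned missing/unexpected key lists is a simple way to confirm that the override actually matched the autoencoder's parameters.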
Adarsh123/distilbert-base-uncased-finetuned-ner
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-15T01:25:28Z
--- license: mit library_name: sklearn tags: - sklearn - skops - tabular-classification widget: structuredData: Contract: - Two year - Month-to-month - One year Dependents: - 'Yes' - 'No' - 'No' DeviceProtection: - 'No' - 'No' - 'Yes' InternetService: - Fiber optic - Fiber optic - DSL MonthlyCharges: - 79.05 - 84.95 - 68.8 MultipleLines: - 'Yes' - 'Yes' - 'Yes' OnlineBackup: - 'No' - 'No' - 'Yes' OnlineSecurity: - 'Yes' - 'No' - 'Yes' PaperlessBilling: - 'No' - 'Yes' - 'No' Partner: - 'Yes' - 'Yes' - 'No' PaymentMethod: - Bank transfer (automatic) - Electronic check - Bank transfer (automatic) PhoneService: - 'Yes' - 'Yes' - 'Yes' SeniorCitizen: - 0 - 0 - 0 StreamingMovies: - 'No' - 'No' - 'No' StreamingTV: - 'No' - 'Yes' - 'No' TechSupport: - 'No' - 'No' - 'Yes' TotalCharges: - 5730.7 - 1378.25 - 4111.35 gender: - Female - Female - Male tenure: - 72 - 16 - 63 --- # Model description This is a Logistic Regression model trained on churn dataset. ## Intended uses & limitations This model is not ready to be used in production. ## Training Procedure ### Hyperparameters The model is trained with below hyperparameters. <details> <summary> Click to expand </summary> | Hyperparameter | Value | |--------------------------------------------|-----------------------------------------------------------------------------------| | memory | | | steps | [('preprocessor', ColumnTransformer(transformers=[('num', Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]), ['MonthlyCharges', 'TotalCharges', 'tenure']), ('cat', OneHotEncoder(handle_unknown='ignore'), ['SeniorCitizen', 'gender', 'Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod'])])), ('classifier', LogisticRegression(class_weight='balanced', max_iter=300))] | | verbose | False | | preprocessor | ColumnTransformer(transformers=[('num', Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]), ['MonthlyCharges', 'TotalCharges', 'tenure']), ('cat', OneHotEncoder(handle_unknown='ignore'), ['SeniorCitizen', 'gender', 'Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod'])]) | | classifier | LogisticRegression(class_weight='balanced', max_iter=300) | | preprocessor__n_jobs | | | preprocessor__remainder | drop | | preprocessor__sparse_threshold | 0.3 | | preprocessor__transformer_weights | | | preprocessor__transformers | [('num', Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]), ['MonthlyCharges', 'TotalCharges', 'tenure']), ('cat', OneHotEncoder(handle_unknown='ignore'), ['SeniorCitizen', 'gender', 'Partner', 'Dependents', 'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity', 'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV', 'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod'])] | | preprocessor__verbose | False | | preprocessor__verbose_feature_names_out | True | | preprocessor__num | Pipeline(steps=[('imputer', SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())]) | | preprocessor__cat | OneHotEncoder(handle_unknown='ignore') | | preprocessor__num__memory | | | preprocessor__num__steps | [('imputer', 
SimpleImputer(strategy='median')), ('std_scaler', StandardScaler())] | | preprocessor__num__verbose | False | | preprocessor__num__imputer | SimpleImputer(strategy='median') | | preprocessor__num__std_scaler | StandardScaler() | | preprocessor__num__imputer__add_indicator | False | | preprocessor__num__imputer__copy | True | | preprocessor__num__imputer__fill_value | | | preprocessor__num__imputer__missing_values | nan | | preprocessor__num__imputer__strategy | median | | preprocessor__num__imputer__verbose | deprecated | | preprocessor__num__std_scaler__copy | True | | preprocessor__num__std_scaler__with_mean | True | | preprocessor__num__std_scaler__with_std | True | | preprocessor__cat__categories | auto | | preprocessor__cat__drop | | | preprocessor__cat__dtype | <class 'numpy.float64'> | | preprocessor__cat__handle_unknown | ignore | | preprocessor__cat__max_categories | | | preprocessor__cat__min_frequency | | | preprocessor__cat__sparse | True | | classifier__C | 1.0 | | classifier__class_weight | balanced | | classifier__dual | False | | classifier__fit_intercept | True | | classifier__intercept_scaling | 1 | | classifier__l1_ratio | | | classifier__max_iter | 300 | | classifier__multi_class | auto | | classifier__n_jobs | | | classifier__penalty | l2 | | classifier__random_state | | | classifier__solver | lbfgs | | classifier__tol | 0.0001 | | classifier__verbose | 0 | | classifier__warm_start | False | </details> ### Model Plot The model plot is below. <style>#sk-container-id-5 {color: black;background-color: white;}#sk-container-id-5 pre{padding: 0;}#sk-container-id-5 div.sk-toggleable {background-color: white;}#sk-container-id-5 label.sk-toggleable__label {cursor: pointer;display: block;width: 100%;margin-bottom: 0;padding: 0.3em;box-sizing: border-box;text-align: center;}#sk-container-id-5 label.sk-toggleable__label-arrow:before {content: "▸";float: left;margin-right: 0.25em;color: #696969;}#sk-container-id-5 label.sk-toggleable__label-arrow:hover:before {color: black;}#sk-container-id-5 div.sk-estimator:hover label.sk-toggleable__label-arrow:before {color: black;}#sk-container-id-5 div.sk-toggleable__content {max-height: 0;max-width: 0;overflow: hidden;text-align: left;background-color: #f0f8ff;}#sk-container-id-5 div.sk-toggleable__content pre {margin: 0.2em;color: black;border-radius: 0.25em;background-color: #f0f8ff;}#sk-container-id-5 input.sk-toggleable__control:checked~div.sk-toggleable__content {max-height: 200px;max-width: 100%;overflow: auto;}#sk-container-id-5 input.sk-toggleable__control:checked~label.sk-toggleable__label-arrow:before {content: "▾";}#sk-container-id-5 div.sk-estimator input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-5 div.sk-label input.sk-toggleable__control:checked~label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-5 input.sk-hidden--visually {border: 0;clip: rect(1px 1px 1px 1px);clip: rect(1px, 1px, 1px, 1px);height: 1px;margin: -1px;overflow: hidden;padding: 0;position: absolute;width: 1px;}#sk-container-id-5 div.sk-estimator {font-family: monospace;background-color: #f0f8ff;border: 1px dotted black;border-radius: 0.25em;box-sizing: border-box;margin-bottom: 0.5em;}#sk-container-id-5 div.sk-estimator:hover {background-color: #d4ebff;}#sk-container-id-5 div.sk-parallel-item::after {content: "";width: 100%;border-bottom: 1px solid gray;flex-grow: 1;}#sk-container-id-5 div.sk-label:hover label.sk-toggleable__label {background-color: #d4ebff;}#sk-container-id-5 
div.sk-serial::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: 0;}#sk-container-id-5 div.sk-serial {display: flex;flex-direction: column;align-items: center;background-color: white;padding-right: 0.2em;padding-left: 0.2em;position: relative;}#sk-container-id-5 div.sk-item {position: relative;z-index: 1;}#sk-container-id-5 div.sk-parallel {display: flex;align-items: stretch;justify-content: center;background-color: white;position: relative;}#sk-container-id-5 div.sk-item::before, #sk-container-id-5 div.sk-parallel-item::before {content: "";position: absolute;border-left: 1px solid gray;box-sizing: border-box;top: 0;bottom: 0;left: 50%;z-index: -1;}#sk-container-id-5 div.sk-parallel-item {display: flex;flex-direction: column;z-index: 1;position: relative;background-color: white;}#sk-container-id-5 div.sk-parallel-item:first-child::after {align-self: flex-end;width: 50%;}#sk-container-id-5 div.sk-parallel-item:last-child::after {align-self: flex-start;width: 50%;}#sk-container-id-5 div.sk-parallel-item:only-child::after {width: 0;}#sk-container-id-5 div.sk-dashed-wrapped {border: 1px dashed gray;margin: 0 0.4em 0.5em 0.4em;box-sizing: border-box;padding-bottom: 0.4em;background-color: white;}#sk-container-id-5 div.sk-label label {font-family: monospace;font-weight: bold;display: inline-block;line-height: 1.2em;}#sk-container-id-5 div.sk-label-container {text-align: center;}#sk-container-id-5 div.sk-container {/* jupyter's `normalize.less` sets `[hidden] { display: none; }` but bootstrap.min.css set `[hidden] { display: none !important; }` so we also need the `!important` here to be able to override the default hidden behavior on the sphinx rendered scikit-learn.org. See: https://github.com/scikit-learn/scikit-learn/issues/21755 */display: inline-block !important;position: relative;}#sk-container-id-5 div.sk-text-repr-fallback {display: none;}</style><div id="sk-container-id-5" class="sk-top-container"><div class="sk-text-repr-fallback"><pre>Pipeline(steps=[(&#x27;preprocessor&#x27;,ColumnTransformer(transformers=[(&#x27;num&#x27;,Pipeline(steps=[(&#x27;imputer&#x27;,SimpleImputer(strategy=&#x27;median&#x27;)),(&#x27;std_scaler&#x27;,StandardScaler())]),[&#x27;MonthlyCharges&#x27;,&#x27;TotalCharges&#x27;, &#x27;tenure&#x27;]),(&#x27;cat&#x27;,OneHotEncoder(handle_unknown=&#x27;ignore&#x27;),[&#x27;SeniorCitizen&#x27;, &#x27;gender&#x27;,&#x27;Partner&#x27;, &#x27;Dependents&#x27;,&#x27;PhoneService&#x27;,&#x27;MultipleLines&#x27;,&#x27;InternetService&#x27;,&#x27;OnlineSecurity&#x27;,&#x27;OnlineBackup&#x27;,&#x27;DeviceProtection&#x27;,&#x27;TechSupport&#x27;, &#x27;StreamingTV&#x27;,&#x27;StreamingMovies&#x27;,&#x27;Contract&#x27;,&#x27;PaperlessBilling&#x27;,&#x27;PaymentMethod&#x27;])])),(&#x27;classifier&#x27;,LogisticRegression(class_weight=&#x27;balanced&#x27;, max_iter=300))])</pre><b>In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. 
<br />On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org.</b></div><div class="sk-container" hidden><div class="sk-item sk-dashed-wrapped"><div class="sk-label-container"><div class="sk-label sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-26" type="checkbox" ><label for="sk-estimator-id-26" class="sk-toggleable__label sk-toggleable__label-arrow">Pipeline</label><div class="sk-toggleable__content"><pre>Pipeline(steps=[(&#x27;preprocessor&#x27;,ColumnTransformer(transformers=[(&#x27;num&#x27;,Pipeline(steps=[(&#x27;imputer&#x27;,SimpleImputer(strategy=&#x27;median&#x27;)),(&#x27;std_scaler&#x27;,StandardScaler())]),[&#x27;MonthlyCharges&#x27;,&#x27;TotalCharges&#x27;, &#x27;tenure&#x27;]),(&#x27;cat&#x27;,OneHotEncoder(handle_unknown=&#x27;ignore&#x27;),[&#x27;SeniorCitizen&#x27;, &#x27;gender&#x27;,&#x27;Partner&#x27;, &#x27;Dependents&#x27;,&#x27;PhoneService&#x27;,&#x27;MultipleLines&#x27;,&#x27;InternetService&#x27;,&#x27;OnlineSecurity&#x27;,&#x27;OnlineBackup&#x27;,&#x27;DeviceProtection&#x27;,&#x27;TechSupport&#x27;, &#x27;StreamingTV&#x27;,&#x27;StreamingMovies&#x27;,&#x27;Contract&#x27;,&#x27;PaperlessBilling&#x27;,&#x27;PaymentMethod&#x27;])])),(&#x27;classifier&#x27;,LogisticRegression(class_weight=&#x27;balanced&#x27;, max_iter=300))])</pre></div></div></div><div class="sk-serial"><div class="sk-item sk-dashed-wrapped"><div class="sk-label-container"><div class="sk-label sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-27" type="checkbox" ><label for="sk-estimator-id-27" class="sk-toggleable__label sk-toggleable__label-arrow">preprocessor: ColumnTransformer</label><div class="sk-toggleable__content"><pre>ColumnTransformer(transformers=[(&#x27;num&#x27;,Pipeline(steps=[(&#x27;imputer&#x27;,SimpleImputer(strategy=&#x27;median&#x27;)),(&#x27;std_scaler&#x27;,StandardScaler())]),[&#x27;MonthlyCharges&#x27;, &#x27;TotalCharges&#x27;, &#x27;tenure&#x27;]),(&#x27;cat&#x27;, OneHotEncoder(handle_unknown=&#x27;ignore&#x27;),[&#x27;SeniorCitizen&#x27;, &#x27;gender&#x27;, &#x27;Partner&#x27;,&#x27;Dependents&#x27;, &#x27;PhoneService&#x27;, &#x27;MultipleLines&#x27;,&#x27;InternetService&#x27;, &#x27;OnlineSecurity&#x27;,&#x27;OnlineBackup&#x27;, &#x27;DeviceProtection&#x27;,&#x27;TechSupport&#x27;, &#x27;StreamingTV&#x27;,&#x27;StreamingMovies&#x27;, &#x27;Contract&#x27;,&#x27;PaperlessBilling&#x27;, &#x27;PaymentMethod&#x27;])])</pre></div></div></div><div class="sk-parallel"><div class="sk-parallel-item"><div class="sk-item"><div class="sk-label-container"><div class="sk-label sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-28" type="checkbox" ><label for="sk-estimator-id-28" class="sk-toggleable__label sk-toggleable__label-arrow">num</label><div class="sk-toggleable__content"><pre>[&#x27;MonthlyCharges&#x27;, &#x27;TotalCharges&#x27;, &#x27;tenure&#x27;]</pre></div></div></div><div class="sk-serial"><div class="sk-item"><div class="sk-serial"><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-29" type="checkbox" ><label for="sk-estimator-id-29" class="sk-toggleable__label sk-toggleable__label-arrow">SimpleImputer</label><div class="sk-toggleable__content"><pre>SimpleImputer(strategy=&#x27;median&#x27;)</pre></div></div></div><div class="sk-item"><div class="sk-estimator sk-toggleable"><input 
class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-30" type="checkbox" ><label for="sk-estimator-id-30" class="sk-toggleable__label sk-toggleable__label-arrow">StandardScaler</label><div class="sk-toggleable__content"><pre>StandardScaler()</pre></div></div></div></div></div></div></div></div><div class="sk-parallel-item"><div class="sk-item"><div class="sk-label-container"><div class="sk-label sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-31" type="checkbox" ><label for="sk-estimator-id-31" class="sk-toggleable__label sk-toggleable__label-arrow">cat</label><div class="sk-toggleable__content"><pre>[&#x27;SeniorCitizen&#x27;, &#x27;gender&#x27;, &#x27;Partner&#x27;, &#x27;Dependents&#x27;, &#x27;PhoneService&#x27;, &#x27;MultipleLines&#x27;, &#x27;InternetService&#x27;, &#x27;OnlineSecurity&#x27;, &#x27;OnlineBackup&#x27;, &#x27;DeviceProtection&#x27;, &#x27;TechSupport&#x27;, &#x27;StreamingTV&#x27;, &#x27;StreamingMovies&#x27;, &#x27;Contract&#x27;, &#x27;PaperlessBilling&#x27;, &#x27;PaymentMethod&#x27;]</pre></div></div></div><div class="sk-serial"><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-32" type="checkbox" ><label for="sk-estimator-id-32" class="sk-toggleable__label sk-toggleable__label-arrow">OneHotEncoder</label><div class="sk-toggleable__content"><pre>OneHotEncoder(handle_unknown=&#x27;ignore&#x27;)</pre></div></div></div></div></div></div></div></div><div class="sk-item"><div class="sk-estimator sk-toggleable"><input class="sk-toggleable__control sk-hidden--visually" id="sk-estimator-id-33" type="checkbox" ><label for="sk-estimator-id-33" class="sk-toggleable__label sk-toggleable__label-arrow">LogisticRegression</label><div class="sk-toggleable__content"><pre>LogisticRegression(class_weight=&#x27;balanced&#x27;, max_iter=300)</pre></div></div></div></div></div></div></div> ## Evaluation Results You can find the details about evaluation process and the evaluation results. | Metric | Value | |----------|----------| | accuracy | 0.730305 | | f1 score | 0.730305 | # How to Get Started with the Model Use the code below to get started with the model. <details> <summary> Click to expand </summary> ```python import pickle with open(dtc_pkl_filename, 'rb') as file: clf = pickle.load(file) ``` </details> # Model Card Authors This model card is written by following authors: skops_user # Model Card Contact You can contact the model card authors through following channels: [More Information Needed] # Citation Below you can find information related to citation. **BibTeX:** ``` bibtex @inproceedings{...,year={2020}} ``` # Additional Content ## confusion_matrix ![confusion_matrix](confusion_matrix.png)
Addixz/Sanyx
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
A GPT-2-Medium-sized SoLU model trained on 11.7B tokens of the Pile (training crashed because of dodgy data loaders at 11B tokens and was not resumed, so this run is shorter than the others). 12 layers, d_model=1536.
Adityanawal/testmodel_1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: colab-demo results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # colab-demo This model is a fine-tuned version of [facebook/wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.9910 - Wer: 0.9714 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.1212 | 2.14 | 500 | 3.6706 | 1.0757 | | 0.2303 | 4.27 | 1000 | 2.6849 | 1.0578 | | 0.3003 | 6.41 | 1500 | 3.2261 | 1.0605 | | 0.2705 | 8.55 | 2000 | 3.3483 | 1.0844 | | 0.2178 | 10.68 | 2500 | 3.2000 | 1.0219 | | 0.1875 | 12.82 | 3000 | 2.2454 | 1.0159 | | 0.1792 | 14.96 | 3500 | 2.7510 | 0.9973 | | 0.1477 | 17.09 | 4000 | 2.6716 | 0.9847 | | 0.1232 | 19.23 | 4500 | 2.5939 | 0.9807 | | 0.1051 | 21.37 | 5000 | 3.3308 | 0.9794 | | 0.0847 | 23.5 | 5500 | 3.3430 | 0.9814 | | 0.0809 | 25.64 | 6000 | 3.2566 | 0.9595 | | 0.0642 | 27.78 | 6500 | 3.6392 | 0.9654 | | 0.0566 | 29.91 | 7000 | 3.9910 | 0.9714 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 1.18.3 - Tokenizers 0.13.1
AethiQs-Max/s3-v1-20_epochs
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: wav2vec2-large-english-TIMIT-phoneme_v3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-base960-english-phoneme_v3 This model is a fine-tuned version of [facebook/wav2vec2-large](https://huggingface.co/facebook/wav2vec2-large) on the TIMIT dataset. It achieves the following results on the evaluation set: - Loss: 0.3697 - Cer: 0.0987 ## Training and evaluation data Training: TIMIT dataset training + validation set Evaluation: TIMIT dataset test set ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Per | |:-------------:|:-----:|:----:|:---------------:|:------:| | 2.2678 | 6.94 | 500 | 0.2347 | 0.0874 | | 0.25 | 13.88 | 1000 | 0.3358 | 0.1122 | | 0.2126 | 20.83 | 1500 | 0.3865 | 0.1131 | | 0.1397 | 27.77 | 2000 | 0.4162 | 0.1085 | | 0.0916 | 34.72 | 2500 | 0.4429 | 0.1086 | | 0.0594 | 41.66 | 3000 | 0.3697 | 0.0987 | ### Framework versions - Transformers 4.23.0.dev0 - Pytorch 1.12.1.post201 - Datasets 2.5.2.dev0 - Tokenizers 0.12.1
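For readers who want to reproduce this configuration, the hyperparameters listed above map onto `transformers.TrainingArguments` roughly as follows; this is a sketch only (the output directory is a placeholder, and the TIMIT data loading and CTC model setup are omitted).

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="wav2vec2-large-english-TIMIT-phoneme_v3",  # placeholder
    learning_rate=3e-4,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=2,  # effective train batch size of 64
    warmup_steps=1000,
    num_train_epochs=50,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,  # "Native AMP" mixed precision
)
```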
AhmedHassan19/model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.de split: train args: PAN-X.de metrics: - name: F1 type: f1 value: 0.8440299974721232 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.1754 - F1: 0.8440 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.3536 | 1.0 | 394 | 0.2111 | 0.7964 | | 0.1759 | 2.0 | 788 | 0.1786 | 0.8331 | | 0.1126 | 3.0 | 1182 | 0.1754 | 0.8440 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
Ahren09/distilbert-base-uncased-finetuned-cola
[ "pytorch", "tensorboard", "distilbert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy model-index: - name: distiled_flip_model_emotion results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9305 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distiled_flip_model_emotion This model is a fine-tuned version of [ArafatBHossain/distill_bert_fine_tuned_emotion_dataset](https://huggingface.co/ArafatBHossain/distill_bert_fine_tuned_emotion_dataset) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1619 - Accuracy: 0.9305 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1681 | 1.0 | 2000 | 0.2442 | 0.9255 | | 0.1179 | 2.0 | 4000 | 0.1654 | 0.926 | | 0.0928 | 3.0 | 6000 | 0.1619 | 0.9305 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.11.0 - Datasets 2.6.1 - Tokenizers 0.12.1
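As a usage sketch (not part of the original card), the fine-tuned checkpoint can be queried with the standard text-classification pipeline; the model id below is a placeholder for wherever this checkpoint is published on the Hub.

```python
from transformers import pipeline

# Placeholder model id -- replace with the actual Hub path of this checkpoint.
classifier = pipeline("text-classification", model="<user>/distiled_flip_model_emotion")
print(classifier("I am thrilled with how this turned out!"))
```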
AimB/konlpy_berttokenizer_helsinki
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
The sea in macaron color, the blue sea, the white waves, the pink clouds, the clean asphalt, and the dilapidated stone houses, which are full of oil paintings, very dreamy, addicted to paintings, almost missed the last train
AimB/mT5-en-kr-aihub-netflix
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy model-index: - name: distiled_flip_model_emotion_alpha_0.8_v1 results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.9425 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distiled_flip_model_emotion_alpha_0.8_v1 This model is a fine-tuned version of [ArafatBHossain/distill_bert_fine_tuned_emotion_dataset](https://huggingface.co/ArafatBHossain/distill_bert_fine_tuned_emotion_dataset) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1615 - Accuracy: 0.9425 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.1921 | 1.0 | 2000 | 0.2402 | 0.933 | | 0.129 | 2.0 | 4000 | 0.1789 | 0.94 | | 0.0869 | 3.0 | 6000 | 0.1615 | 0.9425 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.11.0 - Datasets 2.6.1 - Tokenizers 0.12.1
AimB/mT5-en-kr-natural
[ "pytorch", "mt5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
78
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - emotion metrics: - accuracy model-index: - name: distiled_flip_model_emotion_alpha_0.8_epoch5_v1 results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion config: default split: train args: default metrics: - name: Accuracy type: accuracy value: 0.942 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distiled_flip_model_emotion_alpha_0.8_epoch5_v1 This model is a fine-tuned version of [ArafatBHossain/distill_bert_fine_tuned_emotion_dataset](https://huggingface.co/ArafatBHossain/distill_bert_fine_tuned_emotion_dataset) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1476 - Accuracy: 0.942 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.1966 | 1.0 | 2000 | 0.2675 | 0.9315 | | 0.154 | 2.0 | 4000 | 0.2265 | 0.9355 | | 0.1214 | 3.0 | 6000 | 0.1805 | 0.9375 | | 0.078 | 4.0 | 8000 | 0.1401 | 0.9385 | | 0.0652 | 5.0 | 10000 | 0.1476 | 0.942 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.11.0 - Datasets 2.6.1 - Tokenizers 0.12.1
AimB/mT5-en-kr-opus
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - opus_books model-index: - name: mt5-small-finetuned-8epochs-opus_books-en-to-it results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-8epochs-opus_books-en-to-it This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the opus_books dataset. It achieves the following results on the evaluation set: - Loss: 2.7220 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 4.1572 | 1.0 | 3638 | 3.1652 | | 3.7846 | 2.0 | 7276 | 2.9708 | | 3.6056 | 3.0 | 10914 | 2.8673 | | 3.4722 | 4.0 | 14552 | 2.8067 | | 3.4006 | 5.0 | 18190 | 2.7665 | | 3.344 | 6.0 | 21828 | 2.7414 | | 3.3131 | 7.0 | 25466 | 2.7268 | | 3.3251 | 8.0 | 29104 | 2.7220 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
Ajaykannan6/autonlp-manthan-16122692
[ "pytorch", "bart", "text2text-generation", "unk", "dataset:Ajaykannan6/autonlp-data-manthan", "transformers", "autonlp", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "BartForConditionalGeneration" ], "model_type": "bart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 142, "min_length": 56, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-10-15T08:11:26Z
--- license: mit --- ### Vasko style second try on Stable Diffusion via Dreambooth #### model by akolov This is the Stable Diffusion model fine-tuned on the Vasko style second try concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **a painting by vasko style** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/1.jpeg) ![image 1](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/13.jpeg) ![image 2](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/12.jpeg) ![image 3](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/8.jpeg) ![image 4](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/17.jpeg) ![image 5](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/9.jpeg) ![image 6](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/2.jpeg) ![image 7](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/0.jpeg) ![image 8](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/14.jpeg) ![image 9](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/4.jpeg) ![image 10](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/10.jpeg) ![image 11](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/6.jpeg) ![image 12](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/11.jpeg) ![image 13](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/5.jpeg) ![image 14](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/3.jpeg) ![image 15](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/15.jpeg) ![image 16](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/16.jpeg) ![image 17](https://huggingface.co/akolov/vasko-style-second-try/resolve/main/concept_images/7.jpeg)
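A minimal `diffusers` sketch for this concept is below; it assumes the repository ships a full Stable Diffusion pipeline (as the Dreambooth training notebook produces) and uses the `instance_prompt` given above.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "akolov/vasko-style-second-try", torch_dtype=torch.float16
).to("cuda")

# Illustrative prompt built around the instance prompt from the card.
image = pipe("a painting by vasko style, a quiet harbor at dusk").images[0]
image.save("vasko_style_sample.png")
```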
Akash7897/distilbert-base-uncased-finetuned-sst2
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 229.93 +/- 43.18 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
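A sketch of the missing code block above is given below; the `repo_id` and `filename` are placeholders, since the card does not state where the checkpoint is stored.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder repo_id / filename -- point these at the actual uploaded checkpoint.
checkpoint = load_from_hub(repo_id="<user>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

# Evaluate the loaded agent on the environment named in the card.
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```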
Akash7897/gpt2-wikitext2
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2022-10-15T09:13:48Z
architectural section drawing 3d parametric design for elderly people living with courtyards in between two forms with small bridges
Akiva/Joke
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en tags: - stable-diffusion - text-to-image license: unknown inference: false --- # Novelai-Diffusion Novelai-Diffusion is a latent diffusion model which can create best quality anime image. Here is the diffusers version of the model. Just to make it easier to use Novelai-Diffusion for all. # Gradio & Colab Demo There is a [Gradio](https://github.com/gradio-app/gradio) Web UI and Colab with Diffusers to run Novelai Diffusion: [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1fNscA4Xqga8DZVPYZo17OUyzk7tzOZEw) Run Novelai Diffusion on TPU !!! (Beta): [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1r_ouUxFIFQzTJstnSJT0jGYNObc6UGkA?usp=sharing) ## Example Code ### pytorch ```python from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained("animelover/novelai-diffusion", custom_pipeline="waifu-research-department/long-prompt-weighting-pipeline", torch_dtype=torch.float16) pipe.safety_checker = None # we don't need safety checker. you can add not safe words to negative prompt instead. pipe = pipe.to("cuda") prompt = "best quality, masterpiece, 1girl, cute, looking at viewer, smiling, open mouth, white hair, red eyes, white kimono, sakura petal" neg_prompt = "lowres, bad anatomy, error body, error hair, error arm, error hands, bad hands, error fingers, bad fingers, missing fingers, error legs, bad legs, multiple legs, missing legs, error lighting, error shadow, error reflection, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry" # we don't need autocast here, because autocast will make speed slow down. image = pipe.text2img(prompt,negative_prompt=neg_prompt, width=512,height=768,max_embeddings_multiples=5,guidance_scale=12).images[0] image.save("test.png") ``` ### onnxruntime ```python from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("animelover/novelai-diffusion", revision="onnx16", custom_pipeline="waifu-research-department/onnx-long-prompt-weighting-pipeline", provider="CUDAExecutionProvider") pipe.safety_checker = None # we don't need safety checker. you can add not safe words to negative prompt instead. prompt = "best quality, masterpiece, 1girl, cute, looking at viewer, smiling, open mouth, white hair, red eyes, white kimono, sakura petal" neg_prompt = "lowres, bad anatomy, error body, error hair, error arm, error hands, bad hands, error fingers, bad fingers, missing fingers, error legs, bad legs, multiple legs, missing legs, error lighting, error shadow, error reflection, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry" image = pipe.text2img(prompt,negative_prompt=neg_prompt, width=512,height=768,max_embeddings_multiples=5,guidance_scale=12).images[0] image.save("test.png") ``` note: we can input long prompt and adjust weighting by using "waifu-research-department/long-prompt-weighting-pipeline". it requires diffusers>=0.4.0 . ## Acknowledgements Thanks to [novelai](https://novelai.net/) for this awesome model. Support them if you can.
AkshatSurolia/BEiT-FaceMask-Finetuned
[ "pytorch", "beit", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible" ]
image-classification
{ "architectures": [ "BeitForImageClassification" ], "model_type": "beit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
239
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - xnli model-index: - name: distilbert_xnli_hpu results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_xnli_hpu This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on the xnli dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - total_train_batch_size: 64 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06 - lr_scheduler_type: linear - num_epochs: 2.0 ### Training results ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.0a0+git7392344 - Datasets 2.6.1 - Tokenizers 0.13.1
AkshatSurolia/ConvNeXt-FaceMask-Finetuned
[ "pytorch", "safetensors", "convnext", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
image-classification
{ "architectures": [ "ConvNextForImageClassification" ], "model_type": "convnext", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
56
null
--- license: mit --- ### arwijn on Stable Diffusion This is the `arwijn` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![arwijn 0](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/1.jpeg) ![arwijn 1](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/8.jpeg) ![arwijn 2](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/9.jpeg) ![arwijn 3](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/2.jpeg) ![arwijn 4](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/0.jpeg) ![arwijn 5](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/4.jpeg) ![arwijn 6](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/6.jpeg) ![arwijn 7](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/5.jpeg) ![arwijn 8](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/3.jpeg) ![arwijn 9](https://huggingface.co/sd-concepts-library/arwijn/resolve/main/concept_images/7.jpeg)
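Besides the notebooks linked above, newer `diffusers` releases can load the learned embedding directly; a sketch is below (the placeholder token is assumed to be `<arwijn>`, the usual convention in this concepts library, and the base model id is only an example).

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load the textual-inversion embedding from this concept repository.
pipe.load_textual_inversion("sd-concepts-library/arwijn")

image = pipe("a photo of <arwijn> on a wooden table").images[0]
image.save("arwijn_sample.png")
```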
AkshatSurolia/DeiT-FaceMask-Finetuned
[ "pytorch", "deit", "image-classification", "dataset:Face-Mask18K", "transformers", "license:apache-2.0", "autotrain_compatible" ]
image-classification
{ "architectures": [ "DeiTForImageClassification" ], "model_type": "deit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
46
null
--- license: mit --- ### GBA FE Class Cards on Stable Diffusion This is the `classcard` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![classcard 0](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/146.jpeg) ![classcard 1](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/40.jpeg) ![classcard 2](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/246.jpeg) ![classcard 3](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/182.jpeg) ![classcard 4](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/1.jpeg) ![classcard 5](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/13.jpeg) ![classcard 6](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/253.jpeg) ![classcard 7](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/12.jpeg) ![classcard 8](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/448.jpeg) ![classcard 9](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/377.jpeg) ![classcard 10](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/31.jpeg) ![classcard 11](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/405.jpeg) ![classcard 12](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/37.jpeg) ![classcard 13](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/300.jpeg) ![classcard 14](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/280.jpeg) ![classcard 15](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/462.jpeg) ![classcard 16](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/339.jpeg) ![classcard 17](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/173.jpeg) ![classcard 18](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/344.jpeg) ![classcard 19](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/170.jpeg) ![classcard 20](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/149.jpeg) ![classcard 21](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/335.jpeg) ![classcard 22](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/249.jpeg) ![classcard 23](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/420.jpeg) ![classcard 24](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/274.jpeg) ![classcard 
25](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/127.jpeg) ![classcard 26](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/268.jpeg) ![classcard 27](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/43.jpeg) ![classcard 28](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/345.jpeg) ![classcard 29](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/456.jpeg) ![classcard 30](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/360.jpeg) ![classcard 31](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/310.jpeg) ![classcard 32](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/68.jpeg) ![classcard 33](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/480.jpeg) ![classcard 34](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/450.jpeg) ![classcard 35](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/258.jpeg) ![classcard 36](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/74.jpeg) ![classcard 37](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/394.jpeg) ![classcard 38](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/157.jpeg) ![classcard 39](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/114.jpeg) ![classcard 40](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/356.jpeg) ![classcard 41](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/48.jpeg) ![classcard 42](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/376.jpeg) ![classcard 43](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/374.jpeg) ![classcard 44](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/230.jpeg) ![classcard 45](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/160.jpeg) ![classcard 46](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/364.jpeg) ![classcard 47](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/76.jpeg) ![classcard 48](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/333.jpeg) ![classcard 49](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/476.jpeg) ![classcard 50](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/164.jpeg) ![classcard 51](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/216.jpeg) ![classcard 52](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/87.jpeg) ![classcard 53](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/18.jpeg) ![classcard 54](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/304.jpeg) ![classcard 
55](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/282.jpeg) ![classcard 56](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/286.jpeg) ![classcard 57](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/45.jpeg) ![classcard 58](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/208.jpeg) ![classcard 59](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/441.jpeg) ![classcard 60](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/204.jpeg) ![classcard 61](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/95.jpeg) ![classcard 62](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/135.jpeg) ![classcard 63](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/464.jpeg) ![classcard 64](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/144.jpeg) ![classcard 65](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/390.jpeg) ![classcard 66](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/140.jpeg) ![classcard 67](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/166.jpeg) ![classcard 68](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/237.jpeg) ![classcard 69](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/199.jpeg) ![classcard 70](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/459.jpeg) ![classcard 71](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/219.jpeg) ![classcard 72](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/402.jpeg) ![classcard 73](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/440.jpeg) ![classcard 74](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/454.jpeg) ![classcard 75](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/185.jpeg) ![classcard 76](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/28.jpeg) ![classcard 77](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/453.jpeg) ![classcard 78](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/83.jpeg) ![classcard 79](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/129.jpeg) ![classcard 80](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/380.jpeg) ![classcard 81](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/54.jpeg) ![classcard 82](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/254.jpeg) ![classcard 83](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/366.jpeg) ![classcard 84](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/278.jpeg) ![classcard 
85](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/461.jpeg) ![classcard 86](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/8.jpeg) ![classcard 87](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/365.jpeg) ![classcard 88](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/197.jpeg) ![classcard 89](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/159.jpeg) ![classcard 90](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/338.jpeg) ![classcard 91](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/429.jpeg) ![classcard 92](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/293.jpeg) ![classcard 93](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/428.jpeg) ![classcard 94](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/60.jpeg) ![classcard 95](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/470.jpeg) ![classcard 96](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/473.jpeg) ![classcard 97](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/194.jpeg) ![classcard 98](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/23.jpeg) ![classcard 99](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/112.jpeg) ![classcard 100](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/396.jpeg) ![classcard 101](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/235.jpeg) ![classcard 102](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/169.jpeg) ![classcard 103](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/321.jpeg) ![classcard 104](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/260.jpeg) ![classcard 105](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/103.jpeg) ![classcard 106](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/151.jpeg) ![classcard 107](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/34.jpeg) ![classcard 108](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/325.jpeg) ![classcard 109](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/410.jpeg) ![classcard 110](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/51.jpeg) ![classcard 111](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/236.jpeg) ![classcard 112](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/250.jpeg) ![classcard 113](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/257.jpeg) ![classcard 114](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/433.jpeg) ![classcard 
115](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/384.jpeg) ![classcard 116](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/133.jpeg) ![classcard 117](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/279.jpeg) ![classcard 118](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/115.jpeg) ![classcard 119](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/41.jpeg) ![classcard 120](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/288.jpeg) ![classcard 121](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/154.jpeg) ![classcard 122](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/190.jpeg) ![classcard 123](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/305.jpeg) ![classcard 124](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/116.jpeg) ![classcard 125](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/468.jpeg) ![classcard 126](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/17.jpeg) ![classcard 127](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/223.jpeg) ![classcard 128](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/446.jpeg) ![classcard 129](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/232.jpeg) ![classcard 130](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/172.jpeg) ![classcard 131](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/407.jpeg) ![classcard 132](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/225.jpeg) ![classcard 133](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/57.jpeg) ![classcard 134](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/77.jpeg) ![classcard 135](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/66.jpeg) ![classcard 136](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/65.jpeg) ![classcard 137](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/49.jpeg) ![classcard 138](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/121.jpeg) ![classcard 139](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/379.jpeg) ![classcard 140](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/466.jpeg) ![classcard 141](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/382.jpeg) ![classcard 142](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/213.jpeg) ![classcard 143](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/9.jpeg) ![classcard 144](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/202.jpeg) ![classcard 
145](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/210.jpeg) ![classcard 146](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/316.jpeg) ![classcard 147](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/359.jpeg) ![classcard 148](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/419.jpeg) ![classcard 149](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/207.jpeg) ![classcard 150](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/266.jpeg) ![classcard 151](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/399.jpeg) ![classcard 152](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/416.jpeg) ![classcard 153](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/413.jpeg) ![classcard 154](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/171.jpeg) ![classcard 155](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/181.jpeg) ![classcard 156](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/78.jpeg) ![classcard 157](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/58.jpeg) ![classcard 158](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/123.jpeg) ![classcard 159](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/153.jpeg) ![classcard 160](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/52.jpeg) ![classcard 161](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/389.jpeg) ![classcard 162](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/64.jpeg) ![classcard 163](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/163.jpeg) ![classcard 164](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/85.jpeg) ![classcard 165](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/392.jpeg) ![classcard 166](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/334.jpeg) ![classcard 167](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/30.jpeg) ![classcard 168](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/451.jpeg) ![classcard 169](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/73.jpeg) ![classcard 170](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/343.jpeg) ![classcard 171](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/152.jpeg) ![classcard 172](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/2.jpeg) ![classcard 173](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/137.jpeg) ![classcard 174](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/36.jpeg) ![classcard 
175](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/486.jpeg) ![classcard 176](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/352.jpeg) ![classcard 177](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/270.jpeg) ![classcard 178](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/224.jpeg) ![classcard 179](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/307.jpeg) ![classcard 180](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/245.jpeg) ![classcard 181](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/263.jpeg) ![classcard 182](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/403.jpeg) ![classcard 183](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/414.jpeg) ![classcard 184](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/261.jpeg) ![classcard 185](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/427.jpeg) ![classcard 186](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/145.jpeg) ![classcard 187](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/67.jpeg) ![classcard 188](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/341.jpeg) ![classcard 189](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/330.jpeg) ![classcard 190](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/436.jpeg) ![classcard 191](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/362.jpeg) ![classcard 192](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/291.jpeg) ![classcard 193](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/474.jpeg) ![classcard 194](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/342.jpeg) ![classcard 195](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/108.jpeg) ![classcard 196](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/0.jpeg) ![classcard 197](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/55.jpeg) ![classcard 198](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/29.jpeg) ![classcard 199](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/14.jpeg) ![classcard 200](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/25.jpeg) ![classcard 201](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/432.jpeg) ![classcard 202](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/94.jpeg) ![classcard 203](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/50.jpeg) ![classcard 204](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/417.jpeg) ![classcard 
205](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/431.jpeg) ![classcard 206](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/148.jpeg) ![classcard 207](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/158.jpeg) ![classcard 208](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/469.jpeg) ![classcard 209](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/277.jpeg) ![classcard 210](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/244.jpeg) ![classcard 211](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/294.jpeg) ![classcard 212](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/458.jpeg) ![classcard 213](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/422.jpeg) ![classcard 214](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/251.jpeg) ![classcard 215](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/147.jpeg) ![classcard 216](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/122.jpeg) ![classcard 217](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/275.jpeg) ![classcard 218](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/92.jpeg) ![classcard 219](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/239.jpeg) ![classcard 220](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/332.jpeg) ![classcard 221](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/104.jpeg) ![classcard 222](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/177.jpeg) ![classcard 223](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/175.jpeg) ![classcard 224](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/368.jpeg) ![classcard 225](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/39.jpeg) ![classcard 226](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/4.jpeg) ![classcard 227](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/61.jpeg) ![classcard 228](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/228.jpeg) ![classcard 229](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/176.jpeg) ![classcard 230](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/227.jpeg) ![classcard 231](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/240.jpeg) ![classcard 232](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/385.jpeg) ![classcard 233](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/222.jpeg) ![classcard 234](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/255.jpeg) ![classcard 
235](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/238.jpeg) ![classcard 236](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/292.jpeg) ![classcard 237](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/452.jpeg) ![classcard 238](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/162.jpeg) ![classcard 239](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/284.jpeg) ![classcard 240](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/231.jpeg) ![classcard 241](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/259.jpeg) ![classcard 242](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/435.jpeg) ![classcard 243](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/273.jpeg) ![classcard 244](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/361.jpeg) ![classcard 245](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/337.jpeg) ![classcard 246](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/98.jpeg) ![classcard 247](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/10.jpeg) ![classcard 248](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/132.jpeg) ![classcard 249](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/124.jpeg) ![classcard 250](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/370.jpeg) ![classcard 251](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/156.jpeg) ![classcard 252](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/113.jpeg) ![classcard 253](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/439.jpeg) ![classcard 254](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/6.jpeg) ![classcard 255](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/324.jpeg) ![classcard 256](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/404.jpeg) ![classcard 257](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/478.jpeg) ![classcard 258](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/93.jpeg) ![classcard 259](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/192.jpeg) ![classcard 260](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/408.jpeg) ![classcard 261](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/100.jpeg) ![classcard 262](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/386.jpeg) ![classcard 263](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/375.jpeg) ![classcard 264](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/465.jpeg) ![classcard 
265](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/393.jpeg) ![classcard 266](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/206.jpeg) ![classcard 267](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/303.jpeg) ![classcard 268](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/24.jpeg) ![classcard 269](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/445.jpeg) ![classcard 270](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/319.jpeg) ![classcard 271](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/11.jpeg) ![classcard 272](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/90.jpeg) ![classcard 273](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/179.jpeg) ![classcard 274](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/80.jpeg) ![classcard 275](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/449.jpeg) ![classcard 276](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/119.jpeg) ![classcard 277](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/318.jpeg) ![classcard 278](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/308.jpeg) ![classcard 279](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/320.jpeg) ![classcard 280](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/110.jpeg) ![classcard 281](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/214.jpeg) ![classcard 282](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/264.jpeg) ![classcard 283](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/328.jpeg) ![classcard 284](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/471.jpeg) ![classcard 285](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/387.jpeg) ![classcard 286](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/32.jpeg) ![classcard 287](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/21.jpeg) ![classcard 288](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/353.jpeg) ![classcard 289](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/460.jpeg) ![classcard 290](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/301.jpeg) ![classcard 291](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/444.jpeg) ![classcard 292](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/53.jpeg) ![classcard 293](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/400.jpeg) ![classcard 294](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/421.jpeg) ![classcard 
295](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/42.jpeg) ![classcard 296](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/69.jpeg) ![classcard 297](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/242.jpeg) ![classcard 298](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/322.jpeg) ![classcard 299](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/89.jpeg) ![classcard 300](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/309.jpeg) ![classcard 301](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/5.jpeg) ![classcard 302](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/388.jpeg) ![classcard 303](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/475.jpeg) ![classcard 304](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/82.jpeg) ![classcard 305](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/272.jpeg) ![classcard 306](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/327.jpeg) ![classcard 307](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/59.jpeg) ![classcard 308](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/479.jpeg) ![classcard 309](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/296.jpeg) ![classcard 310](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/62.jpeg) ![classcard 311](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/424.jpeg) ![classcard 312](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/281.jpeg) ![classcard 313](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/351.jpeg) ![classcard 314](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/434.jpeg) ![classcard 315](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/283.jpeg) ![classcard 316](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/3.jpeg) ![classcard 317](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/269.jpeg) ![classcard 318](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/276.jpeg) ![classcard 319](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/120.jpeg) ![classcard 320](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/189.jpeg) ![classcard 321](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/128.jpeg) ![classcard 322](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/81.jpeg) ![classcard 323](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/150.jpeg) ![classcard 324](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/406.jpeg) ![classcard 
325](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/395.jpeg) ![classcard 326](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/99.jpeg) ![classcard 327](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/302.jpeg) ![classcard 328](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/346.jpeg) ![classcard 329](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/63.jpeg) ![classcard 330](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/72.jpeg) ![classcard 331](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/109.jpeg) ![classcard 332](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/200.jpeg) ![classcard 333](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/96.jpeg) ![classcard 334](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/285.jpeg) ![classcard 335](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/323.jpeg) ![classcard 336](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/56.jpeg) ![classcard 337](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/118.jpeg) ![classcard 338](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/209.jpeg) ![classcard 339](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/252.jpeg) ![classcard 340](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/155.jpeg) ![classcard 341](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/168.jpeg) ![classcard 342](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/136.jpeg) ![classcard 343](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/467.jpeg) ![classcard 344](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/193.jpeg) ![classcard 345](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/243.jpeg) ![classcard 346](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/306.jpeg) ![classcard 347](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/398.jpeg) ![classcard 348](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/26.jpeg) ![classcard 349](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/381.jpeg) ![classcard 350](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/298.jpeg) ![classcard 351](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/167.jpeg) ![classcard 352](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/233.jpeg) ![classcard 353](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/331.jpeg) ![classcard 354](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/447.jpeg) ![classcard 
355](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/415.jpeg) ![classcard 356](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/226.jpeg) ![classcard 357](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/455.jpeg) ![classcard 358](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/97.jpeg) ![classcard 359](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/358.jpeg) ![classcard 360](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/19.jpeg) ![classcard 361](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/105.jpeg) ![classcard 362](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/383.jpeg) ![classcard 363](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/125.jpeg) ![classcard 364](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/131.jpeg) ![classcard 365](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/130.jpeg) ![classcard 366](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/256.jpeg) ![classcard 367](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/47.jpeg) ![classcard 368](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/212.jpeg) ![classcard 369](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/347.jpeg) ![classcard 370](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/71.jpeg) ![classcard 371](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/165.jpeg) ![classcard 372](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/482.jpeg) ![classcard 373](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/191.jpeg) ![classcard 374](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/314.jpeg) ![classcard 375](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/348.jpeg) ![classcard 376](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/20.jpeg) ![classcard 377](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/139.jpeg) ![classcard 378](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/184.jpeg) ![classcard 379](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/161.jpeg) ![classcard 380](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/290.jpeg) ![classcard 381](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/15.jpeg) ![classcard 382](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/326.jpeg) ![classcard 383](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/312.jpeg) ![classcard 384](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/262.jpeg) ![classcard 
385](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/483.jpeg) ![classcard 386](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/203.jpeg) ![classcard 387](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/371.jpeg) ![classcard 388](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/215.jpeg) ![classcard 389](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/315.jpeg) ![classcard 390](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/442.jpeg) ![classcard 391](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/16.jpeg) ![classcard 392](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/217.jpeg) ![classcard 393](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/44.jpeg) ![classcard 394](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/33.jpeg) ![classcard 395](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/117.jpeg) ![classcard 396](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/220.jpeg) ![classcard 397](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/425.jpeg) ![classcard 398](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/38.jpeg) ![classcard 399](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/248.jpeg) ![classcard 400](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/357.jpeg) ![classcard 401](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/443.jpeg) ![classcard 402](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/317.jpeg) ![classcard 403](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/485.jpeg) ![classcard 404](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/22.jpeg) ![classcard 405](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/295.jpeg) ![classcard 406](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/423.jpeg) ![classcard 407](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/106.jpeg) ![classcard 408](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/329.jpeg) ![classcard 409](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/340.jpeg) ![classcard 410](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/195.jpeg) ![classcard 411](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/349.jpeg) ![classcard 412](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/336.jpeg) ![classcard 413](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/201.jpeg) ![classcard 414](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/289.jpeg) ![classcard 
415](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/378.jpeg) ![classcard 416](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/373.jpeg) ![classcard 417](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/86.jpeg) ![classcard 418](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/198.jpeg) ![classcard 419](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/174.jpeg) ![classcard 420](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/188.jpeg) ![classcard 421](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/412.jpeg) ![classcard 422](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/430.jpeg) ![classcard 423](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/311.jpeg) ![classcard 424](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/355.jpeg) ![classcard 425](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/363.jpeg) ![classcard 426](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/211.jpeg) ![classcard 427](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/88.jpeg) ![classcard 428](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/484.jpeg) ![classcard 429](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/265.jpeg) ![classcard 430](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/354.jpeg) ![classcard 431](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/107.jpeg) ![classcard 432](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/75.jpeg) ![classcard 433](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/79.jpeg) ![classcard 434](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/372.jpeg) ![classcard 435](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/221.jpeg) ![classcard 436](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/472.jpeg) ![classcard 437](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/141.jpeg) ![classcard 438](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/297.jpeg) ![classcard 439](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/267.jpeg) ![classcard 440](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/369.jpeg) ![classcard 441](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/401.jpeg) ![classcard 442](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/247.jpeg) ![classcard 443](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/27.jpeg) ![classcard 444](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/70.jpeg) ![classcard 
445](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/91.jpeg) ![classcard 446](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/218.jpeg) ![classcard 447](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/411.jpeg) ![classcard 448](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/234.jpeg) ![classcard 449](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/142.jpeg) ![classcard 450](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/180.jpeg) ![classcard 451](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/299.jpeg) ![classcard 452](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/205.jpeg) ![classcard 453](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/457.jpeg) ![classcard 454](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/287.jpeg) ![classcard 455](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/350.jpeg) ![classcard 456](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/134.jpeg) ![classcard 457](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/391.jpeg) ![classcard 458](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/186.jpeg) ![classcard 459](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/437.jpeg) ![classcard 460](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/477.jpeg) ![classcard 461](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/187.jpeg) ![classcard 462](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/84.jpeg) ![classcard 463](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/196.jpeg) ![classcard 464](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/7.jpeg) ![classcard 465](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/35.jpeg) ![classcard 466](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/183.jpeg) ![classcard 467](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/397.jpeg) ![classcard 468](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/229.jpeg) ![classcard 469](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/101.jpeg) ![classcard 470](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/46.jpeg) ![classcard 471](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/111.jpeg) ![classcard 472](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/367.jpeg) ![classcard 473](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/463.jpeg) ![classcard 474](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/418.jpeg) ![classcard 
475](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/143.jpeg) ![classcard 476](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/241.jpeg) ![classcard 477](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/138.jpeg) ![classcard 478](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/126.jpeg) ![classcard 479](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/438.jpeg) ![classcard 480](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/481.jpeg) ![classcard 481](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/313.jpeg) ![classcard 482](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/271.jpeg) ![classcard 483](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/409.jpeg) ![classcard 484](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/178.jpeg) ![classcard 485](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/102.jpeg) ![classcard 486](https://huggingface.co/sd-concepts-library/gba-fe-class-cards/resolve/main/concept_images/426.jpeg)
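The card above describes loading the `classcard` concept through the linked Colab notebooks. For local use, a learned textual-inversion embedding like this one can also be pulled into a diffusers pipeline. The following is only a hedged sketch, not part of the original card: it assumes a diffusers release that provides `load_textual_inversion`, assumes `runwayml/stable-diffusion-v1-5` as the base model, and assumes the learned placeholder token is `<classcard>`.

```python
# Hedged sketch: load the textual-inversion concept into a Stable Diffusion pipeline.
# Assumptions: diffusers with load_textual_inversion support, CUDA available,
# and <classcard> as the placeholder token stored in the embedding.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",  # assumed base checkpoint; the card does not name one
    torch_dtype=torch.float16,
).to("cuda")

# Download and register the learned embedding from the concept repository.
pipe.load_textual_inversion("sd-concepts-library/gba-fe-class-cards")

# Use the concept token as a style, as the card suggests.
image = pipe("a portrait of a knight in the style of <classcard>").images[0]
image.save("classcard_knight.png")
```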
AkshatSurolia/ICD-10-Code-Prediction
[ "pytorch", "bert", "transformers", "text-classification", "license:apache-2.0", "has_space" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
994
null
--- language: - pt license: mit tags: - generated_from_trainer datasets: - lener_br metrics: - precision - recall - f1 - accuracy model-index: - name: xlm-roberta-base-finetuned-lener-br results: - task: name: Token Classification type: token-classification dataset: name: lener_br type: lener_br config: lener_br split: train args: lener_br metrics: - name: Precision type: precision value: 0.844312854675549 - name: Recall type: recall value: 0.8844662703540966 - name: F1 type: f1 value: 0.8639232517041151 - name: Accuracy type: accuracy value: 0.97516697297055 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-lener-br This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the lener_br dataset. It achieves the following results on the evaluation set: - Loss: nan - Precision: 0.8443 - Recall: 0.8845 - F1: 0.8639 - Accuracy: 0.9752 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0832 | 1.0 | 1957 | nan | 0.6752 | 0.8625 | 0.7575 | 0.9578 | | 0.0477 | 2.0 | 3914 | nan | 0.8391 | 0.8839 | 0.8609 | 0.9704 | | 0.029 | 3.0 | 5871 | nan | 0.7530 | 0.9059 | 0.8224 | 0.9648 | | 0.0223 | 4.0 | 7828 | nan | 0.7488 | 0.8744 | 0.8067 | 0.9659 | | 0.0234 | 5.0 | 9785 | nan | 0.7216 | 0.8783 | 0.7923 | 0.9644 | | 0.0171 | 6.0 | 11742 | nan | 0.7072 | 0.8969 | 0.7908 | 0.9642 | | 0.0121 | 7.0 | 13699 | nan | 0.7769 | 0.8775 | 0.8241 | 0.9681 | | 0.0093 | 8.0 | 15656 | nan | 0.7218 | 0.8772 | 0.7920 | 0.9621 | | 0.0074 | 9.0 | 17613 | nan | 0.8241 | 0.8767 | 0.8496 | 0.9739 | | 0.0055 | 10.0 | 19570 | nan | 0.7369 | 0.8801 | 0.8021 | 0.9638 | | 0.0055 | 11.0 | 21527 | nan | 0.8443 | 0.8845 | 0.8639 | 0.9752 | | 0.0029 | 12.0 | 23484 | nan | 0.8338 | 0.8935 | 0.8626 | 0.9753 | | 0.0026 | 13.0 | 25441 | nan | 0.7721 | 0.8992 | 0.8308 | 0.9694 | | 0.004 | 14.0 | 27398 | nan | 0.7466 | 0.8886 | 0.8114 | 0.9672 | | 0.0006 | 15.0 | 29355 | nan | 0.7518 | 0.8995 | 0.8190 | 0.9686 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
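Since the card above only lists training hyperparameters and metrics, a short inference sketch may help illustrate how such a token-classification checkpoint is typically used. This is an assumption-laden example: the model path below is a placeholder for wherever the fine-tuned weights are stored, and it relies on the standard transformers pipeline API.

```python
# Hedged inference sketch for the token-classification fine-tune described above.
# Replace the placeholder path with the actual Hub id or local checkpoint directory.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="path/to/xlm-roberta-base-finetuned-lener-br",  # placeholder
    aggregation_strategy="simple",  # merge sub-word pieces into whole entity spans
)

# LeNER-Br is a Brazilian legal-domain NER dataset, so a Portuguese legal sentence is a natural test.
print(ner("Trata-se de recurso especial interposto contra acórdão do Tribunal de Justiça."))
```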
Ale/Alen
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en thumbnail: https://github.com/borisdayma/huggingtweets/blob/master/img/logo.png?raw=true tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1571596376575823874/UlqdnBSB_400x400.jpg&#39;)"> </div> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1580493717915435009/cdj6zH7__400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI CYBORG 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Toskm & Daily Pokémon Waifus 🔞</div> <div style="text-align: center; font-size: 14px;">@pkmnwaifuhentai-tosk_toskm</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Toskm & Daily Pokémon Waifus 🔞. | Data | Toskm | Daily Pokémon Waifus 🔞 | | --- | --- | --- | | Tweets downloaded | 3188 | 153 | | Retweets | 1472 | 4 | | Short tweets | 310 | 18 | | Tweets kept | 1406 | 131 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/qqqgtqk1/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @pkmnwaifuhentai-tosk_toskm's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/2j6nsmfi) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/2j6nsmfi/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/pkmnwaifuhentai-tosk_toskm') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. 
[![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
Aleenbo/Arcane
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: BARTkrame-abstract-mT5 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BARTkrame-abstract-mT5 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.2557 - Rouge1: 0.2223 - Rouge2: 0.0735 - Rougel: 0.1826 - Rougelsum: 0.1849 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | 4.9563 | 1.0 | 1250 | 2.3674 | 0.2206 | 0.0755 | 0.1853 | 0.1869 | | 3.1856 | 2.0 | 2500 | 2.2988 | 0.2296 | 0.0757 | 0.1888 | 0.1910 | | 3.0083 | 3.0 | 3750 | 2.2668 | 0.2201 | 0.0728 | 0.1816 | 0.1832 | | 2.9296 | 4.0 | 5000 | 2.2557 | 0.2223 | 0.0735 | 0.1826 | 0.1849 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
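A minimal usage sketch for the card above (the hub namespace below is a placeholder, not taken from the card): the fine-tuned mT5 checkpoint can be loaded with the standard `summarization` pipeline.

```python
from transformers import pipeline

# Placeholder repository id -- substitute the actual namespace that hosts this checkpoint.
summarizer = pipeline("summarization", model="<namespace>/BARTkrame-abstract-mT5")

document = "Long abstract or article text to condense goes here..."
summary = summarizer(document, max_length=64, min_length=10)[0]["summary_text"]
print(summary)
```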
AlekseyKulnevich/Pegasus-HeaderGeneration
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- language: en widget: - text: "As manager, I want to Find pet by ID through /pet/{petId}. Returns a single pet." - text: "As operator, I want to Create user through /user. #/definitions/User This can only be done by the logged in user." license: mit --- # ner-roles-openapi: a model fine-tuned from distilbert-base-uncased for the NER task. ## Introduction [ner-roles-openapi] is a NER model fine-tuned from distilbert-base-uncased on a synthetic dataset based on SCRUM user stories. The model was trained on a synthetic dataset (~80 sentences). It was validated on OpenAPI/Swagger data and outperformed other models on this type of data specifically. ## Training data Training data was classified as follows: Abbreviation|Description -|- O|Outside of a named entity ENTITY|Entity provided by OpenAPI ACTION|Action required by a user/role ROLE|Role predefined to use APIs
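As a sketch of how the checkpoint above could be queried (the repository id is a placeholder, not taken from the card), the widget examples map directly onto a token-classification pipeline:

```python
from transformers import pipeline

# Placeholder repository id -- replace with the actual hub name of this NER checkpoint.
ner = pipeline("ner", model="<namespace>/ner-roles-openapi", aggregation_strategy="simple")

sentence = "As manager, I want to Find pet by ID through /pet/{petId}. Returns a single pet."
for entity in ner(sentence):
    # Expected groups follow the label set above: ROLE, ACTION, ENTITY.
    print(entity["entity_group"], "->", entity["word"], round(float(entity["score"]), 3))
```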
Alerosae/SocratesGPT-2
[ "pytorch", "gpt2", "feature-extraction", "en", "transformers", "text-generation" ]
text-generation
{ "architectures": [ "GPT2Model" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit --- ### Toyota Sera on Stable Diffusion This is the `<toyota-sera>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<toyota-sera> 0](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/1.jpeg) ![<toyota-sera> 1](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/8.jpeg) ![<toyota-sera> 2](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/9.jpeg) ![<toyota-sera> 3](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/2.jpeg) ![<toyota-sera> 4](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/0.jpeg) ![<toyota-sera> 5](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/4.jpeg) ![<toyota-sera> 6](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/6.jpeg) ![<toyota-sera> 7](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/5.jpeg) ![<toyota-sera> 8](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/3.jpeg) ![<toyota-sera> 9](https://huggingface.co/sd-concepts-library/toyota-sera/resolve/main/concept_images/7.jpeg)
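A minimal inference sketch for the concept above, assuming a recent `diffusers` release and Stable Diffusion v1.5 as the base checkpoint (the base model choice is an assumption, not stated in the card):

```python
import torch
from diffusers import StableDiffusionPipeline

# Base checkpoint is an assumption; any Stable Diffusion v1.x pipeline should work.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Pull the learned <toyota-sera> embedding from the concepts library repository.
pipe.load_textual_inversion("sd-concepts-library/toyota-sera")

image = pipe("a photo of a <toyota-sera> parked by the sea, golden hour").images[0]
image.save("toyota_sera.png")
```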
Alessandro/model_name
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - imagefolder model-index: - name: HandWritten Medical Prescription Text Extraction Using Donut (Document Understanding Transformers ) results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # donut-base-Medical_Handwritten_Prescriptions_Information_Extraction This model is a fine-tuned version of [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
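A hedged inference sketch for the fine-tuned Donut checkpoint described above (the repository id and the task prompt are placeholders; the actual fine-tuned prompt token is not stated in the card):

```python
import torch
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

# Placeholder repository id -- substitute the actual fine-tuned checkpoint.
repo = "<namespace>/donut-base-Medical_Handwritten_Prescriptions_Information_Extraction"
processor = DonutProcessor.from_pretrained(repo)
model = VisionEncoderDecoderModel.from_pretrained(repo)

image = Image.open("prescription.jpg").convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values

# Donut is conditioned on a task prompt; "<s>" is a generic fallback here.
decoder_input_ids = processor.tokenizer(
    "<s>", add_special_tokens=False, return_tensors="pt"
).input_ids

with torch.no_grad():
    outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)

sequence = processor.batch_decode(outputs)[0]
print(processor.token2json(sequence))  # structured fields extracted from the prescription image
```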
Andrija/SRoBERTa-L
[ "pytorch", "roberta", "fill-mask", "hr", "sr", "multilingual", "dataset:oscar", "dataset:srwac", "dataset:leipzig", "transformers", "masked-lm", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
58
2022-10-16T01:21:43Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andrija/SRoBERTa-NER
[ "pytorch", "roberta", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-10-16T01:21:51Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andrija/SRoBERTa-NLP
[ "pytorch", "roberta", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andrija/SRoBERTa-XL-NER
[ "pytorch", "roberta", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-10-16T01:22:09Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andrija/SRoBERTa-XL
[ "pytorch", "roberta", "fill-mask", "hr", "sr", "multilingual", "dataset:oscar", "dataset:srwac", "dataset:leipzig", "dataset:cc100", "dataset:hrwac", "transformers", "masked-lm", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
54
2022-10-16T01:22:35Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andrija/SRoBERTa-base-NER
[ "pytorch", "roberta", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2022-10-16T01:22:43Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andrija/SRoBERTaFastBPE
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-16T01:23:20Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andry/111
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andry/1111
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-16T01:23:34Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Andy1621/uniformer
[ "license:mit", "has_space" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-16T01:23:42Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
AndyJ/clinicalBERT
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-10-16T01:23:48Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
AndyJ/prompt_finetune
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2022-10-16T01:23:55Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
AndyyyCai/bert-base-uncased-finetuned-copa
[ "pytorch", "bert", "multiple-choice", "transformers" ]
multiple-choice
{ "architectures": [ "BertForMultipleChoice" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-10-16T01:24:02Z
--- library_name: fairseq task: audio-to-audio tags: - fairseq - audio - audio-to-audio - speech-to-speech-translation license: cc-by-nc-4.0 --- You can try out the model on the right of the page by uploading or recording. For model usage, please refer to https://huggingface.co/facebook/textless_sm_cs_en
Ani123/Ani
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-16T01:34:02Z
--- license: creativeml-openrail-m --- # Nixeu stable diffusion model Original artist: Nixeu\ Patreon: https://www.patreon.com/nixeu/posts ## Basic explanation Token and Class words are what guide the AI to produce images similar to the trained style/object/character. Include any mix of these words in the prompt to produce varying results, or exclude them to have a less pronounced effect. There is usually at least a slight stylistic effect even without the words, but it is recommended to include at least one. Adding the token word/phrase and the class word/phrase at the start of the prompt, in that order, produces results most similar to the trained concept, but they can be included elsewhere as well. Some models produce better results when not including all token/class words. 3k models are more flexible, while 5k models produce images closer to the trained concept. I recommend 2k/3k models for normal use, and 5k/6k models for model merging and use without token/class words. However, it can also be very prompt-specific. I highly recommend self-experimentation. ## Model info Nixeu-any is the newest and best version. ### Model: nixeu-any The token is not necessary, but might increase the coherency of the image. ``` token: m_nixeu class: artstyle base: anything v3 images: 110 steps: 11000 ``` ### Model: nixeu-f You don't necessarily need to use token/class in the prompt for a lighter style influence. ``` token: m_nixeu class: illustration style base: waifu diffusion 1.3-full ``` ### Model: nixeu_e5 ``` token: m_concept class: 1girl base: waifu diffusion 1.3-e5 ``` ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content 2. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
AnnettJaeger/AnneJae
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-16T04:10:06Z
--- license: mit --- ### wukong_900 on Stable Diffusion via Dreambooth #### model by jaxmetaverse This is the Stable Diffusion model fine-tuned with the wukong_900 concept taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **wukong** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/jaxmetaverse/wukong-900/resolve/main/concept_images/1.jpeg) ![image 1](https://huggingface.co/jaxmetaverse/wukong-900/resolve/main/concept_images/2.jpeg) ![image 2](https://huggingface.co/jaxmetaverse/wukong-900/resolve/main/concept_images/0.jpeg) ![image 3](https://huggingface.co/jaxmetaverse/wukong-900/resolve/main/concept_images/4.jpeg) ![image 4](https://huggingface.co/jaxmetaverse/wukong-900/resolve/main/concept_images/3.jpeg)
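A short `diffusers` sketch for loading the Dreambooth model above. The repository name is inferred from the concept-image URLs in this card and should be treated as an assumption:

```python
import torch
from diffusers import StableDiffusionPipeline

# Repository id inferred from the image links above -- verify before use.
pipe = StableDiffusionPipeline.from_pretrained(
    "jaxmetaverse/wukong-900", torch_dtype=torch.float16
).to("cuda")

# The card says to include the instance prompt "wukong" in your prompt.
image = pipe("a portrait of wukong, highly detailed digital painting").images[0]
image.save("wukong.png")
```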
Anomic/DialoGPT-medium-loki
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-16T04:29:49Z
--- license: mit --- ### Xuna on Stable Diffusion This is the `<Xuna>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<Xuna> 0](https://huggingface.co/sd-concepts-library/xuna/resolve/main/concept_images/1.jpeg) ![<Xuna> 1](https://huggingface.co/sd-concepts-library/xuna/resolve/main/concept_images/2.jpeg) ![<Xuna> 2](https://huggingface.co/sd-concepts-library/xuna/resolve/main/concept_images/0.jpeg)
AnonymousNLP/pretrained-model-1
[ "pytorch", "gpt2", "transformers" ]
null
{ "architectures": [ "GPT2DoubleHeadsModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-10-16T05:40:15Z
--- tags: - object-detection - vision finetuned_from: - hustvl/yolos-small --- # YOLOS (small-sized) model fine-tuned on Matterport balloon dataset YOLOS is a Vision Transformer (ViT) trained using the DETR loss. Despite its simplicity, a base-sized YOLOS model is able to achieve 42 AP on COCO validation 2017 (similar to DETR and more complex frameworks such as Faster R-CNN). This YOLOS model was fine-tuned on COCO 2017 object detection (118k annotated images). It was introduced in the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Fang et al. and first released in [this repository](https://github.com/hustvl/YOLOS). ## Model description The model is trained using a "bipartite matching loss": one compares the predicted classes + bounding boxes of each of the N = 100 object queries to the ground truth annotations, padded up to the same length N (so if an image only contains 4 objects, 96 annotations will just have a "no object" as class and "no bounding box" as bounding box). The Hungarian matching algorithm is used to create an optimal one-to-one mapping between each of the N queries and each of the N annotations. Next, standard cross-entropy (for the classes) and a linear combination of the L1 and generalized IoU loss (for the bounding boxes) are used to optimize the parameters of the model. Currently, both the feature extractor and model support PyTorch. ## Training data This model was pre-trained on [ImageNet-1k](https://huggingface.co/datasets/imagenet2012) and fine-tuned on [COCO 2017 object detection](https://cocodataset.org/#download), a dataset consisting of 118k/5k annotated images for training/validation respectively. It was further fine-tuned on [Matterport Balloon Detection dataset](https://github.com/matterport/Mask_RCNN/releases/download/v2.1/balloon_dataset.zip), a dataset containing 74 annotated images. ### Training The model was pre-trained for 200 epochs on ImageNet-1k, fine-tuned for 150 epochs on COCO and further fine-tuned for 96 epochs on Matterport Balloon Dataset. You can go through its detailed notebook [here](https://github.com/ZohebAbai/Deep-Learning-Projects/blob/master/10_PT_Object_Detection_using_Transformers.ipynb). ## Evaluation results This model achieves an AP (average precision) of **26.9** on Matterport Balloon validation. ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2106-00666, author = {Yuxin Fang and Bencheng Liao and Xinggang Wang and Jiemin Fang and Jiyang Qi and Rui Wu and Jianwei Niu and Wenyu Liu}, title = {You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection}, journal = {CoRR}, volume = {abs/2106.00666}, year = {2021}, url = {https://arxiv.org/abs/2106.00666}, eprinttype = {arXiv}, eprint = {2106.00666}, timestamp = {Fri, 29 Apr 2022 19:49:16 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-2106-00666.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
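A minimal PyTorch inference sketch for the fine-tuned detector described above (the repository id is a placeholder, and the 0.9 confidence threshold is an arbitrary choice):

```python
import torch
from PIL import Image
from transformers import YolosFeatureExtractor, YolosForObjectDetection

# Placeholder repository id -- substitute the actual balloon-detection checkpoint.
repo = "<namespace>/yolos-small-balloon"
feature_extractor = YolosFeatureExtractor.from_pretrained(repo)
model = YolosForObjectDetection.from_pretrained(repo)

image = Image.open("balloons.jpg").convert("RGB")
inputs = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Drop the final "no object" class, keep queries above an arbitrary 0.9 threshold.
probas = outputs.logits.softmax(-1)[0, :, :-1]
keep = probas.max(-1).values > 0.9
print(f"{int(keep.sum())} objects detected above threshold")
```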
AnonymousSub/AR_EManuals-RoBERTa
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-10-16T06:16:17Z
--- license: mit --- ### Pion by August Semionov on Stable Diffusion This is the `<pion>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<pion> 0](https://huggingface.co/sd-concepts-library/pion-by-august-semionov/resolve/main/concept_images/1.jpeg) ![<pion> 1](https://huggingface.co/sd-concepts-library/pion-by-august-semionov/resolve/main/concept_images/2.jpeg) ![<pion> 2](https://huggingface.co/sd-concepts-library/pion-by-august-semionov/resolve/main/concept_images/0.jpeg) ![<pion> 3](https://huggingface.co/sd-concepts-library/pion-by-august-semionov/resolve/main/concept_images/4.jpeg) ![<pion> 4](https://huggingface.co/sd-concepts-library/pion-by-august-semionov/resolve/main/concept_images/3.jpeg)
AnonymousSub/AR_bert-base-uncased
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de-fr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de-fr This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2871 - F1: 0.8596 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 0.2911 | 1.0 | 3718 | 0.2709 | 0.8020 | | 0.1344 | 2.0 | 7436 | 0.2659 | 0.8432 | | 0.0631 | 3.0 | 11154 | 0.2871 | 0.8596 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
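As a quick sanity check of the fine-tuned tagger above (the repository id is a placeholder, not taken from the card), the checkpoint can be used through the token-classification pipeline:

```python
from transformers import pipeline

# Placeholder repository id -- replace with the actual fine-tuned checkpoint.
tagger = pipeline(
    "ner",
    model="<namespace>/xlm-roberta-base-finetuned-panx-de-fr",
    aggregation_strategy="simple",
)

print(tagger("Jeff Dean ist ein Informatiker bei Google in Kalifornien."))
```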
AnonymousSub/AR_consert
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-10-16T07:16:13Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-fr results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.fr split: train args: PAN-X.fr metrics: - name: F1 type: f1 value: 0.8667799490229396 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-fr This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.3503 - F1: 0.8668 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.5539 | 1.0 | 573 | 0.3241 | 0.8206 | | 0.2748 | 2.0 | 1146 | 0.2956 | 0.8477 | | 0.1658 | 3.0 | 1719 | 0.3503 | 0.8668 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
AnonymousSub/AR_rule_based_bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-10-16T07:38:10Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-it results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.it split: train args: PAN-X.it metrics: - name: F1 type: f1 value: 0.8234718826405867 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-it This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.3544 - F1: 0.8235 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.7074 | 1.0 | 210 | 0.4237 | 0.7311 | | 0.3172 | 2.0 | 420 | 0.3662 | 0.7820 | | 0.1855 | 3.0 | 630 | 0.3544 | 0.8235 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
AnonymousSub/AR_rule_based_only_classfn_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- language: - en tags: - stable-diffusion - text-to-image license: creativeml-openrail-m inference: false --- [hakurei/waifu-diffusion-v1-3](https://huggingface.co/hakurei/waifu-diffusion-v1-3) fine-tuned on 800 samples from [https://www.kaggle.com/datasets/stevenevan99/face-of-pixiv-top-daily-illustration-2020](https://www.kaggle.com/datasets/stevenevan99/face-of-pixiv-top-daily-illustration-2020) at 384x384 resolution (because some of the images here are very low-res and I don't want artifacts) (converted to .ckpt file) examples of "highres, sketch, rkgk, pfp" ![](https://cdn.discordapp.com/attachments/1024588665596411944/1028242786144755723/unknown.png) "1girl bangs blush crop top earrings grey eyes hair ornament hairclip indoors jewelry mole mole under eye necklace highres, sketch, rkgk" ![](https://pbs.twimg.com/media/FeiFRK1WIAAkS5V?format=jpg&name=large)
AnonymousSub/AR_rule_based_roberta_bert_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-10-16T07:56:19Z
--- license: mit tags: - generated_from_trainer datasets: - xtreme metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-en results: - task: name: Token Classification type: token-classification dataset: name: xtreme type: xtreme config: PAN-X.en split: train args: PAN-X.en metrics: - name: F1 type: f1 value: 0.7205428747686613 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-en This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the xtreme dataset. It achieves the following results on the evaluation set: - Loss: 0.4923 - F1: 0.7205 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.9902 | 1.0 | 148 | 0.6183 | 0.5830 | | 0.4903 | 2.0 | 296 | 0.5232 | 0.6675 | | 0.3272 | 3.0 | 444 | 0.4923 | 0.7205 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.6.1 - Tokenizers 0.13.1
AnonymousSub/AR_rule_based_roberta_hier_triplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-10-16T08:44:36Z
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: xmelus/mbert results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # xmelus/mbert This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 1.5424 - Train Accuracy: 0.1446 - Validation Loss: 1.5269 - Validation Accuracy: 0.1461 - Finished epochs: 24 ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': -596, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 1000, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01} - training_precision: mixed_float16 ### Training results Epoch 1/50 loss: 2.9925 - accuracy: 0.1059 - val_loss: 1.9812 - val_accuracy: 0.1331 Epoch 2/50 loss: 1.9979 - accuracy: 0.1307 - val_loss: 1.6063 - val_accuracy: 0.1429 Epoch 3/50 loss: 1.5798 - accuracy: 0.1434 - val_loss: 1.5332 - val_accuracy: 0.1461 Epoch 4/50 loss: 1.5325 - accuracy: 0.1451 - val_loss: 1.5285 - val_accuracy: 0.1458 Epoch 5/50 loss: 1.5415 - accuracy: 0.1448 - val_loss: 1.5449 - val_accuracy: 0.1457 Epoch 6/50 loss: 1.5395 - accuracy: 0.1448 - val_loss: 1.5448 - val_accuracy: 0.1456 Epoch 7/50 loss: 1.5463 - accuracy: 0.1446 - val_loss: 1.5421 - val_accuracy: 0.1454 Epoch 8/50 loss: 1.5352 - accuracy: 0.1451 - val_loss: 1.5536 - val_accuracy: 0.1453 Epoch 9/50 loss: 1.5230 - accuracy: 0.1451 - val_loss: 1.5097 - val_accuracy: 0.1466 Epoch 10/50 loss: 1.5318 - accuracy: 0.1449 - val_loss: 1.5303 - val_accuracy: 0.1460 Epoch 11/50 loss: 1.5364 - accuracy: 0.1448 - val_loss: 1.5280 - val_accuracy: 0.1462 Epoch 12/50 loss: 1.5411 - accuracy: 0.1444 - val_loss: 1.5493 - val_accuracy: 0.1455 Epoch 13/50 loss: 1.5378 - accuracy: 0.1446 - val_loss: 1.5473 - val_accuracy: 0.1456 Epoch 14/50 loss: 1.5357 - accuracy: 0.1449 - val_loss: 1.5310 - val_accuracy: 0.1457 Epoch 15/50 loss: 1.5424 - accuracy: 0.1446 - val_loss: 1.5269 - val_accuracy: 0.1461 Epoch 16/50 loss: 1.5314 - accuracy: 0.1450 - val_loss: 1.5392 - val_accuracy: 0.1456 Epoch 17/50 loss: 1.5309 - accuracy: 0.1451 - val_loss: 1.5567 - val_accuracy: 0.1454 Epoch 18/50 loss: 1.5279 - accuracy: 0.1450 - val_loss: 1.5561 - val_accuracy: 0.1452 Epoch 19/50 loss: 1.5311 - accuracy: 0.1450 - val_loss: 1.5400 - val_accuracy: 0.1460 Epoch 20/50 loss: 1.5332 - accuracy: 0.1449 - val_loss: 1.5347 - val_accuracy: 0.1460 Epoch 21/50 loss: 1.5319 - accuracy: 0.1452 - val_loss: 1.5410 - val_accuracy: 0.1458 Epoch 22/50 loss: 1.5327 - accuracy: 0.1449 - val_loss: 1.5352 - val_accuracy: 0.1460 Epoch 23/50 loss: 1.5278 - accuracy: 0.1451 - val_loss: 1.5289 - val_accuracy: 0.1458 Epoch 24/50 loss: 1.5234 - accuracy: 0.1451 - val_loss: 1.5568 - val_accuracy: 0.1449 ### Framework versions - Transformers 4.22.1 - TensorFlow 2.8.2 - Datasets 2.5.1 - Tokenizers 0.12.1
AnonymousSub/AR_rule_based_roberta_twostagequadruplet_hier_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit --- ### RikiArt on Stable Diffusion This is the `<rick-art>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<rick-art> 0](https://huggingface.co/sd-concepts-library/rikiart/resolve/main/concept_images/1.jpeg) ![<rick-art> 1](https://huggingface.co/sd-concepts-library/rikiart/resolve/main/concept_images/2.jpeg) ![<rick-art> 2](https://huggingface.co/sd-concepts-library/rikiart/resolve/main/concept_images/0.jpeg) ![<rick-art> 3](https://huggingface.co/sd-concepts-library/rikiart/resolve/main/concept_images/4.jpeg) ![<rick-art> 4](https://huggingface.co/sd-concepts-library/rikiart/resolve/main/concept_images/3.jpeg)
AnonymousSub/AR_rule_based_roberta_twostagetriplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-10-16T09:44:27Z
--- language: ml datasets: - Indic TTS Malayalam Speech Corpus - Openslr Malayalam Speech Corpus - SMC Malayalam Speech Corpus - IIIT-H Indic Speech Databases metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: Malayalam XLSR Wav2Vec2 Large 53 results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Test split of combined dataset using all datasets mentioned above type: custom args: ml metrics: - name: Test WER type: wer value: 28.43 --- # Wav2Vec2-Large-XLSR-53-ml Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on ml (Malayalam) using the [Indic TTS Malayalam Speech Corpus (via Kaggle)](https://www.kaggle.com/kavyamanohar/indic-tts-malayalam-speech-corpus), [Openslr Malayalam Speech Corpus](http://openslr.org/63/), [SMC Malayalam Speech Corpus](https://blog.smc.org.in/malayalam-speech-corpus/) and [IIIT-H Indic Speech Databases](http://speech.iiit.ac.in/index.php/research-svl/69.html). The notebooks used to train model are available [here](https://github.com/gauthamsuresh09/wav2vec2-large-xlsr-53-malayalam/). When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = <load-test-split-of-combined-dataset> # Details on loading this dataset in the evaluation section processor = Wav2Vec2Processor.from_pretrained("gvs/wav2vec2-large-xlsr-malayalam") model = Wav2Vec2ForCTC.from_pretrained("gvs/wav2vec2-large-xlsr-malayalam") resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset["sentence"]) ``` ## Evaluation The model can be evaluated as follows on the test data of combined custom dataset. For more details on dataset preparation, check the notebooks mentioned at the end of this file. 
```python
import torch
import torchaudio
import re
from pathlib import Path
from datasets import load_dataset, load_metric, concatenate_datasets
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

# The custom dataset needs to be created using the notebook mentioned at the end of this file
data_dir = Path('<path-to-custom-dataset>')

dataset_folders = {
    'iiit': 'iiit_mal_abi',
    'openslr': 'openslr',
    'indic-tts': 'indic-tts-ml',
    'msc-reviewed': 'msc-reviewed-speech-v1.0+20200825',
}

# Set directories for datasets
openslr_male_dir = data_dir / dataset_folders['openslr'] / 'male'
openslr_female_dir = data_dir / dataset_folders['openslr'] / 'female'
iiit_dir = data_dir / dataset_folders['iiit']
indic_tts_male_dir = data_dir / dataset_folders['indic-tts'] / 'male'
indic_tts_female_dir = data_dir / dataset_folders['indic-tts'] / 'female'
msc_reviewed_dir = data_dir / dataset_folders['msc-reviewed']

# Load the datasets
openslr_male = load_dataset("json", data_files=[f"{str(openslr_male_dir.absolute())}/sample_{i}.json" for i in range(2023)], split="train")
openslr_female = load_dataset("json", data_files=[f"{str(openslr_female_dir.absolute())}/sample_{i}.json" for i in range(2103)], split="train")
iiit = load_dataset("json", data_files=[f"{str(iiit_dir.absolute())}/sample_{i}.json" for i in range(1000)], split="train")
indic_tts_male = load_dataset("json", data_files=[f"{str(indic_tts_male_dir.absolute())}/sample_{i}.json" for i in range(5649)], split="train")
indic_tts_female = load_dataset("json", data_files=[f"{str(indic_tts_female_dir.absolute())}/sample_{i}.json" for i in range(2950)], split="train")
msc_reviewed = load_dataset("json", data_files=[f"{str(msc_reviewed_dir.absolute())}/sample_{i}.json" for i in range(1541)], split="train")

# Create test split as 20%, set random seed as well.
test_size = 0.2
random_seed = 1
openslr_male_splits = openslr_male.train_test_split(test_size=test_size, seed=random_seed)
openslr_female_splits = openslr_female.train_test_split(test_size=test_size, seed=random_seed)
iiit_splits = iiit.train_test_split(test_size=test_size, seed=random_seed)
indic_tts_male_splits = indic_tts_male.train_test_split(test_size=test_size, seed=random_seed)
indic_tts_female_splits = indic_tts_female.train_test_split(test_size=test_size, seed=random_seed)
msc_reviewed_splits = msc_reviewed.train_test_split(test_size=test_size, seed=random_seed)

# Get combined test dataset
split_list = [openslr_male_splits, openslr_female_splits, indic_tts_male_splits, indic_tts_female_splits, msc_reviewed_splits, iiit_splits]
test_dataset = concatenate_datasets([split['test'] for split in split_list])

wer = load_metric("wer")

processor = Wav2Vec2Processor.from_pretrained("gvs/wav2vec2-large-xlsr-malayalam")
model = Wav2Vec2ForCTC.from_pretrained("gvs/wav2vec2-large-xlsr-malayalam")
model.to("cuda")

resamplers = {
    48000: torchaudio.transforms.Resample(48_000, 16_000),
}

chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\“\\%\\‘\\”\\�Utrnle\\_]'
unicode_ignore_regex = r'[\u200e]'

# Preprocessing the datasets.
# We need to read the audio files as arrays
def speech_file_to_array_fn(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"])
    batch["sentence"] = re.sub(unicode_ignore_regex, '', batch["sentence"])
    speech_array, sampling_rate = torchaudio.load(batch["path"])
    # Resample if it's not in 16kHz
    if sampling_rate != 16000:
        batch["speech"] = resamplers[sampling_rate](speech_array).squeeze().numpy()
    else:
        batch["speech"] = speech_array.squeeze().numpy()
    # If more than one dimension is present, pick first one
    if batch["speech"].ndim > 1:
        batch["speech"] = batch["speech"][0]
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)

# Run batched inference and collect the predicted transcriptions
def evaluate(batch):
    inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
    with torch.no_grad():
        logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits
    pred_ids = torch.argmax(logits, dim=-1)
    batch["pred_strings"] = processor.batch_decode(pred_ids)
    return batch

result = test_dataset.map(evaluate, batched=True, batch_size=8)

print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"])))
```

**Test Result (WER)**: 28.43 %

## Training

A combined dataset was created using [Indic TTS Malayalam Speech Corpus (via Kaggle)](https://www.kaggle.com/kavyamanohar/indic-tts-malayalam-speech-corpus), [Openslr Malayalam Speech Corpus](http://openslr.org/63/), [SMC Malayalam Speech Corpus](https://blog.smc.org.in/malayalam-speech-corpus/) and [IIIT-H Indic Speech Databases](http://speech.iiit.ac.in/index.php/research-svl/69.html). The datasets were downloaded and converted to HF Dataset format using [this notebook](https://github.com/gauthamsuresh09/wav2vec2-large-xlsr-53-malayalam/blob/main/make_hf_dataset.ipynb).

The notebook used for training and evaluation can be found [here](https://github.com/gauthamsuresh09/wav2vec2-large-xlsr-53-malayalam/blob/main/fine-tune-xlsr-wav2vec2-on-malayalam-asr-with-transformers_v2.ipynb).
AnonymousSub/EManuals_RoBERTa_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
2022-10-16T10:54:43Z
--- license: mit --- ### Jacqueline-the-unicorn on Stable Diffusion This is the `<jacqueline>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<jacqueline> 0](https://huggingface.co/sd-concepts-library/jacqueline-the-unicorn/resolve/main/concept_images/1.jpeg) ![<jacqueline> 1](https://huggingface.co/sd-concepts-library/jacqueline-the-unicorn/resolve/main/concept_images/2.jpeg) ![<jacqueline> 2](https://huggingface.co/sd-concepts-library/jacqueline-the-unicorn/resolve/main/concept_images/0.jpeg) ![<jacqueline> 3](https://huggingface.co/sd-concepts-library/jacqueline-the-unicorn/resolve/main/concept_images/4.jpeg) ![<jacqueline> 4](https://huggingface.co/sd-concepts-library/jacqueline-the-unicorn/resolve/main/concept_images/6.jpeg) ![<jacqueline> 5](https://huggingface.co/sd-concepts-library/jacqueline-the-unicorn/resolve/main/concept_images/5.jpeg) ![<jacqueline> 6](https://huggingface.co/sd-concepts-library/jacqueline-the-unicorn/resolve/main/concept_images/3.jpeg) ![<jacqueline> 7](https://huggingface.co/sd-concepts-library/jacqueline-the-unicorn/resolve/main/concept_images/7.jpeg)
AnonymousSub/SDR_HF_model_base
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2022-10-16T10:57:03Z
---
license: mit
---
# Description

Trainer: ChrisC

Ram from Re:zero

# Dataset

>Training: 23 images
>Regularization: 400 images

# Info

>ram_3k_WD1-3.ckpt
>Model Used: Waifu Diffusion 1.3
>Steps: 3000
>Keyword: Ram (Use this in the prompt)
>Class Phrase: ram_mondays
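The card stops at the training settings, so here is a hedged usage sketch rather than an official one. It assumes the released `ram_3k_WD1-3.ckpt` has first been converted to a local diffusers folder (for example with diffusers' `convert_original_stable_diffusion_to_diffusers.py` script); the local path, prompt wording, and sampler settings below are illustrative assumptions. The only detail taken from the card itself is the keyword `Ram`, which must appear in the prompt.

```python
# Hedged sketch: "./ram_3k_WD1-3" is a hypothetical local path to the checkpoint
# after conversion to diffusers format; it is not part of the original release.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "./ram_3k_WD1-3", torch_dtype=torch.float16
).to("cuda")

# Use the trained keyword "Ram" in the prompt, as the card instructs.
prompt = "portrait of Ram, pink hair, maid outfit, highly detailed anime illustration"
image = pipe(prompt, num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("ram_sample.png")
```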
AnonymousSub/T5_pubmedqa_question_generation
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
2022-10-16T17:03:09Z
---
language: en
tags:
- bert
- business
- finance
license: cc-by-4.0
datasets:
- CompanyWeb
- MD&A Disclosures
- S2ORC
---

# BusinessBERT

An industry-sensitive language model for business applications pretrained on business communication corpora. The model incorporates industry classification (IC) as a pretraining objective in addition to masked language modeling (MLM). It was introduced in [this paper]() and released in [this repository]().

## Model description

We introduce BusinessBERT, an industry-sensitive language model for business applications. The advantage of the model is a training approach that incorporates industry information relevant to business-related natural language processing (NLP) tasks. We compile three large-scale textual corpora consisting of annual disclosures, company website content and scientific literature representing business communication. In total, the corpora include 2.23 billion tokens. BusinessBERT builds upon the Bidirectional Encoder Representations from Transformers (BERT) architecture and embeds industry information during pretraining in two ways: (1) the business communication corpora contain a variety of industry-specific terminology; (2) we employ industry classification (IC) as an additional pretraining objective for text documents originating from companies. BusinessBERT is evaluated on business-related NLP tasks including text classification, named entity recognition, sentiment analysis and question answering. We find that BusinessBERT substantially improves downstream performance on business-related NLP tasks.

## Intended uses & limitations

The model is intended to be fine-tuned on business-related NLP tasks, i.e. sequence classification, named entity recognition, sentiment analysis or question answering.

### How to use

[More Information Needed] (an illustrative, unofficial sketch is provided at the end of this card)

### Limitations and bias

[More Information Needed]

## Training data

- [CompanyWeb](https://huggingface.co/datasets/anonymous20221014/submissiondata2022): 0.77 billion tokens, 3.5 GB raw text file
- [MD&A Disclosures](https://data.caltech.edu/records/1249): 1.06 billion tokens, 5.1 GB raw text file
- [Semantic Scholar Open Research Corpus](https://api.semanticscholar.org/corpus): 0.40 billion tokens, 1.9 GB raw text file

## Evaluation results

[More Information Needed]

<!-- When fine-tuned on downstream tasks, this model achieves the following results:

Glue test results:

| Task | MNLI-(m/mm) | QQP | QNLI | SST-2 | CoLA | STS-B | MRPC | RTE | Average |
|:----:|:-----------:|:----:|:----:|:-----:|:----:|:-----:|:----:|:----:|:-------:|
|      | 84.6/83.4   | 71.2 | 90.5 | 93.5  | 52.1 | 85.8  | 88.9 | 66.4 | 79.6    |
-->

### BibTeX entry and citation info

```bibtex
@misc{title_year,
  title={TITLE},
  author={AUTHORS},
  year={2022},
}
```
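Since the "How to use" section above is still a placeholder, the following is an unofficial sketch of the fine-tuning workflow the card describes (sequence classification on business text). The identifier `<businessbert-model-id>` is a stand-in (the card does not name the hosted checkpoint), and the label count and example sentence are illustrative only.

```python
# Hedged sketch: load the BERT-style checkpoint for sequence classification.
# "<businessbert-model-id>" is a hypothetical placeholder; replace it with the actual repo id.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "<businessbert-model-id>"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=2)

# Tokenize a business-domain sentence and run a forward pass.
inputs = tokenizer(
    "The company reported strong quarterly earnings in its industrial segment.",
    return_tensors="pt",
    truncation=True,
)
outputs = model(**inputs)
print(outputs.logits)
```

From here the model would be fine-tuned as usual, for example with the `Trainer` API, on a labeled business-domain dataset.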
AnonymousSub/bert_snips
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2022-10-16T17:35:21Z
--- license: mit --- ### test-epson on Stable Diffusion This is the `<epson-branch>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<epson-branch> 0](https://huggingface.co/sd-concepts-library/test-epson/resolve/main/concept_images/0.jpeg) ![<epson-branch> 1](https://huggingface.co/sd-concepts-library/test-epson/resolve/main/concept_images/1.jpeg) ![<epson-branch> 2](https://huggingface.co/sd-concepts-library/test-epson/resolve/main/concept_images/2.jpeg) ![<epson-branch> 3](https://huggingface.co/sd-concepts-library/test-epson/resolve/main/concept_images/3.jpeg) ![<epson-branch> 4](https://huggingface.co/sd-concepts-library/test-epson/resolve/main/concept_images/4.jpeg)
AnonymousSub/cline-s10-SR
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-10-16T17:52:08Z
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.46 +/- 2.80 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3** . ## Usage ```python model = load_from_hub(repo_id="danluo96/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"]) ```
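`load_from_hub` and `evaluate_agent` in the snippet above come from the Hugging Face Deep RL course notebooks rather than from an installable package. A minimal sketch of what `load_from_hub` might look like, assuming the model is stored as a pickled dict on the Hub (an assumption, not the exact course code):

```python
# Hedged sketch of the course helper, based on hf_hub_download + pickle.
import pickle
import gym
from huggingface_hub import hf_hub_download

def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled Q-learning model (qtable, env_id, eval settings) from the Hub."""
    pickle_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(pickle_path, "rb") as f:
        return pickle.load(f)

model = load_from_hub(repo_id="danluo96/q-Taxi-v3", filename="q-learning.pkl")
env = gym.make(model["env_id"])
```

With the dict loaded, `model["qtable"]` holds the Q-table and the remaining keys carry the evaluation settings consumed by `evaluate_agent` in the snippet above.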
AnonymousSub/cline-techqa
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-10-16T17:52:25Z
--- license: mit --- ### orientalist art on Stable Diffusion This is the `<orientalist-art>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<orientalist-art> 0](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/1.jpeg) ![<orientalist-art> 1](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/12.jpeg) ![<orientalist-art> 2](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/8.jpeg) ![<orientalist-art> 3](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/9.jpeg) ![<orientalist-art> 4](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/2.jpeg) ![<orientalist-art> 5](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/0.jpeg) ![<orientalist-art> 6](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/4.jpeg) ![<orientalist-art> 7](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/10.jpeg) ![<orientalist-art> 8](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/6.jpeg) ![<orientalist-art> 9](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/11.jpeg) ![<orientalist-art> 10](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/5.jpeg) ![<orientalist-art> 11](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/3.jpeg) ![<orientalist-art> 12](https://huggingface.co/sd-concepts-library/orientalist-art/resolve/main/concept_images/7.jpeg)
AnonymousSub/declutr-emanuals-s10-AR
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- language: - en tags: - pytorch - causal-lm - pythia - pythia_v0 license: apache-2.0 datasets: - the_pile --- The *Pythia Scaling Suite* is a collection of models developed to facilitate interpretability research. It contains two sets of eight models of sizes 70M, 160M, 410M, 1B, 1.4B, 2.8B, 6.9B, and 12B. For each size, there are two models: one trained on the Pile, and one trained on the Pile after the dataset has been globally deduplicated. All 8 model sizes are trained on the exact same data, in the exact same order. All Pythia models are available [on Hugging Face](https://huggingface.co/models?other=pythia). The Pythia model suite was deliberately designed to promote scientific research on large language models, especially interpretability research. Despite not centering downstream performance as a design goal, we find the models <a href="#evaluations">match or exceed</a> the performance of similar and same-sized models, such as those in the OPT and GPT-Neo suites. Please note that all models in the *Pythia* suite were renamed in January 2023. For clarity, a <a href="#naming-convention-and-parameter-count">table comparing the old and new names</a> is provided in this model card, together with exact parameter counts. ## Pythia-1B ### Model Details - Developed by: [EleutherAI](http://eleuther.ai) - Model type: Transformer-based Language Model - Language: English - Learn more: [Pythia's GitHub repository](https://github.com/EleutherAI/pythia) for training procedure, config files, and details on how to use. - Library: [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) - License: Apache 2.0 - Contact: to ask questions about this model, join the [EleutherAI Discord](https://discord.gg/zBGx3azzUn), and post them in `#release-discussion`. Please read the existing *Pythia* documentation before asking about it in the EleutherAI Discord. For general correspondence: [contact@eleuther. ai](mailto:[email protected]). <figure> | Pythia model | Non-Embedding Params | Layers | Model Dim | Heads | Batch Size | Learning Rate | Equivalent Models | | -----------: | -------------------: | :----: | :-------: | :---: | :--------: | :-------------------: | :--------------------: | | 70M | 18,915,328 | 6 | 512 | 8 | 2M | 1.0 x 10<sup>-3</sup> | — | | 160M | 85,056,000 | 12 | 768 | 12 | 4M | 6.0 x 10<sup>-4</sup> | GPT-Neo 125M, OPT-125M | | 410M | 302,311,424 | 24 | 1024 | 16 | 4M | 3.0 x 10<sup>-4</sup> | OPT-350M | | 1.0B | 805,736,448 | 16 | 2048 | 8 | 2M | 3.0 x 10<sup>-4</sup> | — | | 1.4B | 1,208,602,624 | 24 | 2048 | 16 | 4M | 2.0 x 10<sup>-4</sup> | GPT-Neo 1.3B, OPT-1.3B | | 2.8B | 2,517,652,480 | 32 | 2560 | 32 | 2M | 1.6 x 10<sup>-4</sup> | GPT-Neo 2.7B, OPT-2.7B | | 6.9B | 6,444,163,072 | 32 | 4096 | 32 | 2M | 1.2 x 10<sup>-4</sup> | OPT-6.7B | | 12B | 11,327,027,200 | 36 | 5120 | 40 | 2M | 1.2 x 10<sup>-4</sup> | — | <figcaption>Engineering details for the <i>Pythia Suite</i>. Deduped and non-deduped models of a given size have the same hyperparameters. “Equivalent” models have <b>exactly</b> the same architecture, and the same number of non-embedding parameters.</figcaption> </figure> ### Uses and Limitations #### Intended Use The primary intended use of Pythia is research on the behavior, functionality, and limitations of large language models. This suite is intended to provide a controlled setting for performing scientific experiments. To enable the study of how language models change over the course of training, we provide 143 evenly spaced intermediate checkpoints per model. 
These checkpoints are hosted on Hugging Face as branches. Note that branch `143000` corresponds exactly to the model checkpoint on the `main` branch of each model. You may also further fine-tune and adapt Pythia-1B for deployment, as long as your use is in accordance with the Apache 2.0 license. Pythia models work with the Hugging Face [Transformers Library](https://huggingface.co/docs/transformers/index). If you decide to use pre-trained Pythia-1B as a basis for your fine-tuned model, please conduct your own risk and bias assessment. #### Out-of-scope use The Pythia Suite is **not** intended for deployment. It is not a in itself a product and cannot be used for human-facing interactions. Pythia models are English-language only, and are not suitable for translation or generating text in other languages. Pythia-1B has not been fine-tuned for downstream contexts in which language models are commonly deployed, such as writing genre prose, or commercial chatbots. This means Pythia-1B will **not** respond to a given prompt the way a product like ChatGPT does. This is because, unlike this model, ChatGPT was fine-tuned using methods such as Reinforcement Learning from Human Feedback (RLHF) to better “understand” human instructions. #### Limitations and biases The core functionality of a large language model is to take a string of text and predict the next token. The token deemed statistically most likely by the model need not produce the most “accurate” text. Never rely on Pythia-1B to produce factually accurate output. This model was trained on [the Pile](https://pile.eleuther.ai/), a dataset known to contain profanity and texts that are lewd or otherwise offensive. See [Section 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a discussion of documented biases with regards to gender, religion, and race. Pythia-1B may produce socially unacceptable or undesirable text, *even if* the prompt itself does not include anything explicitly offensive. If you plan on using text generated through, for example, the Hosted Inference API, we recommend having a human curate the outputs of this language model before presenting it to other people. Please inform your audience that the text was generated by Pythia-1B. ### Quickstart Pythia models can be loaded and used via the following code, demonstrated here for the third `pythia-70m-deduped` checkpoint: ```python from transformers import GPTNeoXForCausalLM, AutoTokenizer model = GPTNeoXForCausalLM.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) inputs = tokenizer("Hello, I am", return_tensors="pt") tokens = model.generate(**inputs) tokenizer.decode(tokens[0]) ``` Revision/branch `step143000` corresponds exactly to the model checkpoint on the `main` branch of each model.<br> For more information on how to use all Pythia models, see [documentation on GitHub](https://github.com/EleutherAI/pythia). ### Training #### Training data [The Pile](https://pile.eleuther.ai/) is a 825GiB general-purpose dataset in English. It was created by EleutherAI specifically for training large language models. It contains texts from 22 diverse sources, roughly broken down into five categories: academic writing (e.g. arXiv), internet (e.g. CommonCrawl), prose (e.g. Project Gutenberg), dialogue (e.g. YouTube subtitles), and miscellaneous (e.g. GitHub, Enron Emails). 
See [the Pile paper](https://arxiv.org/abs/2101.00027) for a breakdown of all data sources, methodology, and a discussion of ethical implications. Consult [the datasheet](https://arxiv.org/abs/2201.07311) for more detailed documentation about the Pile and its component datasets. The Pile can be downloaded from the [official website](https://pile.eleuther.ai/), or from a [community mirror](https://the-eye.eu/public/AI/pile/).<br> The Pile was **not** deduplicated before being used to train Pythia-1B. #### Training procedure All models were trained on the exact same data, in the exact same order. Each model saw 299,892,736,000 tokens during training, and 143 checkpoints for each model are saved every 2,097,152,000 tokens, spaced evenly throughout training. This corresponds to training for just under 1 epoch on the Pile for non-deduplicated models, and about 1.5 epochs on the deduplicated Pile. All *Pythia* models trained for the equivalent of 143000 steps at a batch size of 2,097,152 tokens. Two batch sizes were used: 2M and 4M. Models with a batch size of 4M tokens listed were originally trained for 71500 steps instead, with checkpoints every 500 steps. The checkpoints on Hugging Face are renamed for consistency with all 2M batch models, so `step1000` is the first checkpoint for `pythia-1.4b` that was saved (corresponding to step 500 in training), and `step1000` is likewise the first `pythia-6.9b` checkpoint that was saved (corresponding to 1000 “actual” steps).<br> See [GitHub](https://github.com/EleutherAI/pythia) for more details on training procedure, including [how to reproduce it](https://github.com/EleutherAI/pythia/blob/main/README.md#reproducing-training).<br> Pythia uses the same tokenizer as [GPT-NeoX- 20B](https://huggingface.co/EleutherAI/gpt-neox-20b). ### Evaluations All 16 *Pythia* models were evaluated using the [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness). You can access the results by model and step at `results/json/*` in the [GitHub repository](https://github.com/EleutherAI/pythia/tree/main/results/json).<br> Expand the sections below to see plots of evaluation results for all Pythia and Pythia-deduped models compared with OPT and BLOOM. <details> <summary>LAMBADA – OpenAI</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/lambada_openai.png" style="width:auto"/> </details> <details> <summary>Physical Interaction: Question Answering (PIQA)</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/piqa.png" style="width:auto"/> </details> <details> <summary>WinoGrande</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/winogrande.png" style="width:auto"/> </details> <details> <summary>AI2 Reasoning Challenge—Challenge Set</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/arc_challenge.png" style="width:auto"/> </details> <details> <summary>SciQ</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/sciq.png" style="width:auto"/> </details> ### Naming convention and parameter count *Pythia* models were renamed in January 2023. It is possible that the old naming convention still persists in some documentation by accident. The current naming convention (70M, 160M, etc.) is based on total parameter count. 
<figure style="width:32em">

| current Pythia suffix | old suffix | total params   | non-embedding params |
| --------------------: | ---------: | -------------: | -------------------: |
| 70M                   | 19M        | 70,426,624     | 18,915,328           |
| 160M                  | 125M       | 162,322,944    | 85,056,000           |
| 410M                  | 350M       | 405,334,016    | 302,311,424          |
| 1B                    | 800M       | 1,011,781,632  | 805,736,448          |
| 1.4B                  | 1.3B       | 1,414,647,808  | 1,208,602,624        |
| 2.8B                  | 2.7B       | 2,775,208,960  | 2,517,652,480        |
| 6.9B                  | 6.7B       | 6,857,302,016  | 6,444,163,072        |
| 12B                   | 13B        | 11,846,072,320 | 11,327,027,200       |
</figure>
AnonymousSub/declutr-model
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: en thumbnail: http://www.huggingtweets.com/th3nfthunt3r/1665945395711/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1563458962158022656/CWXK4AUr_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">Th3 NFT Hunt3r</div> <div style="text-align: center; font-size: 14px;">@th3nfthunt3r</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from Th3 NFT Hunt3r. | Data | Th3 NFT Hunt3r | | --- | --- | | Tweets downloaded | 364 | | Retweets | 50 | | Short tweets | 113 | | Tweets kept | 201 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/13l2dy5v/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @th3nfthunt3r's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/1xgt6nuf) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/1xgt6nuf/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/th3nfthunt3r') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
AnonymousSub/declutr-model_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-10-16T18:39:39Z
--- language: - en tags: - pytorch - causal-lm - pythia - pythia_v0 license: apache-2.0 datasets: - the_pile --- The *Pythia Scaling Suite* is a collection of models developed to facilitate interpretability research. It contains two sets of eight models of sizes 70M, 160M, 410M, 1B, 1.4B, 2.8B, 6.9B, and 12B. For each size, there are two models: one trained on the Pile, and one trained on the Pile after the dataset has been globally deduplicated. All 8 model sizes are trained on the exact same data, in the exact same order. All Pythia models are available [on Hugging Face](https://huggingface.co/models?other=pythia). The Pythia model suite was deliberately designed to promote scientific research on large language models, especially interpretability research. Despite not centering downstream performance as a design goal, we find the models <a href="#evaluations">match or exceed</a> the performance of similar and same-sized models, such as those in the OPT and GPT-Neo suites. Please note that all models in the *Pythia* suite were renamed in January 2023. For clarity, a <a href="#naming-convention-and-parameter-count">table comparing the old and new names</a> is provided in this model card, together with exact parameter counts. ## Pythia-410M ### Model Details - Developed by: [EleutherAI](http://eleuther.ai) - Model type: Transformer-based Language Model - Language: English - Learn more: [Pythia's GitHub repository](https://github.com/EleutherAI/pythia) for training procedure, config files, and details on how to use. - Library: [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) - License: Apache 2.0 - Contact: to ask questions about this model, join the [EleutherAI Discord](https://discord.gg/zBGx3azzUn), and post them in `#release-discussion`. Please read the existing *Pythia* documentation before asking about it in the EleutherAI Discord. For general correspondence: [contact@eleuther. ai](mailto:[email protected]). <figure> | Pythia model | Non-Embedding Params | Layers | Model Dim | Heads | Batch Size | Learning Rate | Equivalent Models | | -----------: | -------------------: | :----: | :-------: | :---: | :--------: | :-------------------: | :--------------------: | | 70M | 18,915,328 | 6 | 512 | 8 | 2M | 1.0 x 10<sup>-3</sup> | — | | 160M | 85,056,000 | 12 | 768 | 12 | 4M | 6.0 x 10<sup>-4</sup> | GPT-Neo 125M, OPT-125M | | 410M | 302,311,424 | 24 | 1024 | 16 | 4M | 3.0 x 10<sup>-4</sup> | OPT-350M | | 1.0B | 805,736,448 | 16 | 2048 | 8 | 2M | 3.0 x 10<sup>-4</sup> | — | | 1.4B | 1,208,602,624 | 24 | 2048 | 16 | 4M | 2.0 x 10<sup>-4</sup> | GPT-Neo 1.3B, OPT-1.3B | | 2.8B | 2,517,652,480 | 32 | 2560 | 32 | 2M | 1.6 x 10<sup>-4</sup> | GPT-Neo 2.7B, OPT-2.7B | | 6.9B | 6,444,163,072 | 32 | 4096 | 32 | 2M | 1.2 x 10<sup>-4</sup> | OPT-6.7B | | 12B | 11,327,027,200 | 36 | 5120 | 40 | 2M | 1.2 x 10<sup>-4</sup> | — | <figcaption>Engineering details for the <i>Pythia Suite</i>. Deduped and non-deduped models of a given size have the same hyperparameters. “Equivalent” models have <b>exactly</b> the same architecture, and the same number of non-embedding parameters.</figcaption> </figure> ### Uses and Limitations #### Intended Use The primary intended use of Pythia is research on the behavior, functionality, and limitations of large language models. This suite is intended to provide a controlled setting for performing scientific experiments. To enable the study of how language models change over the course of training, we provide 143 evenly spaced intermediate checkpoints per model. 
These checkpoints are hosted on Hugging Face as branches. Note that branch `143000` corresponds exactly to the model checkpoint on the `main` branch of each model. You may also further fine-tune and adapt Pythia-410M for deployment, as long as your use is in accordance with the Apache 2.0 license. Pythia models work with the Hugging Face [Transformers Library](https://huggingface.co/docs/transformers/index). If you decide to use pre-trained Pythia-410M as a basis for your fine-tuned model, please conduct your own risk and bias assessment. #### Out-of-scope use The Pythia Suite is **not** intended for deployment. It is not a in itself a product and cannot be used for human-facing interactions. Pythia models are English-language only, and are not suitable for translation or generating text in other languages. Pythia-410M has not been fine-tuned for downstream contexts in which language models are commonly deployed, such as writing genre prose, or commercial chatbots. This means Pythia-410M will **not** respond to a given prompt the way a product like ChatGPT does. This is because, unlike this model, ChatGPT was fine-tuned using methods such as Reinforcement Learning from Human Feedback (RLHF) to better “understand” human instructions. #### Limitations and biases The core functionality of a large language model is to take a string of text and predict the next token. The token deemed statistically most likely by the model need not produce the most “accurate” text. Never rely on Pythia-410M to produce factually accurate output. This model was trained on [the Pile](https://pile.eleuther.ai/), a dataset known to contain profanity and texts that are lewd or otherwise offensive. See [Section 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a discussion of documented biases with regards to gender, religion, and race. Pythia-410M may produce socially unacceptable or undesirable text, *even if* the prompt itself does not include anything explicitly offensive. If you plan on using text generated through, for example, the Hosted Inference API, we recommend having a human curate the outputs of this language model before presenting it to other people. Please inform your audience that the text was generated by Pythia-410M. ### Quickstart Pythia models can be loaded and used via the following code, demonstrated here for the third `pythia-70m-deduped` checkpoint: ```python from transformers import GPTNeoXForCausalLM, AutoTokenizer model = GPTNeoXForCausalLM.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) inputs = tokenizer("Hello, I am", return_tensors="pt") tokens = model.generate(**inputs) tokenizer.decode(tokens[0]) ``` Revision/branch `step143000` corresponds exactly to the model checkpoint on the `main` branch of each model.<br> For more information on how to use all Pythia models, see [documentation on GitHub](https://github.com/EleutherAI/pythia). ### Training #### Training data [The Pile](https://pile.eleuther.ai/) is a 825GiB general-purpose dataset in English. It was created by EleutherAI specifically for training large language models. It contains texts from 22 diverse sources, roughly broken down into five categories: academic writing (e.g. arXiv), internet (e.g. CommonCrawl), prose (e.g. Project Gutenberg), dialogue (e.g. YouTube subtitles), and miscellaneous (e.g. 
GitHub, Enron Emails). See [the Pile paper](https://arxiv.org/abs/2101.00027) for a breakdown of all data sources, methodology, and a discussion of ethical implications. Consult [the datasheet](https://arxiv.org/abs/2201.07311) for more detailed documentation about the Pile and its component datasets. The Pile can be downloaded from the [official website](https://pile.eleuther.ai/), or from a [community mirror](https://the-eye.eu/public/AI/pile/).<br> The Pile was **not** deduplicated before being used to train Pythia-410M. #### Training procedure All models were trained on the exact same data, in the exact same order. Each model saw 299,892,736,000 tokens during training, and 143 checkpoints for each model are saved every 2,097,152,000 tokens, spaced evenly throughout training. This corresponds to training for just under 1 epoch on the Pile for non-deduplicated models, and about 1.5 epochs on the deduplicated Pile. All *Pythia* models trained for the equivalent of 143000 steps at a batch size of 2,097,152 tokens. Two batch sizes were used: 2M and 4M. Models with a batch size of 4M tokens listed were originally trained for 71500 steps instead, with checkpoints every 500 steps. The checkpoints on Hugging Face are renamed for consistency with all 2M batch models, so `step1000` is the first checkpoint for `pythia-1.4b` that was saved (corresponding to step 500 in training), and `step1000` is likewise the first `pythia-6.9b` checkpoint that was saved (corresponding to 1000 “actual” steps).<br> See [GitHub](https://github.com/EleutherAI/pythia) for more details on training procedure, including [how to reproduce it](https://github.com/EleutherAI/pythia/blob/main/README.md#reproducing-training).<br> Pythia uses the same tokenizer as [GPT-NeoX- 20B](https://huggingface.co/EleutherAI/gpt-neox-20b). ### Evaluations All 16 *Pythia* models were evaluated using the [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness). You can access the results by model and step at `results/json/*` in the [GitHub repository](https://github.com/EleutherAI/pythia/tree/main/results/json).<br> Expand the sections below to see plots of evaluation results for all Pythia and Pythia-deduped models compared with OPT and BLOOM. <details> <summary>LAMBADA – OpenAI</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/lambada_openai.png" style="width:auto"/> </details> <details> <summary>Physical Interaction: Question Answering (PIQA)</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/piqa.png" style="width:auto"/> </details> <details> <summary>WinoGrande</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/winogrande.png" style="width:auto"/> </details> <details> <summary>AI2 Reasoning Challenge—Challenge Set</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/arc_challenge.png" style="width:auto"/> </details> <details> <summary>SciQ</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/sciq.png" style="width:auto"/> </details> ### Naming convention and parameter count *Pythia* models were renamed in January 2023. It is possible that the old naming convention still persists in some documentation by accident. The current naming convention (70M, 160M, etc.) is based on total parameter count. 
<figure style="width:32em">

| current Pythia suffix | old suffix | total params   | non-embedding params |
| --------------------: | ---------: | -------------: | -------------------: |
| 70M                   | 19M        | 70,426,624     | 18,915,328           |
| 160M                  | 125M       | 162,322,944    | 85,056,000           |
| 410M                  | 350M       | 405,334,016    | 302,311,424          |
| 1B                    | 800M       | 1,011,781,632  | 805,736,448          |
| 1.4B                  | 1.3B       | 1,414,647,808  | 1,208,602,624        |
| 2.8B                  | 2.7B       | 2,775,208,960  | 2,517,652,480        |
| 6.9B                  | 6.7B       | 6,857,302,016  | 6,444,163,072        |
| 12B                   | 13B        | 11,846,072,320 | 11,327,027,200       |
</figure>
AnonymousSub/dummy_1
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- language: - en tags: - pytorch - causal-lm - pythia - pythia_v0 license: apache-2.0 datasets: - the_pile --- The *Pythia Scaling Suite* is a collection of models developed to facilitate interpretability research. It contains two sets of eight models of sizes 70M, 160M, 410M, 1B, 1.4B, 2.8B, 6.9B, and 12B. For each size, there are two models: one trained on the Pile, and one trained on the Pile after the dataset has been globally deduplicated. All 8 model sizes are trained on the exact same data, in the exact same order. All Pythia models are available [on Hugging Face](https://huggingface.co/models?other=pythia). The Pythia model suite was deliberately designed to promote scientific research on large language models, especially interpretability research. Despite not centering downstream performance as a design goal, we find the models <a href="#evaluations">match or exceed</a> the performance of similar and same-sized models, such as those in the OPT and GPT-Neo suites. Please note that all models in the *Pythia* suite were renamed in January 2023. For clarity, a <a href="#naming-convention-and-parameter-count">table comparing the old and new names</a> is provided in this model card, together with exact parameter counts. ## Pythia-12B ### Model Details - Developed by: [EleutherAI](http://eleuther.ai) - Model type: Transformer-based Language Model - Language: English - Learn more: [Pythia's GitHub repository](https://github.com/EleutherAI/pythia) for training procedure, config files, and details on how to use. - Library: [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) - License: Apache 2.0 - Contact: to ask questions about this model, join the [EleutherAI Discord](https://discord.gg/zBGx3azzUn), and post them in `#release-discussion`. Please read the existing *Pythia* documentation before asking about it in the EleutherAI Discord. For general correspondence: [contact@eleuther. ai](mailto:[email protected]). <figure> | Pythia model | Non-Embedding Params | Layers | Model Dim | Heads | Batch Size | Learning Rate | Equivalent Models | | -----------: | -------------------: | :----: | :-------: | :---: | :--------: | :-------------------: | :--------------------: | | 70M | 18,915,328 | 6 | 512 | 8 | 2M | 1.0 x 10<sup>-3</sup> | — | | 160M | 85,056,000 | 12 | 768 | 12 | 4M | 6.0 x 10<sup>-4</sup> | GPT-Neo 125M, OPT-125M | | 410M | 302,311,424 | 24 | 1024 | 16 | 4M | 3.0 x 10<sup>-4</sup> | OPT-350M | | 1.0B | 805,736,448 | 16 | 2048 | 8 | 2M | 3.0 x 10<sup>-4</sup> | — | | 1.4B | 1,208,602,624 | 24 | 2048 | 16 | 4M | 2.0 x 10<sup>-4</sup> | GPT-Neo 1.3B, OPT-1.3B | | 2.8B | 2,517,652,480 | 32 | 2560 | 32 | 2M | 1.6 x 10<sup>-4</sup> | GPT-Neo 2.7B, OPT-2.7B | | 6.9B | 6,444,163,072 | 32 | 4096 | 32 | 2M | 1.2 x 10<sup>-4</sup> | OPT-6.7B | | 12B | 11,327,027,200 | 36 | 5120 | 40 | 2M | 1.2 x 10<sup>-4</sup> | — | <figcaption>Engineering details for the <i>Pythia Suite</i>. Deduped and non-deduped models of a given size have the same hyperparameters. “Equivalent” models have <b>exactly</b> the same architecture, and the same number of non-embedding parameters.</figcaption> </figure> ### Uses and Limitations #### Intended Use The primary intended use of Pythia is research on the behavior, functionality, and limitations of large language models. This suite is intended to provide a controlled setting for performing scientific experiments. To enable the study of how language models change over the course of training, we provide 143 evenly spaced intermediate checkpoints per model. 
These checkpoints are hosted on Hugging Face as branches. Note that branch `143000` corresponds exactly to the model checkpoint on the `main` branch of each model. You may also further fine-tune and adapt Pythia-12B for deployment, as long as your use is in accordance with the Apache 2.0 license. Pythia models work with the Hugging Face [Transformers Library](https://huggingface.co/docs/transformers/index). If you decide to use pre-trained Pythia-12B as a basis for your fine-tuned model, please conduct your own risk and bias assessment. #### Out-of-scope use The Pythia Suite is **not** intended for deployment. It is not a in itself a product and cannot be used for human-facing interactions. Pythia models are English-language only, and are not suitable for translation or generating text in other languages. Pythia-12B has not been fine-tuned for downstream contexts in which language models are commonly deployed, such as writing genre prose, or commercial chatbots. This means Pythia-12B will **not** respond to a given prompt the way a product like ChatGPT does. This is because, unlike this model, ChatGPT was fine-tuned using methods such as Reinforcement Learning from Human Feedback (RLHF) to better “understand” human instructions. #### Limitations and biases The core functionality of a large language model is to take a string of text and predict the next token. The token deemed statistically most likely by the model need not produce the most “accurate” text. Never rely on Pythia-12B to produce factually accurate output. This model was trained on [the Pile](https://pile.eleuther.ai/), a dataset known to contain profanity and texts that are lewd or otherwise offensive. See [Section 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a discussion of documented biases with regards to gender, religion, and race. Pythia-12B may produce socially unacceptable or undesirable text, *even if* the prompt itself does not include anything explicitly offensive. If you plan on using text generated through, for example, the Hosted Inference API, we recommend having a human curate the outputs of this language model before presenting it to other people. Please inform your audience that the text was generated by Pythia-12B. ### Quickstart Pythia models can be loaded and used via the following code, demonstrated here for the third `pythia-70m-deduped` checkpoint: ```python from transformers import GPTNeoXForCausalLM, AutoTokenizer model = GPTNeoXForCausalLM.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) inputs = tokenizer("Hello, I am", return_tensors="pt") tokens = model.generate(**inputs) tokenizer.decode(tokens[0]) ``` Revision/branch `step143000` corresponds exactly to the model checkpoint on the `main` branch of each model.<br> For more information on how to use all Pythia models, see [documentation on GitHub](https://github.com/EleutherAI/pythia). ### Training #### Training data [The Pile](https://pile.eleuther.ai/) is a 825GiB general-purpose dataset in English. It was created by EleutherAI specifically for training large language models. It contains texts from 22 diverse sources, roughly broken down into five categories: academic writing (e.g. arXiv), internet (e.g. CommonCrawl), prose (e.g. Project Gutenberg), dialogue (e.g. YouTube subtitles), and miscellaneous (e.g. 
GitHub, Enron Emails). See [the Pile paper](https://arxiv.org/abs/2101.00027) for a breakdown of all data sources, methodology, and a discussion of ethical implications. Consult [the datasheet](https://arxiv.org/abs/2201.07311) for more detailed documentation about the Pile and its component datasets. The Pile can be downloaded from the [official website](https://pile.eleuther.ai/), or from a [community mirror](https://the-eye.eu/public/AI/pile/).<br> The Pile was **not** deduplicated before being used to train Pythia-12B. #### Training procedure All models were trained on the exact same data, in the exact same order. Each model saw 299,892,736,000 tokens during training, and 143 checkpoints for each model are saved every 2,097,152,000 tokens, spaced evenly throughout training. This corresponds to training for just under 1 epoch on the Pile for non-deduplicated models, and about 1.5 epochs on the deduplicated Pile. All *Pythia* models trained for the equivalent of 143000 steps at a batch size of 2,097,152 tokens. Two batch sizes were used: 2M and 4M. Models with a batch size of 4M tokens listed were originally trained for 71500 steps instead, with checkpoints every 500 steps. The checkpoints on Hugging Face are renamed for consistency with all 2M batch models, so `step1000` is the first checkpoint for `pythia-1.4b` that was saved (corresponding to step 500 in training), and `step1000` is likewise the first `pythia-6.9b` checkpoint that was saved (corresponding to 1000 “actual” steps).<br> See [GitHub](https://github.com/EleutherAI/pythia) for more details on training procedure, including [how to reproduce it](https://github.com/EleutherAI/pythia/blob/main/README.md#reproducing-training).<br> Pythia uses the same tokenizer as [GPT-NeoX- 20B](https://huggingface.co/EleutherAI/gpt-neox-20b). ### Evaluations All 16 *Pythia* models were evaluated using the [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness). You can access the results by model and step at `results/json/*` in the [GitHub repository](https://github.com/EleutherAI/pythia/tree/main/results/json).<br> Expand the sections below to see plots of evaluation results for all Pythia and Pythia-deduped models compared with OPT and BLOOM. <details> <summary>LAMBADA – OpenAI</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/lambada_openai.png" style="width:auto"/> </details> <details> <summary>Physical Interaction: Question Answering (PIQA)</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/piqa.png" style="width:auto"/> </details> <details> <summary>WinoGrande</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/winogrande.png" style="width:auto"/> </details> <details> <summary>AI2 Reasoning Challenge—Challenge Set</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/arc_challenge.png" style="width:auto"/> </details> <details> <summary>SciQ</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/sciq.png" style="width:auto"/> </details> ### Naming convention and parameter count *Pythia* models were renamed in January 2023. It is possible that the old naming convention still persists in some documentation by accident. The current naming convention (70M, 160M, etc.) is based on total parameter count. 
<figure style="width:32em"> | current Pythia suffix | old suffix | total params | non-embedding params | | --------------------: | ---------: | -------------: | -------------------: | | 70M | 19M | 70,426,624 | 18,915,328 | | 160M | 125M | 162,322,944 | 85,056,000 | | 410M | 350M | 405,334,016 | 302,311,424 | | 1B | 800M | 1,011,781,632 | 805,736,448 | | 1.4B | 1.3B | 1,414,647,808 | 1,208,602,624 | | 2.8B | 2.7B | 2,775,208,960 | 2,517,652,480 | | 6.9B | 6.7B | 6,857,302,016 | 6,444,163,072 | | 12B | 13B | 11,846,072,320 | 11,327,027,200 | </figure>
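If you want to sanity-check the totals in this table for any model in the suite, one rough way (a sketch, not an official EleutherAI script) is to load a checkpoint with `transformers` and subtract the parameters held in the embedding matrices. In the GPT-NeoX architecture the input embedding (`embed_in`) and the output projection (`embed_out`) are separate, untied matrices, so both are counted as embedding parameters here:

```python
from transformers import GPTNeoXForCausalLM

# The smallest model keeps the download quick; substitute any other Pythia checkpoint.
model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-70m")

total = sum(p.numel() for p in model.parameters())
embedding = sum(p.numel() for name, p in model.named_parameters() if "embed" in name)

print(f"total: {total:,}  non-embedding: {total - embedding:,}")
```

For `pythia-70m` this should reproduce the 70,426,624 / 18,915,328 figures in the first row of the table.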
AnonymousSub/rule_based_bert_mean_diff_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub and evaluate_agent are helper functions defined in the
# Hugging Face Deep RL course notebooks; they are not imported from a published library.
model = load_from_hub(repo_id="Axwl/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])

evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
```
AnonymousSub/rule_based_bert_mean_diff_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3-tst
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub and evaluate_agent are helper functions defined in the
# Hugging Face Deep RL course notebooks; they are not imported from a published library.
model = load_from_hub(repo_id="Axwl/q-Taxi-v3-tst", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])

evaluate_agent(env, model["max_steps"], model["n_eval_episodes"], model["qtable"], model["eval_seed"])
```
AnonymousSub/rule_based_bert_quadruplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: summarise_v6
  results: []
---

# summarise_v6

This model is a fine-tuned version of [allenai/led-base-16384](https://huggingface.co/allenai/led-base-16384) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0497
- Rouge2 Precision: 0.3109
- Rouge2 Recall: 0.406
- Rouge2 Fmeasure: 0.3375

## Model description

More information needed

## Intended uses & limitations

- `max_input_length = 3072`
- `max_output_length = 1000`
- `led.config.max_length = 1000`
- `led.config.min_length = 100`

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Rouge2 Precision | Rouge2 Recall | Rouge2 Fmeasure |
|:-------------:|:-----:|:----:|:---------------:|:----------------:|:-------------:|:---------------:|
| 1.7163 | 0.22 | 10 | 1.2307 | 0.1428 | 0.5118 | 0.2089 |
| 1.632 | 0.44 | 20 | 1.1337 | 0.36 | 0.3393 | 0.3181 |
| 1.0916 | 0.67 | 30 | 1.0738 | 0.2693 | 0.3487 | 0.2731 |
| 1.573 | 0.89 | 40 | 1.0497 | 0.3109 | 0.406 | 0.3375 |

### Framework versions

- Transformers 4.21.3
- Pytorch 1.12.1+cu113
- Datasets 1.2.1
- Tokenizers 0.12.1
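Since the card does not include inference code, here is a minimal sketch of applying the length settings above at generation time. The checkpoint id is the base `allenai/led-base-16384` model used only as a placeholder for the fine-tuned summarise_v6 weights, and LED additionally expects a `global_attention_mask` (global attention on the first token is the usual default):

```python
import torch
from transformers import LEDForConditionalGeneration, LEDTokenizer

# Placeholder: point this at the fine-tuned summarise_v6 checkpoint.
checkpoint = "allenai/led-base-16384"
tokenizer = LEDTokenizer.from_pretrained(checkpoint)
model = LEDForConditionalGeneration.from_pretrained(checkpoint)

document = "..."  # the long input text to summarise
inputs = tokenizer(document, max_length=3072, truncation=True, return_tensors="pt")

# LED combines local attention with global attention on selected tokens.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

summary_ids = model.generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    global_attention_mask=global_attention_mask,
    max_length=1000,
    min_length=100,
)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```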
AnonymousSub/rule_based_bert_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit --- ### ki on Stable Diffusion This is the `<ki-mars>` (Ki from the Disney Mars Needs Mom) concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<ki-mars> 0](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/1.jpeg) ![<ki-mars> 1](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/13.jpeg) ![<ki-mars> 2](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/12.jpeg) ![<ki-mars> 3](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/18.jpeg) ![<ki-mars> 4](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/8.jpeg) ![<ki-mars> 5](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/23.jpeg) ![<ki-mars> 6](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/17.jpeg) ![<ki-mars> 7](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/9.jpeg) ![<ki-mars> 8](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/2.jpeg) ![<ki-mars> 9](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/0.jpeg) ![<ki-mars> 10](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/14.jpeg) ![<ki-mars> 11](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/4.jpeg) ![<ki-mars> 12](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/10.jpeg) ![<ki-mars> 13](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/6.jpeg) ![<ki-mars> 14](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/24.jpeg) ![<ki-mars> 15](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/11.jpeg) ![<ki-mars> 16](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/21.jpeg) ![<ki-mars> 17](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/5.jpeg) ![<ki-mars> 18](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/3.jpeg) ![<ki-mars> 19](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/19.jpeg) ![<ki-mars> 20](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/20.jpeg) ![<ki-mars> 21](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/15.jpeg) ![<ki-mars> 22](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/16.jpeg) ![<ki-mars> 23](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/22.jpeg) ![<ki-mars> 24](https://huggingface.co/sd-concepts-library/ki/resolve/main/concept_images/7.jpeg)
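Beyond the notebooks above, recent versions of the 🧨 Diffusers library can load the learned embedding directly. This is only a sketch; it assumes a Stable Diffusion v1-style base checkpoint and a `diffusers` release that provides `load_textual_inversion`:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Pull the learned <ki-mars> embedding from this concept repository.
pipe.load_textual_inversion("sd-concepts-library/ki")

image = pipe("a portrait of <ki-mars>, digital art").images[0]
image.save("ki-mars.png")
```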
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
data: https://github.com/BigSalmon2/InformalToFormalDataset

Text Generation Informal Formal

```
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln85Paraphrase")
model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln85Paraphrase")
```

```
Demo: https://huggingface.co/spaces/BigSalmon/FormalInformalConciseWordy
```

```
prompt = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln:"""
input_ids = tokenizer.encode(prompt, return_tensors='pt')
outputs = model.generate(
    input_ids=input_ids,
    max_length=10 + len(prompt),
    temperature=1.0,
    top_k=50,
    top_p=0.95,
    do_sample=True,
    num_return_sequences=5,
    early_stopping=True,
)
for i in range(5):
    print(tokenizer.decode(outputs[i]))
```

Most likely outputs (Disclaimer: I highly recommend using this over just generating):

```
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)

prompt = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln:"""
text = tokenizer.encode(prompt)
myinput, past_key_values = torch.tensor([text]), None
myinput = myinput.to(device)
logits, past_key_values = model(myinput, past_key_values=past_key_values, return_dict=False)
logits = logits[0, -1]
probabilities = torch.nn.functional.softmax(logits, dim=-1)
best_logits, best_indices = logits.topk(250)
best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]
text.append(best_indices[0].item())
best_probabilities = probabilities[best_indices].tolist()
words = []
print(best_words)
```

```
How To Make Prompt:
informal english: i am very ready to do that just that.
Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end.
Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task.
***
informal english: space is huge and needs to be explored.
Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless.
Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration.
***
informal english: corn fields are all across illinois, visible once you leave chicago.
Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.
informal english:
```

```
original: microsoft word's [MASK] pricing invites competition.
Translated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition.
***
original: the library’s quiet atmosphere encourages visitors to [blank] in their work.
Translated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work.
```

```
Essay Intro (Warriors vs. Rockets in Game 7):
text: eagerly anticipated by fans, game 7's are the highlight of the post-season.
text: ever-building in suspense, game 7's have the crowd captivated.
***
Essay Intro (South Korean TV Is Becoming Popular):
text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ).
text: increasingly held in critical esteem, south korean television continues to impress. text: at the forefront of quality content, south korea is quickly achieving celebrity status. *** Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` - nebraska - unicamerical legislature - different from federal house and senate text: featuring a unicameral legislature, nebraska's political system stands in stark contrast to the federal model, comprised of a house and senate. 
*** - penny has practically no value - should be taken out of circulation - just as other coins have been in us history - lost use - value not enough - to make environmental consequences worthy text: all but valueless, the penny should be retired. as with other coins in american history, it has become defunct. too minute to warrant the environmental consequences of its production, it has outlived its usefulness. *** - ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ``` ``` wordy: classical music is becoming less popular more and more. Translate into Concise Text: interest in classic music is fading. *** wordy: ``` ``` sweet: savvy voters ousted him. longer: voters who were informed delivered his defeat. *** sweet: ``` ``` 1: commercial space company spacex plans to launch a whopping 52 flights in 2022. 2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022. 3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights. 4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company. 5: a commercial space company, spacex aims to conduct 52 flights in 2022. *** 1: ``` Keywords to sentences or sentence. ``` ngos are characterized by: □ voluntary citizens' group that is organized on a local, national or international level □ encourage political participation □ often serve humanitarian functions □ work for social, economic, or environmental change *** what are the drawbacks of living near an airbnb? □ noise □ parking □ traffic □ security □ strangers *** ``` ``` original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung. adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung. *** original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark. adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark. *** original: ``` ``` original: had trouble deciding. translated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation. *** original: ``` ``` input: not loyal 1800s english: ( two-faced / inimical / perfidious / duplicitous / mendacious / double-dealing / shifty ). *** input: ``` ``` first: ( was complicit in / was involved in ). antonym: ( was blameless / was not an accomplice to / had no hand in / was uninvolved in ). *** first: ( have no qualms about / see no issue with ). antonym: ( are deeply troubled by / harbor grave reservations about / have a visceral aversion to / take ( umbrage at / exception to ) / are wary of ). *** first: ( do not see eye to eye / disagree often ). 
antonym: ( are in sync / are united / have excellent rapport / are like-minded / are in step / are of one mind / are in lockstep / operate in perfect harmony / march in lockstep ). *** first: ``` ``` stiff with competition, law school {A} is the launching pad for countless careers, {B} is a crowded field, {C} ranks among the most sought-after professional degrees, {D} is a professional proving ground. *** languishing in viewership, saturday night live {A} is due for a creative renaissance, {B} is no longer a ratings juggernaut, {C} has been eclipsed by its imitators, {C} can still find its mojo. *** dubbed the "manhattan of the south," atlanta {A} is a bustling metropolis, {B} is known for its vibrant downtown, {C} is a city of rich history, {D} is the pride of georgia. *** embattled by scandal, harvard {A} is feeling the heat, {B} cannot escape the media glare, {C} is facing its most intense scrutiny yet, {D} is in the spotlight for all the wrong reasons. ``` Infill / Infilling / Masking / Phrase Masking (Works pretty decently actually, especially when you use logprobs code from above): ``` his contention [blank] by the evidence [sep] was refuted [answer] *** few sights are as [blank] new york city as the colorful, flashing signage of its bodegas [sep] synonymous with [answer] *** when rick won the lottery, all of his distant relatives [blank] his winnings [sep] clamored for [answer] *** the library’s quiet atmosphere encourages visitors to [blank] in their work [sep] immerse themselves [answer] *** the joy of sport is that no two games are alike. for every exhilarating experience, however, there is an interminable one. the national pastime, unfortunately, has a penchant for the latter. what begins as a summer evening at the ballpark can quickly devolve into a game of tedium. the primary culprit is the [blank] of play. from batters readjusting their gloves to fielders spitting on their mitts, the action is [blank] unnecessary interruptions. the sport's future is [blank] if these tendencies are not addressed [sep] plodding pace [answer] riddled with [answer] bleak [answer] *** microsoft word's [blank] pricing [blank] competition [sep] unconscionable [answer] invites [answer] *** ``` ``` original: microsoft word's [MASK] pricing invites competition. Translated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition. *** original: the library’s quiet atmosphere encourages visitors to [blank] in their work. Translated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work. ``` Backwards ``` Essay Intro (National Parks): text: tourists are at ease in the national parks, ( swept up in the beauty of their natural splendor ). *** Essay Intro (D.C. Statehood): washington, d.c. is a city of outsize significance, ( ground zero for the nation's political life / center stage for the nation's political machinations ). ``` ``` topic: the Golden State Warriors. characterization 1: the reigning kings of the NBA. characterization 2: possessed of a remarkable cohesion. characterization 3: helmed by superstar Stephen Curry. characterization 4: perched atop the league’s hierarchy. characterization 5: boasting a litany of hall-of-famers. *** topic: emojis. characterization 1: shorthand for a digital generation. characterization 2: more versatile than words. characterization 3: the latest frontier in language. characterization 4: a form of self-expression. characterization 5: quintessentially millennial. 
characterization 6: reflective of a tech-centric world. *** topic: ``` ``` regular: illinois went against the census' population-loss prediction by getting more residents. VBG: defying the census' prediction of population loss, illinois experienced growth. *** regular: microsoft word’s high pricing increases the likelihood of competition. VBG: extortionately priced, microsoft word is inviting competition. *** regular: ``` ``` source: badminton should be more popular in the US. QUERY: Based on the given topic, can you develop a story outline? target: (1) games played with racquets are popular, (2) just look at tennis and ping pong, (3) but badminton underappreciated, (4) fun, fast-paced, competitive, (5) needs to be marketed more text: the sporting arena is dominated by games that are played with racquets. tennis and ping pong, in particular, are immensely popular. somewhat curiously, however, badminton is absent from this pantheon. exciting, fast-paced, and competitive, it is an underappreciated pastime. all that it lacks is more effective marketing. *** source: movies in theaters should be free. QUERY: Based on the given topic, can you develop a story outline? target: (1) movies provide vital life lessons, (2) many venues charge admission, (3) those without much money text: the lessons that movies impart are far from trivial. the vast catalogue of cinematic classics is replete with inspiring sagas of friendship, bravery, and tenacity. it is regrettable, then, that admission to theaters is not free. in their current form, the doors of this most vital of institutions are closed to those who lack the means to pay. *** source: ``` ``` in the private sector, { transparency } is vital to the business’s credibility. the { disclosure of information } can be the difference between success and failure. *** the labor market is changing, with { remote work } now the norm. this { flexible employment } allows the individual to design their own schedule. *** the { cubicle } is the locus of countless grievances. many complain that the { enclosed workspace } restricts their freedom of movement. *** ``` ``` it would be natural to assume that americans, as a people whose ancestors { immigrated to this country }, would be sympathetic to those seeking to do likewise. question: what does “do likewise” mean in the above context? (a) make the same journey (b) share in the promise of the american dream (c) start anew in the land of opportunity (d) make landfall on the united states *** in the private sector, { transparency } is vital to the business’s credibility. this orientation can be the difference between success and failure. question: what does “this orientation” mean in the above context? (a) visible business practices (b) candor with the public (c) open, honest communication (d) culture of accountability ``` ``` example: suppose you are a teacher. further suppose you want to tell an accurate telling of history. then suppose a parent takes offense. they do so in the name of name of their kid. this happens a lot. text: educators' responsibility to remain true to the historical record often clashes with the parent's desire to shelter their child from uncomfortable realities. *** example: suppose you are a student at college. now suppose you have to buy textbooks. that is going to be worth hundreds of dollars. given how much you already spend on tuition, that is going to hard cost to bear. 
text: the exorbitant cost of textbooks, which often reaches hundreds of dollars, imposes a sizable financial burden on the already-strapped college student. ``` ``` <Prefix> the atlanta hawks may attribute <Prefix> <Suffix> trae young <Suffix> <Middle> their robust season to <Middle> *** <Prefix> the nobel prize in literature <Prefix> <Suffix> honor <Suffix> <Middle> is a singularly prestigious <Middle> ``` ``` accustomed to having its name uttered ______, harvard university is weathering a rare spell of reputational tumult (a) in reverential tones (b) with great affection (c) in adulatory fashion (d) in glowing terms ``` ``` clarify: international ( {working together} / cooperation ) is called for when ( {issue go beyond lots of borders} / an issue transcends borders / a given matter has transnational implications ). ``` ``` description: when someone thinks that their view is the only right one. synonyms: intolerant, opinionated, narrow-minded, insular, self-righteous. *** description: when you put something off. synonyms: shelve, defer, table, postpone. ``` ``` organic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea. rewrite phrases: meritocratic, viability, vision rewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability. ``` *Note* Of all the masking techniques, this one works the best. ``` <Prefix> the atlanta hawks may attribute <Prefix> <Suffix> trae young <Suffix> <Middle> their robust season to <Middle> *** <Prefix> the nobel prize in literature <Prefix> <Suffix> honor <Suffix> <Middle> is a singularly prestigious <Middle> ``` ``` essence: when someone's views are keeping within reasonable. refine: the senator's voting record is ( moderate / centrist / pragmatic / balanced / fair-minded / even-handed ). *** essence: when things are worked through in a petty way. refine: the propensity of the u.s. congress to settle every dispute by way of ( mudslinging / bickering / demagoguery / name-calling / finger-pointing / vilification ) is appalling. ``` ``` description: when someone thinks that their view is the only right one. synonyms: intolerant, opinionated, narrow-minded, insular, self-righteous. *** description: when you put something off. synonyms: shelve, defer, table, postpone. ``` ``` organic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea. rewrite phrases: meritocratic, viability, vision rewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability. ``` ``` music before bedtime [makes for being able to relax] -> is a recipe for relaxation. ``` ``` [people wanting entertainment love traveling new york city] -> travelers flock to new york city in droves, drawn to its iconic entertainment scene. [cannot blame them] -> one cannot fault them [broadway so fun] -> when it is home to such thrilling fare as Broadway. ``` ``` in their ( ‖ when you are rushing because you want to get there on time ‖ / haste to arrive punctually / mad dash to be timely ), morning commuters are too rushed to whip up their own meal. *** politicians prefer to author vague plans rather than ( ‖ when you can make a plan without many unknowns ‖ / actionable policies / concrete solutions ). ```
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - conversational --- # Harry Potter DialoGPT Model
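The card does not include a usage example or the checkpoint path, so the following is only a sketch of the usual DialoGPT-style chat loop; the repo id below is a placeholder to replace with this model's actual path.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id: substitute the actual path of this Harry Potter DialoGPT model.
checkpoint = "your-username/DialoGPT-small-harrypotter"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

chat_history_ids = None
for step in range(5):
    new_ids = tokenizer.encode(input(">> User: ") + tokenizer.eos_token, return_tensors="pt")
    # Append the new user turn to the running conversation history.
    bot_input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    print("Bot:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```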
AnonymousSub/rule_based_bert_triplet_epochs_1_shard_1_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 - precision - recall model-index: - name: results results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # results This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.4221 - Accuracy: 0.5764 - F1: 0.5709 - Precision: 0.5737 - Recall: 0.5764 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
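No usage example is provided, so here is a sketch of how a fine-tuned DistilBERT sequence classifier like this one is typically queried. The repo id is a placeholder, and because the training data and label names are unspecified, the output will use generic `LABEL_*` names unless `id2label` was configured:

```python
from transformers import pipeline

# Placeholder repo id: point this at wherever the fine-tuned "results" checkpoint was pushed.
classifier = pipeline("text-classification", model="your-username/results")

print(classifier("The delivery was late, but the support team resolved the issue quickly."))
# Example output shape only; the actual label and score will differ.
```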
AnonymousSub/rule_based_hier_quadruplet_0.1_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: mit --- ### FNF Boyfriend on Stable Diffusion This is the `<fnf-boyfriend>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<fnf-boyfriend> 0](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/1.jpeg) ![<fnf-boyfriend> 1](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/8.jpeg) ![<fnf-boyfriend> 2](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/2.jpeg) ![<fnf-boyfriend> 3](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/0.jpeg) ![<fnf-boyfriend> 4](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/4.jpeg) ![<fnf-boyfriend> 5](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/6.jpeg) ![<fnf-boyfriend> 6](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/5.jpeg) ![<fnf-boyfriend> 7](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/3.jpeg) ![<fnf-boyfriend> 8](https://huggingface.co/sd-concepts-library/fnf-boyfriend/resolve/main/concept_images/7.jpeg)
AnonymousSub/rule_based_hier_triplet_0.1_epochs_1_shard_1_squad2.0
[ "pytorch", "bert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: mit --- ### Society Finch on Stable Diffusion This is the `<society-finch>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as an `object`: ![<society-finch> 0](https://huggingface.co/sd-concepts-library/society-finch/resolve/main/concept_images/1.jpeg) ![<society-finch> 1](https://huggingface.co/sd-concepts-library/society-finch/resolve/main/concept_images/2.jpeg) ![<society-finch> 2](https://huggingface.co/sd-concepts-library/society-finch/resolve/main/concept_images/0.jpeg) ![<society-finch> 3](https://huggingface.co/sd-concepts-library/society-finch/resolve/main/concept_images/3.jpeg) Here are the example images generated with this concept: ![<society-finch> example](https://i.imgur.com/dMswU2s.png)
AnonymousSub/rule_based_hier_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: AlanLee/distilbert-base-uncased-finetuned-cola results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # AlanLee/distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Train Loss: 0.3265 - Validation Loss: 0.4596 - Train Matthews Correlation: 0.5260 - Epoch: 1 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 1602, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: float32 ### Training results | Train Loss | Validation Loss | Train Matthews Correlation | Epoch | |:----------:|:---------------:|:--------------------------:|:-----:| | 0.5201 | 0.4432 | 0.4977 | 0 | | 0.3265 | 0.4596 | 0.5260 | 1 | ### Framework versions - Transformers 4.23.1 - TensorFlow 2.10.0 - Datasets 2.6.1 - Tokenizers 0.12.1
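The card stops at the training results, so below is a sketch of scoring a sentence with the fine-tuned weights in TensorFlow. It assumes the checkpoint was pushed to the Hub under the name shown in the model index, and that the usual CoLA label order applies (0 = unacceptable, 1 = acceptable):

```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

# Assumed repo id, taken from the model-index name above; adjust if the weights live elsewhere.
checkpoint = "AlanLee/distilbert-base-uncased-finetuned-cola"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)

inputs = tokenizer("The book was read by the student.", return_tensors="tf")
probs = tf.nn.softmax(model(**inputs).logits, axis=-1).numpy()[0]
print({"unacceptable": float(probs[0]), "acceptable": float(probs[1])})
```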
AnonymousSub/rule_based_roberta_bert_triplet_epochs_1_shard_1_wikiqa_copy
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - conversational --- # Version 2 of the McTea-based AI chatbot, now trained on more data.
AnonymousSub/rule_based_roberta_hier_quadruplet_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
---
tags:
- autotrain
- tabular
- classification
- tabular-classification
datasets:
- pachi107/autotrain-data-in-class-test
co2_eq_emissions:
  emissions: 3.1621916284030838
---

# Model Trained Using AutoTrain

- Problem type: Binary Classification
- Model ID: 1780161764
- CO2 Emissions (in grams): 3.1622

## Validation Metrics

- Loss: 0.044
- Accuracy: 0.974
- Precision: 1.000
- Recall: 0.930
- AUC: 1.000
- F1: 0.964

## Usage

```python
import json
import joblib
import pandas as pd

model = joblib.load('model.joblib')
config = json.load(open('config.json'))

features = config['features']

# Load the rows to score; the CSV must contain the feature columns listed in config.json.
data = pd.read_csv("data.csv")
data = data[features]
data.columns = ["feat_" + str(col) for col in data.columns]

predictions = model.predict(data)  # or model.predict_proba(data)
```
AnonymousSub/rule_based_roberta_hier_triplet_epochs_1_shard_1_wikiqa
[ "pytorch", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
---
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
inference: false
library_name: diffusers
extra_gated_prompt: |-
  One more step before getting this model.
  This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage.
  The CreativeML OpenRAIL License specifies:

  1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content
  2. CompVis claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license
  3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully)
  Please read the full license here: https://huggingface.co/spaces/CompVis/stable-diffusion-license

extra_gated_fields:
  I have read the License and agree with its terms: checkbox
---

Stable Diffusion Inpainting is a latent text-to-image diffusion model capable of generating photo-realistic images given any text input, with the extra capability of inpainting the pictures by using a mask.

The **Stable-Diffusion-Inpainting** was initialized with the weights of the [Stable-Diffusion-v-1-2](https://huggingface.co/CompVis/stable-diffusion-v-1-2-original). First 595k steps regular training, then 440k steps of inpainting training at resolution 512x512 on “laion-aesthetics v2 5+” and 10% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). For inpainting, the UNet has 5 additional input channels (4 for the encoded masked-image and 1 for the mask itself) whose weights were zero-initialized after restoring the non-inpainting checkpoint. During training, we generate synthetic masks and in 25% mask everything.

[![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/runwayml/stable-diffusion-inpainting) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
:-------------------------:|:-------------------------:|

## Examples:

You can use this both with the [🧨Diffusers library](https://github.com/huggingface/diffusers) and the [RunwayML GitHub repository](https://github.com/runwayml/stable-diffusion).

### Diffusers

```python
import torch
from diffusers import StableDiffusionInpaintPipeline

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    revision="fp16",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")  # the fp16 weights are intended for GPU inference

prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
#image and mask_image should be PIL images.
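# Illustrative addition (not from the original card): one way to obtain the two PIL
# images is to download the demo image and mask shown below and resize them to 512x512.
import requests
from io import BytesIO
from PIL import Image

def download_image(url):
    return Image.open(BytesIO(requests.get(url).content)).convert("RGB").resize((512, 512))

image = download_image("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png")
mask_image = download_image("https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png")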
#The mask structure is white for inpainting and black for keeping as is image = pipe(prompt=prompt, image=image, mask_image=mask_image).images[0] image.save("./yellow_cat_on_park_bench.png") ``` **How it works:** `image` | `mask_image` :-------------------------:|:-------------------------:| <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="300"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="300"/> `prompt` | `Output` :-------------------------:|:-------------------------:| <span style="position: relative;bottom: 150px;">Face of a yellow cat, high resolution, sitting on a park bench</span> | <img src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/test.png" alt="drawing" width="300"/> ### Original GitHub Repository 1. Download the weights [sd-v1-5-inpainting.ckpt](https://huggingface.co/runwayml/stable-diffusion-inpainting/resolve/main/sd-v1-5-inpainting.ckpt) 2. Follow instructions [here](https://github.com/runwayml/stable-diffusion#inpainting-with-stable-diffusion). ## Model Details - **Developed by:** Robin Rombach, Patrick Esser - **Model type:** Diffusion-based text-to-image generation model - **Language(s):** English - **License:** [The CreativeML OpenRAIL M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) is an [Open RAIL M license](https://www.licenses.ai/blog/2022/8/18/naming-convention-of-responsible-ai-licenses), adapted from the work that [BigScience](https://bigscience.huggingface.co/) and [the RAIL Initiative](https://www.licenses.ai/) are jointly carrying in the area of responsible AI licensing. See also [the article about the BLOOM Open RAIL license](https://bigscience.huggingface.co/blog/the-bigscience-rail-license) on which our license is based. - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Latent Diffusion Model](https://arxiv.org/abs/2112.10752) that uses a fixed, pretrained text encoder ([CLIP ViT-L/14](https://arxiv.org/abs/2103.00020)) as suggested in the [Imagen paper](https://arxiv.org/abs/2205.11487). - **Resources for more information:** [GitHub Repository](https://github.com/runwayml/stable-diffusion), [Paper](https://arxiv.org/abs/2112.10752). - **Cite as:** @InProceedings{Rombach_2022_CVPR, author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, title = {High-Resolution Image Synthesis With Latent Diffusion Models}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2022}, pages = {10684-10695} } # Uses ## Direct Use The model is intended for research purposes only. Possible research areas and tasks include - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of generative models. - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. - Research on generative models. Excluded uses are described below. ### Misuse, Malicious Use, and Out-of-Scope Use _Note: This section is taken from the [DALLE-MINI model card](https://huggingface.co/dalle-mini/dalle-mini), but applies in the same way to Stable Diffusion v1_. 
The model should not be used to intentionally create or disseminate images that create hostile or alienating environments for people. This includes generating images that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. #### Out-of-Scope Use The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. #### Misuse and Malicious Use Using the model to generate content that is cruel to individuals is a misuse of this model. This includes, but is not limited to: - Generating demeaning, dehumanizing, or otherwise harmful representations of people or their environments, cultures, religions, etc. - Intentionally promoting or propagating discriminatory content or harmful stereotypes. - Impersonating individuals without their consent. - Sexual content without consent of the people who might see it. - Mis- and disinformation - Representations of egregious violence and gore - Sharing of copyrighted or licensed material in violation of its terms of use. - Sharing content that is an alteration of copyrighted or licensed material in violation of its terms of use. ## Limitations and Bias ### Limitations - The model does not achieve perfect photorealism - The model cannot render legible text - The model does not perform well on more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere” - Faces and people in general may not be generated properly. - The model was trained mainly with English captions and will not work as well in other languages. - The autoencoding part of the model is lossy - The model was trained on a large-scale dataset [LAION-5B](https://laion.ai/blog/laion-5b/) which contains adult material and is not fit for product use without additional safety mechanisms and considerations. - No additional measures were used to deduplicate the dataset. As a result, we observe some degree of memorization for images that are duplicated in the training data. The training data can be searched at [https://rom1504.github.io/clip-retrieval/](https://rom1504.github.io/clip-retrieval/) to possibly assist in the detection of memorized images. ### Bias While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases. Stable Diffusion v1 was trained on subsets of [LAION-2B(en)](https://laion.ai/blog/laion-5b/), which consists of images that are primarily limited to English descriptions. Texts and images from communities and cultures that use other languages are likely to be insufficiently accounted for. This affects the overall output of the model, as white and western cultures are often set as the default. Further, the ability of the model to generate content with non-English prompts is significantly worse than with English-language prompts. ## Training **Training Data** The model developers used the following dataset for training the model: - LAION-2B (en) and subsets thereof (see next section) **Training Procedure** Stable Diffusion v1 is a latent diffusion model which combines an autoencoder with a diffusion model that is trained in the latent space of the autoencoder. During training, - Images are encoded through an encoder, which turns images into latent representations. 
The autoencoder uses a relative downsampling factor of 8 and maps images of shape H x W x 3 to latents of shape H/f x W/f x 4 - Text prompts are encoded through a ViT-L/14 text-encoder. - The non-pooled output of the text encoder is fed into the UNet backbone of the latent diffusion model via cross-attention. - The loss is a reconstruction objective between the noise that was added to the latent and the prediction made by the UNet. We currently provide six checkpoints, `sd-v1-1.ckpt`, `sd-v1-2.ckpt` and `sd-v1-3.ckpt`, `sd-v1-4.ckpt`, `sd-v1-5.ckpt` and `sd-v1-5-inpainting.ckpt` which were trained as follows, - `sd-v1-1.ckpt`: 237k steps at resolution `256x256` on [laion2B-en](https://huggingface.co/datasets/laion/laion2B-en). 194k steps at resolution `512x512` on [laion-high-resolution](https://huggingface.co/datasets/laion/laion-high-resolution) (170M examples from LAION-5B with resolution `>= 1024x1024`). - `sd-v1-2.ckpt`: Resumed from `sd-v1-1.ckpt`. 515k steps at resolution `512x512` on "laion-improved-aesthetics" (a subset of laion2B-en, filtered to images with an original size `>= 512x512`, estimated aesthetics score `> 5.0`, and an estimated watermark probability `< 0.5`. The watermark estimate is from the LAION-5B metadata, the aesthetics score is estimated using an [improved aesthetics estimator](https://github.com/christophschuhmann/improved-aesthetic-predictor)). - `sd-v1-3.ckpt`: Resumed from `sd-v1-2.ckpt`. 195k steps at resolution `512x512` on "laion-improved-aesthetics" and 10\% dropping of the text-conditioning to improve [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). - `sd-v1-4.ckpt`: Resumed from stable-diffusion-v1-2.225,000 steps at resolution 512x512 on "laion-aesthetics v2 5+" and 10 % dropping of the text-conditioning to [classifier-free guidance sampling](https://arxiv.org/abs/2207.12598). - `sd-v1-5.ckpt`: Resumed from sd-v1-2.ckpt. 595k steps at resolution 512x512 on "laion-aesthetics v2 5+" and 10% dropping of the text-conditioning to improve classifier-free guidance sampling. - `sd-v1-5-inpaint.ckpt`: Resumed from sd-v1-2.ckpt. 595k steps at resolution 512x512 on "laion-aesthetics v2 5+" and 10% dropping of the text-conditioning to improve classifier-free guidance sampling. Then 440k steps of inpainting training at resolution 512x512 on “laion-aesthetics v2 5+” and 10% dropping of the text-conditioning. For inpainting, the UNet has 5 additional input channels (4 for the encoded masked-image and 1 for the mask itself) whose weights were zero-initialized after restoring the non-inpainting checkpoint. During training, we generate synthetic masks and in 25% mask everything. - **Hardware:** 32 x 8 x A100 GPUs - **Optimizer:** AdamW - **Gradient Accumulations**: 2 - **Batch:** 32 x 8 x 2 x 4 = 2048 - **Learning rate:** warmup to 0.0001 for 10,000 steps and then kept constant ## Evaluation Results Evaluations with different classifier-free guidance scales (1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0) and 50 PLMS sampling steps show the relative improvements of the checkpoints: ![pareto](https://huggingface.co/CompVis/stable-diffusion/resolve/main/v1-1-to-v1-5.png) Evaluated using 50 PLMS steps and 10000 random prompts from the COCO2017 validation set, evaluated at 512x512 resolution. Not optimized for FID scores. ## Inpainting Evaluation To assess the performance of the inpainting model, we used the same evaluation protocol as in our [LDM paper](https://arxiv.org/abs/2112.10752). 
Since the Stable Diffusion Inpainting Model accepts a text input, we simply used a fixed prompt of `photograph of a beautiful empty scene, highest quality settings`. | Model | FID | LPIPS | |-----------------------------|------|------------------| | Stable Diffusion Inpainting | 1.00 | 0.141 (+- 0.082) | | Latent Diffusion Inpainting | 1.50 | 0.137 (+- 0.080) | | CoModGAN | 1.82 | 0.15 | | LaMa | 2.21 | 0.134 (+- 0.080) | ## Environmental Impact **Stable Diffusion v1** **Estimated Emissions** Based on the information below, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were used to estimate the carbon impact. - **Hardware Type:** A100 PCIe 40GB - **Hours used:** 150000 - **Cloud Provider:** AWS - **Compute Region:** US-east - **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 11250 kg CO2 eq. ## Citation ```bibtex @InProceedings{Rombach_2022_CVPR, author = {Rombach, Robin and Blattmann, Andreas and Lorenz, Dominik and Esser, Patrick and Ommer, Bj\"orn}, title = {High-Resolution Image Synthesis With Latent Diffusion Models}, booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, month = {June}, year = {2022}, pages = {10684-10695} } ``` *This model card was written by: Robin Rombach and Patrick Esser and is based on the [DALL-E Mini model card](https://huggingface.co/dalle-mini/dalle-mini).*
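For illustration, the training step described under "Training Procedure" above can be sketched as follows. This is not the code that was used to train the model; module names follow the 🤗 Diffusers/Transformers APIs, the repo id is shown only as an example, and batch handling, optimizer, and schedule details are omitted.

```python
import torch
import torch.nn.functional as F
from diffusers import AutoencoderKL, UNet2DConditionModel, DDPMScheduler
from transformers import CLIPTextModel, CLIPTokenizer

repo = "runwayml/stable-diffusion-v1-5"  # example repo id, for illustration only
vae = AutoencoderKL.from_pretrained(repo, subfolder="vae")                     # f=8 autoencoder with 4 latent channels
tokenizer = CLIPTokenizer.from_pretrained(repo, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(repo, subfolder="text_encoder")  # ViT-L/14 text encoder
unet = UNet2DConditionModel.from_pretrained(repo, subfolder="unet")
noise_scheduler = DDPMScheduler.from_pretrained(repo, subfolder="scheduler")

def training_step(images, prompts):
    """images: (B, 3, 512, 512) tensor in [-1, 1]; prompts: list of B strings."""
    # Encode images into latents of shape (B, 4, 64, 64), i.e. H/8 x W/8 x 4.
    latents = vae.encode(images).latent_dist.sample() * 0.18215
    # Add noise at a randomly sampled diffusion timestep.
    noise = torch.randn_like(latents)
    timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps,
                              (latents.shape[0],), device=latents.device)
    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
    # Non-pooled text-encoder output, consumed by the UNet via cross-attention.
    ids = tokenizer(prompts, padding="max_length", truncation=True, return_tensors="pt").input_ids
    encoder_hidden_states = text_encoder(ids)[0]
    # Reconstruction objective between the added noise and the UNet prediction.
    noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
    return F.mse_loss(noise_pred, noise)
```

In the actual setup, this loss was optimized with AdamW at the batch size and learning-rate schedule listed above.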
AnonymousSub/rule_based_roberta_only_classfn_twostage_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: cc-by-nc-sa-4.0 tags: - generated_from_trainer model-index: - name: output results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # output This model is a fine-tuned version of [Babelscape/rebel-large](https://huggingface.co/Babelscape/rebel-large) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1-measure | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:----------:| | No log | 1.0 | 236 | 0.3225 | 0.8889 | 0.8889 | 0.8889 | ### Framework versions - Transformers 4.21.1 - Pytorch 1.9.0+cu111 - Datasets 2.4.0 - Tokenizers 0.12.1
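Since the card gives no usage snippet, a minimal loading sketch is shown below. The base model `Babelscape/rebel-large` is a seq2seq (BART-style) model, so the fine-tuned checkpoint can presumably be loaded the same way; the path and example sentence are placeholders, as the card does not say where the weights are published.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_path = "./output"  # placeholder: local output directory or hub id of this fine-tuned model
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)

text = "Punta Cana is a resort town in the Dominican Republic."  # placeholder input sentence
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs, max_length=256, num_beams=3)
print(tokenizer.batch_decode(outputs, skip_special_tokens=False))  # generated output
```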
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - image-classification - pytorch metrics: - accuracy model-index: - name: Syn10kPlusOG-oct-ViT-Base-8Epochs-v1 results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.8866666555404663 --- # Syn10kPlusOG-oct-ViT-Base-8Epochs-v1
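No usage example is included; assuming this checkpoint follows the standard ViT image-classification layout implied by the tags, it could be loaded roughly as follows (the hub id and image path are placeholders).

```python
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="<namespace>/Syn10kPlusOG-oct-ViT-Base-8Epochs-v1",  # placeholder hub id
)
print(classifier("example_oct_image.png"))  # placeholder image path
```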
AnonymousSub/rule_based_roberta_twostage_quadruplet_epochs_1_shard_1_squad2.0
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- tags: - autotrain - token-classification language: - unk widget: - text: "I love AutoTrain 🤗" datasets: - teacookies/autotrain-data-17102022-cert co2_eq_emissions: emissions: 16.43804270120875 --- # Model Trained Using AutoTrain - Problem type: Entity Extraction - Model ID: 1781461794 - CO2 Emissions (in grams): 16.4380 ## Validation Metrics - Loss: 0.023 - Accuracy: 0.994 - Precision: 0.821 - Recall: 0.876 - F1: 0.847 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/teacookies/autotrain-17102022-cert-1781461794 ``` Or Python API: ``` from transformers import AutoModelForTokenClassification, AutoTokenizer model = AutoModelForTokenClassification.from_pretrained("teacookies/autotrain-17102022-cert-1781461794", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("teacookies/autotrain-17102022-cert-1781461794", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
ArBert/albert-base-v2-finetuned-ner-gmm-twitter
[ "pytorch", "tensorboard", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: cc-by-4.0 --- More information about the model is available [in this git repo](https://github.com/tceron/capture_similarity_between_political_parties)
ArBert/roberta-base-finetuned-ner-kmeans
[ "pytorch", "tensorboard", "roberta", "token-classification", "dataset:conll2003", "transformers", "generated_from_trainer", "license:mit", "model-index", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2022-10-17T09:39:21Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: t5-large_dataset_radiology_20220912.tsv results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-large_dataset_radiology_20220912.tsv This model is a fine-tuned version of [t5-large](https://huggingface.co/t5-large) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.20.1 - Pytorch 1.11.0+cu102 - Datasets 2.4.0 - Tokenizers 0.12.1
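A minimal loading sketch, assuming the fine-tuned weights were saved to a local directory; the path, input text, and generation settings below are placeholders, since the card does not describe how the radiology dataset was framed as input/output text.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_path = "./t5-large_dataset_radiology_20220912"  # placeholder path to the fine-tuned checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)

report = "FINDINGS: No focal consolidation or pleural effusion."  # placeholder radiology text
inputs = tokenizer(report, return_tensors="pt", truncation=True)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```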
Aravinth/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 256.22 +/- 29.66 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
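A minimal sketch of what that usage code could look like; the repo id and filename are placeholders for wherever this checkpoint is actually hosted.

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholders: point these at the actual repo id and zip filename of the checkpoint.
checkpoint = load_from_hub(repo_id="<namespace>/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10, deterministic=True)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```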