| Column | Type | Range |
| --- | --- | --- |
| `modelId` | string | lengths 4–81 |
| `tags` | list | |
| `pipeline_tag` | string | 17 classes |
| `config` | dict | |
| `downloads` | int64 | 0–59.7M |
| `first_commit` | timestamp[ns, tz=UTC] | |
| `card` | string | lengths 51–438k |
CAMeL-Lab/bert-base-arabic-camelbert-mix-pos-msa
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,862
null
---
language:
- pt
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
- cer
model-index:
- name: Whisper Large Portuguese
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: mozilla-foundation/common_voice_11_0 pt
      type: mozilla-foundation/common_voice_11_0
      config: pt
      split: test
      args: pt
    metrics:
    - name: WER
      type: wer
      value: 4.816664144852979
    - name: CER
      type: cer
      value: 1.6052355927195898
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: google/fleurs pt_br
      type: google/fleurs
      config: pt_br
      split: test
      args: pt_br
    metrics:
    - name: WER
      type: wer
      value: 8.56762285333714
    - name: CER
      type: cer
      value: 5.462965196208485
---

# Whisper Large Portuguese

This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on Portuguese, using the train and validation splits of [Common Voice 11](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0). Not all of the validation split was used during training: I held out 1k samples from it for evaluation during fine-tuning.

## Usage

```python
from transformers import pipeline

transcriber = pipeline(
    "automatic-speech-recognition",
    model="jonatasgrosman/whisper-large-pt-cv11"
)

transcriber.model.config.forced_decoder_ids = (
    transcriber.tokenizer.get_decoder_prompt_ids(
        language="pt",
        task="transcribe"
    )
)

transcription = transcriber("path/to/my_audio.wav")
```

## Evaluation

I evaluated the model on the test split of two datasets: [Common Voice 11](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) (the dataset used for fine-tuning) and [Fleurs](https://huggingface.co/datasets/google/fleurs) (a dataset not seen during fine-tuning).

Since Whisper can transcribe casing and punctuation, I evaluated the model in two scenarios: one using the raw text and one using normalized text (lowercased, punctuation removed). Additionally, for the Fleurs dataset, I evaluated the model on a subset that excludes transcriptions containing numerical values, because numbers are written out differently in Fleurs than in the fine-tuning dataset (Common Voice), and this mismatch is expected to hurt the model's performance on that type of transcription in Fleurs.

### Common Voice 11

| | CER | WER |
| --- | --- | --- |
| [jonatasgrosman/whisper-large-pt-cv11](https://huggingface.co/jonatasgrosman/whisper-large-pt-cv11) | 2.52 | 9.56 |
| [jonatasgrosman/whisper-large-pt-cv11](https://huggingface.co/jonatasgrosman/whisper-large-pt-cv11) + text normalization | 1.60 | 4.82 |
| [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) | 4.32 | 13.92 |
| [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) + text normalization | 2.84 | 7.02 |

### Fleurs

| | CER | WER |
| --- | --- | --- |
| [jonatasgrosman/whisper-large-pt-cv11](https://huggingface.co/jonatasgrosman/whisper-large-pt-cv11) | 4.88 | 12.08 |
| [jonatasgrosman/whisper-large-pt-cv11](https://huggingface.co/jonatasgrosman/whisper-large-pt-cv11) + text normalization | 5.46 | 8.57 |
| [jonatasgrosman/whisper-large-pt-cv11](https://huggingface.co/jonatasgrosman/whisper-large-pt-cv11) + keep only non-numeric samples | 2.35 | 9.00 |
| [jonatasgrosman/whisper-large-pt-cv11](https://huggingface.co/jonatasgrosman/whisper-large-pt-cv11) + text normalization + keep only non-numeric samples | 3.36 | 6.05 |
| [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) | 3.52 | 10.55 |
| [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) + text normalization | 4.19 | 7.04 |
| [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) + keep only non-numeric samples | 2.61 | 9.29 |
| [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) + text normalization + keep only non-numeric samples | 3.56 | 6.15 |
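For reference, a minimal sketch of the text normalization described above (lowercasing plus punctuation removal). This is an illustrative approximation; the exact normalizer used to produce the reported numbers is not specified in this card.

```python
import string


def normalize(text: str) -> str:
    # Lowercase and strip punctuation, per the evaluation setup described above.
    # Illustrative only: the normalizer behind the reported numbers is not
    # specified in this card.
    return text.lower().translate(str.maketrans("", "", string.punctuation)).strip()


print(normalize("Olá, mundo!"))  # -> "olá mundo"
```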
CAMeL-Lab/bert-base-arabic-camelbert-mix
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "Arabic", "Dialect", "Egyptian", "Gulf", "Levantine", "Classical Arabic", "MSA", "Modern Standard Arabic", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20,880
null
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
---

# {MODEL_NAME}

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('{MODEL_NAME}')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


# Mean pooling - take the attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}')
model = AutoModel.from_pretrained('{MODEL_NAME}')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME})

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 928 with parameters:

```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the fit()-Method:

```
{
    "epochs": 1,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": 928,
    "warmup_steps": 93,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
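Since the card mentions clustering and semantic search, here is a brief sketch of scoring sentence similarity with the embeddings computed above ({MODEL_NAME} is the card's own placeholder):

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('{MODEL_NAME}')  # placeholder name from this card
embeddings = model.encode(["How do I reset my password?",
                           "Instructions for resetting a password"])
# Cosine similarity of the two embeddings; values closer to 1 mean more similar.
print(util.cos_sim(embeddings[0], embeddings[1]))
```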
CAMeL-Lab/bert-base-arabic-camelbert-msa-did-nadi
[ "pytorch", "tf", "bert", "text-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
71
null
---
license: mit
widget:
- text: 要进一步加强党风[MASK]政建设
  example_title: example 1
- text: 要落实[MASK]产
  example_title: example 2
---

BERT fine-tuned on Chinese political text.
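A minimal fill-mask sketch for a card like this one (the repository id below is a hypothetical placeholder, since the card does not name the checkpoint):

```python
from transformers import pipeline

# "your-org/chinese-political-bert" is a hypothetical repo id; substitute the
# actual checkpoint this card describes.
fill_mask = pipeline("fill-mask", model="your-org/chinese-political-bert")
for pred in fill_mask("要进一步加强党风[MASK]政建设"):
    print(pred["token_str"], pred["score"])
```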
CAMeL-Lab/bert-base-arabic-camelbert-msa-eighth
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
21
2022-12-16T05:10:30Z
---
language:
- vi
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
model-index:
- name: Whisper Medium Vietnamese
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: mozilla-foundation/common_voice_11_0 vi
      type: mozilla-foundation/common_voice_11_0
      config: vi
      split: test
      args: vi
    metrics:
    - name: Wer
      type: wer
      value: 20.04825619653433
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# Whisper Medium Vietnamese

This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the mozilla-foundation/common_voice_11_0 vi dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5422
- Wer: 20.0483

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 100
- training_steps: 1000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer     |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.0241        | 4.01  | 1000 | 0.5422          | 20.0483 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.11.0+cu102
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
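The card's usage section is still a stub; a minimal transcription sketch, following the pipeline pattern from the Whisper Large Portuguese card earlier in this section (the repo id is a hypothetical placeholder, since this card does not state the published model id):

```python
from transformers import pipeline

# Placeholder repo id; substitute the actual fine-tuned checkpoint.
transcriber = pipeline("automatic-speech-recognition", model="your-org/whisper-medium-vi")
transcriber.model.config.forced_decoder_ids = transcriber.tokenizer.get_decoder_prompt_ids(
    language="vi", task="transcribe"
)
print(transcriber("path/to/my_audio.wav"))
```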
CAMeL-Lab/bert-base-arabic-camelbert-msa-half
[ "pytorch", "tf", "jax", "bert", "fill-mask", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
null
---
language:
- bn
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
model-index:
- name: whisper-small-or
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice 11.0
      type: mozilla-foundation/common_voice_11_0
      config: or
      split: test
      args: or
    metrics:
    - name: Wer
      type: wer
      value: 40.30612244897959
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# whisper-small-or

This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.5871
- Wer: 40.3061

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- training_steps: 3000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer     |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.001         | 25.01 | 1000 | 0.4038          | 37.4804 |
| 0.0001        | 51.0  | 2000 | 0.5288          | 40.0706 |
| 0.0001        | 76.01 | 3000 | 0.5871          | 40.3061 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.10.0
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
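As a worked example of the WER metric reported above, a short sketch using the `evaluate` library (the strings are made-up placeholders):

```python
import evaluate

wer = evaluate.load("wer")
# Placeholder strings; in practice, predictions come from the model and
# references from the dataset's transcriptions.
score = wer.compute(
    predictions=["this is what the model transcribed"],
    references=["this is what the speaker actually said"],
)
print(100 * score)  # the card reports WER as a percentage
```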
CAMeL-Lab/bert-base-arabic-camelbert-msa-ner
[ "pytorch", "tf", "bert", "token-classification", "ar", "arxiv:2103.06678", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
229
2022-12-16T05:22:28Z
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: Proximal Policy Optimization with MLP PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 272.38 +/- 16.11
      name: mean_reward
      verified: false
---

# **Proximal Policy Optimization with MLP PPO** Agent playing **LunarLander-v2**

This is a trained model of a **Proximal Policy Optimization with MLP PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

TODO: Add your code

```python
from stable_baselines3 import ...
from huggingface_sb3 import load_from_hub

...
```
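The card's usage block is a TODO; a minimal sketch of loading and evaluating such a checkpoint with huggingface_sb3 (the repo id and filename below are hypothetical, since the card leaves them unspecified; LunarLander-v2 also requires `gym[box2d]`):

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.evaluation import evaluate_policy

# Hypothetical repo id and filename; substitute the actual ones for this model.
checkpoint = load_from_hub(repo_id="your-org/ppo-LunarLander-v2",
                           filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)

env = make_vec_env("LunarLander-v2", n_envs=1)
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```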
CLAck/indo-pure
[ "pytorch", "marian", "text2text-generation", "en", "id", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for mobilenetv3_small_050.lamb_in1k

A MobileNet-v3 image classification model. Trained on ImageNet-1k in `timm` using the recipe template described below.

Recipe details:
* A LAMB optimizer recipe that is similar to [ResNet Strikes Back](https://arxiv.org/abs/2110.00476) `A2` but 50% longer with EMA weight averaging, no CutMix
* RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging
* Step (exponential decay w/ staircase) LR schedule with warmup

## Model Details

- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 1.6
  - GMACs: 0.0
  - Activations (M): 0.9
  - Image size: 224 x 224
- **Papers:**
  - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/huggingface/pytorch-image-models

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('mobilenetv3_small_050.lamb_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'mobilenetv3_small_050.lamb_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 8, 56, 56])
    #  torch.Size([1, 16, 28, 28])
    #  torch.Size([1, 24, 14, 14])
    #  torch.Size([1, 288, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'mobilenetv3_small_050.lamb_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 288, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```

```bibtex
@inproceedings{howard2019searching,
  title={Searching for mobilenetv3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={1314--1324},
  year={2019}
}
```
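As a follow-up to the Image Classification example above, a sketch of mapping the top-5 indices to human-readable labels. The class-name file URL is an assumption (a commonly mirrored ImageNet-1k class list), not part of this card:

```python
from urllib.request import urlopen

# Continues from the Image Classification example above; assumes
# `top5_probabilities` and `top5_class_indices` are still in scope.
# The URL below is a commonly used copy of the ImageNet-1k class names.
classes = urlopen(
    'https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt'
).read().decode('utf-8').splitlines()

for prob, idx in zip(top5_probabilities[0], top5_class_indices[0]):
    print(f'{classes[idx.item()]}: {prob.item():.2f}%')
```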
CLAck/vi-en
[ "pytorch", "marian", "text2text-generation", "en", "vi", "dataset:ALT", "transformers", "translation", "license:apache-2.0", "autotrain_compatible" ]
translation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for mobilenetv3_small_075.lamb_in1k

A MobileNet-v3 image classification model. Trained on ImageNet-1k in `timm` using the recipe template described below.

Recipe details:
* A LAMB optimizer recipe that is similar to [ResNet Strikes Back](https://arxiv.org/abs/2110.00476) `A2` but 50% longer with EMA weight averaging, no CutMix
* RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging
* Step (exponential decay w/ staircase) LR schedule with warmup

## Model Details

- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 2.0
  - GMACs: 0.0
  - Activations (M): 1.3
  - Image size: 224 x 224
- **Papers:**
  - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/huggingface/pytorch-image-models

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('mobilenetv3_small_075.lamb_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'mobilenetv3_small_075.lamb_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 16, 56, 56])
    #  torch.Size([1, 24, 28, 28])
    #  torch.Size([1, 40, 14, 14])
    #  torch.Size([1, 432, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'mobilenetv3_small_075.lamb_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 432, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```

```bibtex
@inproceedings{howard2019searching,
  title={Searching for mobilenetv3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={1314--1324},
  year={2019}
}
```
CLEE/CLEE
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for mobilenetv3_small_100.lamb_in1k

A MobileNet-v3 image classification model. Trained on ImageNet-1k in `timm` using the recipe template described below.

Recipe details:
* A LAMB optimizer recipe that is similar to [ResNet Strikes Back](https://arxiv.org/abs/2110.00476) `A2` but 50% longer with EMA weight averaging, no CutMix
* RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging
* Step (exponential decay w/ staircase) LR schedule with warmup

## Model Details

- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 2.5
  - GMACs: 0.1
  - Activations (M): 1.4
  - Image size: 224 x 224
- **Papers:**
  - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/huggingface/pytorch-image-models

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('mobilenetv3_small_100.lamb_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'mobilenetv3_small_100.lamb_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 16, 56, 56])
    #  torch.Size([1, 24, 28, 28])
    #  torch.Size([1, 48, 14, 14])
    #  torch.Size([1, 576, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'mobilenetv3_small_100.lamb_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 576, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```

```bibtex
@inproceedings{howard2019searching,
  title={Searching for mobilenetv3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={1314--1324},
  year={2019}
}
```
CLS/WubiBERT_models
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for tf_mobilenetv3_large_075.in1k

A MobileNet-v3 image classification model. Trained on ImageNet-1k in Tensorflow by paper authors, ported to PyTorch by Ross Wightman.

## Model Details

- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 4.0
  - GMACs: 0.2
  - Activations (M): 4.0
  - Image size: 224 x 224
- **Papers:**
  - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tf_mobilenetv3_large_075.in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_large_075.in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 32, 28, 28])
    #  torch.Size([1, 88, 14, 14])
    #  torch.Size([1, 720, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_large_075.in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 720, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation

```bibtex
@inproceedings{howard2019searching,
  title={Searching for mobilenetv3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={1314--1324},
  year={2019}
}
```

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
CLTL/MedRoBERTa.nl
[ "pytorch", "roberta", "fill-mask", "nl", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,988
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for tf_mobilenetv3_large_100.in1k

A MobileNet-v3 image classification model. Trained on ImageNet-1k in Tensorflow by paper authors, ported to PyTorch by Ross Wightman.

## Model Details

- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 5.5
  - GMACs: 0.2
  - Activations (M): 4.4
  - Image size: 224 x 224
- **Papers:**
  - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tf_mobilenetv3_large_100.in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_large_100.in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 960, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_large_100.in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 960, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation

```bibtex
@inproceedings{howard2019searching,
  title={Searching for mobilenetv3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={1314--1324},
  year={2019}
}
```

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
CLTL/gm-ner-xlmrbase
[ "pytorch", "tf", "xlm-roberta", "token-classification", "nl", "transformers", "dighum", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "XLMRobertaForTokenClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for tf_mobilenetv3_large_minimal_100.in1k

A MobileNet-v3 image classification model. Trained on ImageNet-1k in Tensorflow by paper authors, ported to PyTorch by Ross Wightman.

## Model Details

- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 3.9
  - GMACs: 0.2
  - Activations (M): 4.4
  - Image size: 224 x 224
- **Papers:**
  - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tf_mobilenetv3_large_minimal_100.in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_large_minimal_100.in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 24, 56, 56])
    #  torch.Size([1, 40, 28, 28])
    #  torch.Size([1, 112, 14, 14])
    #  torch.Size([1, 960, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_large_minimal_100.in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 960, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation

```bibtex
@inproceedings{howard2019searching,
  title={Searching for mobilenetv3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={1314--1324},
  year={2019}
}
```

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
CLTL/icf-domains
[ "pytorch", "roberta", "nl", "transformers", "license:mit", "text-classification" ]
text-classification
{ "architectures": [ "RobertaForMultiLabelSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
35
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for tf_mobilenetv3_small_075.in1k

A MobileNet-v3 image classification model. Trained on ImageNet-1k in Tensorflow by paper authors, ported to PyTorch by Ross Wightman.

## Model Details

- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 2.0
  - GMACs: 0.0
  - Activations (M): 1.3
  - Image size: 224 x 224
- **Papers:**
  - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tf_mobilenetv3_small_075.in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_small_075.in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 16, 56, 56])
    #  torch.Size([1, 24, 28, 28])
    #  torch.Size([1, 40, 14, 14])
    #  torch.Size([1, 432, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_small_075.in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 432, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation

```bibtex
@inproceedings{howard2019searching,
  title={Searching for mobilenetv3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={1314--1324},
  year={2019}
}
```

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
CLTL/icf-levels-adm
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for tf_mobilenetv3_small_100.in1k

A MobileNet-v3 image classification model. Trained on ImageNet-1k in Tensorflow by paper authors, ported to PyTorch by Ross Wightman.

## Model Details

- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 2.5
  - GMACs: 0.1
  - Activations (M): 1.4
  - Image size: 224 x 224
- **Papers:**
  - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244
- **Dataset:** ImageNet-1k
- **Original:** https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet

## Model Usage

### Image Classification

```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch  # needed for torch.topk below

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('tf_mobilenetv3_small_100.in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_small_100.in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 16, 112, 112])
    #  torch.Size([1, 16, 56, 56])
    #  torch.Size([1, 24, 28, 28])
    #  torch.Size([1, 48, 14, 14])
    #  torch.Size([1, 576, 7, 7])
    print(o.shape)
```

### Image Embeddings

```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'tf_mobilenetv3_small_100.in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 576, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison

Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).

## Citation

```bibtex
@inproceedings{howard2019searching,
  title={Searching for mobilenetv3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={1314--1324},
  year={2019}
}
```

```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
CLTL/icf-levels-att
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for tf_mobilenetv3_small_minimal_100.in1k A MobileNet-v3 image classification model. Trained on ImageNet-1k in TensorFlow by paper authors, ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 2.0 - GMACs: 0.1 - Activations (M): 1.4 - Image size: 224 x 224 - **Papers:** - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244 - **Dataset:** ImageNet-1k - **Original:** https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import torch import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('tf_mobilenetv3_small_minimal_100.in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'tf_mobilenetv3_small_minimal_100.in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 16, 112, 112]) # torch.Size([1, 16, 56, 56]) # torch.Size([1, 24, 28, 28]) # torch.Size([1, 48, 14, 14]) # torch.Size([1, 576, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'tf_mobilenetv3_small_minimal_100.in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 576, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).
## Citation ```bibtex @inproceedings{howard2019searching, title={Searching for mobilenetv3}, author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others}, booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, pages={1314--1324}, year={2019} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
CLTL/icf-levels-enr
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: - gos --- A Gronings Wav2Vec2 model. This model was created by fine-tuning the multilingual [XLS-R](https://huggingface.co/facebook/wav2vec2-xls-r-300m) model on Gronings speech. This model is part of the paper: Making More of Little Data: Improving Low-Resource Automatic Speech Recognition Using Data Augmentation. More information is available on [GitHub](https://github.com/Bartelds/asr-augmentation).
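A minimal transcription sketch for a fine-tuned Wav2Vec2 checkpoint like this one; the checkpoint path and audio path below are placeholders, not taken from the card:

```python
from transformers import pipeline

# Placeholder path -- point this at the actual Gronings checkpoint
transcriber = pipeline(
    "automatic-speech-recognition",
    model="path/to/gronings-wav2vec2-checkpoint",
)
print(transcriber("path/to/audio.wav")["text"])
```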
CLTL/icf-levels-etn
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 183.38 +/- 67.08 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
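The usage section above is a template placeholder; here is a minimal loading sketch under the standard huggingface_sb3 conventions (the repo id and filename below are placeholders, not taken from the card):

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

# Placeholder repo id and filename -- substitute the values for this checkpoint
checkpoint = load_from_hub(
    repo_id="user/ppo-LunarLander-v2",
    filename="ppo-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)

# Quick sanity check of the mean reward
env = gym.make("LunarLander-v2")
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```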
CLTL/icf-levels-fac
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: my_awesome_qa_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_qa_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on a High School Health Science dataset. It achieves the following results on the evaluation set: - Loss: 5.2683 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 3 | 5.6569 | | No log | 2.0 | 6 | 5.3967 | | No log | 3.0 | 9 | 5.2683 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
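A minimal inference sketch for this kind of extractive QA fine-tune; the checkpoint path, question, and context are illustrative assumptions, not taken from the card:

```python
from transformers import pipeline

# Placeholder path -- point this at the fine-tuned checkpoint
qa = pipeline("question-answering", model="path/to/my_awesome_qa_model")

result = qa(
    question="What does a vaccine train the immune system to recognize?",
    context="A vaccine trains the immune system to recognize a pathogen "
            "without causing the disease itself.",
)
print(result["answer"], result["score"])
```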
CLTL/icf-levels-mbw
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- language: - gos --- A Gronings Wav2Vec2 model. This model was created by fine-tuning the multilingual [XLS-R](https://huggingface.co/facebook/wav2vec2-xls-r-300m) model on Gronings speech. This model is part of the paper: Making More of Little Data: Improving Low-Resource Automatic Speech Recognition Using Data Augmentation. More information is available on [GitHub](https://github.com/Bartelds/asr-augmentation).
CLTL/icf-levels-stm
[ "pytorch", "roberta", "text-classification", "nl", "transformers", "license:mit" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 221.15 +/- 13.28 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
CM-CA/Cartman
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - kejian/codeparrot-train-more-filter-3.3b-cleaned model-index: - name: vigor-awr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vigor-awr This model was trained from scratch on the kejian/codeparrot-train-more-filter-3.3b-cleaned dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 12588 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.23.0 - Pytorch 1.13.0+cu116 - Datasets 2.0.0 - Tokenizers 0.12.1 # Full config {'dataset': {'datasets': ['kejian/codeparrot-train-more-filter-3.3b-cleaned'], 'is_split_by_sentences': True}, 'generation': {'batch_size': 128, 'every_n_steps': 256, 'force_call_on': [6294], 'metrics_configs': [{}, {'n': 1}, {}], 'scenario_configs': [{'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 640, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_hits_threshold': 0, 'num_samples': 2048}, {'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 272, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'functions', 'num_hits_threshold': 0, 'num_samples': 2048, 'prompts_path': 'resources/functions_csnet.jsonl', 'use_prompt_for_scoring': True}], 'scorer_config': {}}, 'kl_gpt3_callback': {'every_n_steps': 256, 'force_call_on': [6294], 'gpt3_kwargs': {'model_name': 'code-cushman-001'}, 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'model_kwargs': {'value_head_config': {'is_detached': False}}, 'path_or_name': 'codeparrot/codeparrot-small'}, 'objective': {'alpha': 0.05, 'beta': 1, 'name': 'AWR'}, 'tokenizer': {'path_or_name': 'codeparrot/codeparrot-small'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 256, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'vigor-awr', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.001, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000.0, 'output_dir': 'training_output', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 6294, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/kejian/uncategorized/runs/1sojz3iz
CM-CA/DialoGPT-small-cartman
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - billsum metrics: - rouge model-index: - name: my_awesome_billsum_model results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: billsum type: billsum config: default split: ca_test args: default metrics: - name: Rouge1 type: rouge value: 19.4885 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_billsum_model This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the billsum dataset. It achieves the following results on the evaluation set: - Loss: 2.1303 - Rouge1: 19.4885 - Rouge2: 9.7756 - Rougel: 16.7539 - Rougelsum: 18.153 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:| | 2.9287 | 1.0 | 124 | 2.3690 | 18.7685 | 8.721 | 15.7134 | 17.2109 | | 2.5051 | 2.0 | 248 | 2.2540 | 19.5651 | 9.5886 | 16.5619 | 18.1252 | | 2.4042 | 3.0 | 372 | 2.2140 | 19.4716 | 9.7429 | 16.6675 | 18.0006 | | 2.3442 | 4.0 | 496 | 2.1800 | 19.5841 | 9.7078 | 16.7923 | 18.1682 | | 2.3075 | 5.0 | 620 | 2.1562 | 19.4162 | 9.6647 | 16.5106 | 17.9637 | | 2.2693 | 6.0 | 744 | 2.1394 | 19.5064 | 9.8462 | 16.6515 | 18.0461 | | 2.2714 | 7.0 | 868 | 2.1321 | 19.475 | 9.7216 | 16.6698 | 18.1103 | | 2.2413 | 8.0 | 992 | 2.1303 | 19.4885 | 9.7756 | 16.7539 | 18.153 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
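A minimal inference sketch for this kind of T5 summarization fine-tune; the checkpoint path and input text are illustrative assumptions, not taken from the card:

```python
from transformers import pipeline

# Placeholder path -- point this at the fine-tuned checkpoint
summarizer = pipeline("summarization", model="path/to/my_awesome_billsum_model")

bill_text = (
    "This bill amends the Internal Revenue Code to extend the tax credit "
    "for residential energy efficient property through 2026."
)
print(summarizer(bill_text, max_length=40, min_length=10)[0]["summary_text"])
```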
CNT-UPenn/Bio_ClinicalBERT_for_seizureFreedom_classification
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- license: creativeml-openrail-m --- Preview Images https://imgur.com/a/CnIPfrQ IMPORTANT INSTRUCTIONS! This model was trained on the SD base 1.5 version BUT it also works with 1.4, as they both share the same CLIP encoder. Install instructions. Simply place the invisible.pt file inside the \stable-diffusion-webui\models\hypernetworks folder. Load the model inside the Automatic1111 interface under settings > hypernetwork. Use instructions. Use 1.0 hypernetwork strength for best results. Use the DPM++ SDE Karras sampler with 15 steps and a CFG of 4.0. Make sure to always include the word invisible somewhere in the prompt. For people, always preface the subject with invisible, for example "invisible man walking", "invisible girl playing in the backyard", etc. VERY IMPORTANT! Always describe the background in some detail or you WILL get a very generic, boring background. So for example DON'T just say "an old invisible man". DO say "an old invisible man inside a rustic hut". For some fun, use the 1.5 inpaint model with this hypernetwork on your photos. Simply prompt invisible and use the inpaint tool to inpaint any exposed skin and watch the magic happen.
CNT-UPenn/RoBERTa_for_seizureFrequency_QA
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - cs license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Large-v2 Czech CV11 v2 results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 cs type: mozilla-foundation/common_voice_11_0 config: cs split: test args: cs metrics: - name: Wer type: wer value: 9.045873924973758 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Large-v2 Czech CV11 v2 This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the mozilla-foundation/common_voice_11_0 cs dataset. It achieves the following results on the evaluation set: - Loss: 0.2120 - Wer: 9.0459 ## Model description Fine-tuned with DeepSpeed optimization and a batch size of 32. ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.0106 | 4.24 | 1000 | 0.1625 | 9.9888 | | 0.0034 | 8.47 | 2000 | 0.1841 | 9.8304 | | 0.0011 | 12.71 | 3000 | 0.1917 | 9.4031 | | 0.0004 | 16.95 | 4000 | 0.2075 | 9.1177 | | 0.0003 | 21.19 | 5000 | 0.2120 | 9.0459 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
CZWin32768/xlm-align
[ "pytorch", "xlm-roberta", "fill-mask", "arxiv:2106.06381", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- language: zh license: creativeml-openrail-m tags: - clip - zh - Chinese --- # clip-huge-zh-75k-steps-bs4096 ## Brief Introduction 训练该模型的目的是使用中文文本指导stable diffusion 2模型进行生成。冻结[open_clip的CLIP-VIT-H](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K)图像编码部分,训练文本编码部分以对齐英文语义空间, 训练样本均来自[LAION-5B](https://laion.ai/blog/laion-5b/)的中文子集 注:由于数据量,bs,step远小于原生clip-h,所以模型远未收敛且远未达到huge模型该有的性能,只是作为stable diffusion 2的文本指导的中间结果, 欢迎基于该模型做二次开发强化其CLIP性能。 The purpose of training this model is to use Chinese text to guide Stable Diffusion 2 generation. Freezing the vision part of [CLIP-VIT-H](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K) and training only the text encoder aligns the Chinese latent space with the original English latent space. All training samples come from the Chinese subset of [LAION-5B](https://laion.ai/blog/laion-5b/). Note: Because of the smaller dataset size, batch size, and step count, this model is still far from the expected performance and convergence. It is intended only as an intermediate result for the Stable Diffusion 2 text encoder. You are very welcome to do further training based on this model to enhance its CLIP performance. ## Stable Diffusion 2 Guiding Example 赛博朋克风格的城市街道 ![](examples/cyberpunk.jpeg) 一只可爱的柴犬 ![](examples/shiba.jpeg) ## Training Details ### 文本编码器/Text Encoder 文本编码器采用与stable diffusion 2同样的结构:[open_clip的CLIP-VIT-H](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K). 为了使中文编码在语义空间内尽量与原来英文编码器的语义距离接近,文本编码器的训练细节如下: 1. 暴力的替换原来英文版本的clip_huge的文本编码器的vocab与tokenizer为chinese roberta的vocab与tokenizer 2. 完整copy原英文编码器的所有权重 3. 冻结图像编码器的全部参数与文本编码器的编码部分与输出映射部分,只训练词嵌入,目的是在保留语义空间尽量不变的情况下,将中文词嵌入对齐英文词嵌入的语义空间。 4. 在训练多个step后,完全解冻文本编码器,使整个文本模型去拟合clip_huge图像编码器的语义空间。 注:训练的loss采用clip loss,数据集采用[LAION-5B](https://laion.ai/blog/laion-5b/)数据集的中文子集部分(由于失效url等原因,共约8500万),模型在4096的batch size下共训练75k步,所以并未完全收敛。 The text encoder has the same structure as [open_clip/CLIP-VIT-H](https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K), which is used by Stable Diffusion 2. Our goal is to map the Chinese latent space to the original English one. The training details are listed below: 1. Do a brute-force in-place vocab substitution: directly use the Chinese tokenized sequence to pick up embedding vectors from the original embedding layer. 2. Copy the original model weights from the text encoder of CLIP-VIT-H. 3. Freeze the entire visual model, the text encoder layers, and the text projection layer; only the text embedding layer is unfrozen. The purpose of this step is to align the Chinese word embeddings with the original English ones so that the final projection latent space does not drift far away. 4. After a number of steps, unfreeze the entire text encoder for better convergence. Note: We use the CLIP loss to optimize the Chinese text encoder. The Chinese subset of [LAION-5B](https://laion.ai/blog/laion-5b/) was chosen as our training set (around 85M text-image pairs). This model was trained for 75k steps with a batch size of 4096, so it is still far from convergence.
## 使用 Usage ### Zero-Shot Classification ```py import torch import numpy as np import requests from PIL import Image from transformers import CLIPModel, CLIPFeatureExtractor, AutoTokenizer model_id = "lyua1225/clip-huge-zh-75k-steps-bs4096" model = CLIPModel.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True) processor = CLIPFeatureExtractor.from_pretrained(model_id) # online example from OFA-Sys url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" image = Image.open(requests.get(url, stream=True).raw).convert("RGB") texts = ["杰尼龟", "妙蛙种子", "皮卡丘", "小火龙"] # compute image feature inputs = torch.from_numpy(processor(image).pixel_values[0]).unsqueeze(0) image_features = model.get_image_features(pixel_values=inputs) image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) # compute text features inputs = tokenizer(text=texts, padding="max_length", max_length=77, return_tensors="pt") input_ids, attention_mask = inputs.input_ids, inputs.attention_mask input_dict = dict(input_ids=input_ids, attention_mask=attention_mask) text_features = model.get_text_features(**input_dict) text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) # normalize # compute probs for each class logit_scale = model.logit_scale.exp() logits_per_image = logit_scale * image_features @ text_features.t() logits_per_text = logits_per_image.t() probs = logits_per_image.softmax(dim=-1).detach().numpy() print(np.around(probs, 3)) ``` ### Guiding Stable Diffusion V2.1 使用该中文模型可以指导stable diffusion 2 进行生成(在图灵架构或者V100以后的GPU上推荐使用FP16进行推理) ```py import torch from diffusers import StableDiffusionPipeline from transformers import AutoTokenizer, CLIPTextModel clip_id = "lyua1225/clip-huge-zh-75k-steps-bs4096" sd2_id = "stabilityai/stable-diffusion-2-1" text_encoder = CLIPTextModel.from_pretrained(clip_id).half() tokenizer = AutoTokenizer.from_pretrained(clip_id, trust_remote_code=True) pipe = StableDiffusionPipeline.from_pretrained(sd2_id, torch_dtype=torch.float16, revision="fp16", tokenizer=tokenizer, text_encoder=text_encoder) pipe.to("cuda") image = pipe("赛博朋克风格的城市街道", num_inference_steps=20).images[0] image.save("cyberpunk.jpeg") ```
Caddy/UD
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - FrozenLake-v1-4x4 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4 type: FrozenLake-v1-4x4 metrics: - type: mean_reward value: 0.67 +/- 0.47 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym model = load_from_hub(repo_id="duongkstn/q-FrozenLake-v1-4x4", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
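The usage snippet above calls a `load_from_hub` helper that the card does not define; a minimal sketch of what such a helper might look like, assuming the model is stored as a pickled dictionary on the Hub (the storage format and the helper itself are assumptions, not taken from the card):

```python
import pickle

from huggingface_hub import hf_hub_download


def load_from_hub(repo_id: str, filename: str) -> dict:
    """Download a pickled model file from the Hub and unpickle it.

    Assumes the repo stores the model as a pickled dict (e.g. a Q-table
    plus metadata such as "env_id"); adjust if the format differs.
    """
    local_path = hf_hub_download(repo_id=repo_id, filename=filename)
    with open(local_path, "rb") as f:
        return pickle.load(f)
```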
CallumRai/HansardGPT2
[ "pytorch", "jax", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy - precision - recall - f1 model-index: - name: xlm-roberta-targin-final results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-targin-final This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.8172 - Accuracy: 0.6873 - Precision: 0.6494 - Recall: 0.6422 - F1: 0.6450 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | No log | 1.0 | 296 | 0.6065 | 0.6873 | 0.6537 | 0.5833 | 0.5748 | | 0.597 | 2.0 | 592 | 0.5822 | 0.7015 | 0.6652 | 0.6279 | 0.6332 | | 0.597 | 3.0 | 888 | 0.5704 | 0.7015 | 0.6654 | 0.6551 | 0.6589 | | 0.5156 | 4.0 | 1184 | 0.6393 | 0.7044 | 0.6684 | 0.6552 | 0.6597 | | 0.5156 | 5.0 | 1480 | 0.5924 | 0.7082 | 0.6752 | 0.6720 | 0.6735 | | 0.4479 | 6.0 | 1776 | 0.7029 | 0.7006 | 0.6629 | 0.6351 | 0.6408 | | 0.3783 | 7.0 | 2072 | 0.6963 | 0.7072 | 0.6715 | 0.6554 | 0.6606 | | 0.3783 | 8.0 | 2368 | 0.7636 | 0.6987 | 0.6627 | 0.6549 | 0.6579 | | 0.3253 | 9.0 | 2664 | 0.7804 | 0.6901 | 0.6549 | 0.6523 | 0.6535 | | 0.3253 | 10.0 | 2960 | 0.8172 | 0.6873 | 0.6494 | 0.6422 | 0.6450 | ### Framework versions - Transformers 4.24.0.dev0 - Pytorch 1.11.0+cu102 - Datasets 2.6.1 - Tokenizers 0.13.1
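A minimal inference sketch for this kind of sequence-classification fine-tune; the checkpoint path and input sentence are illustrative assumptions, not taken from the card:

```python
from transformers import pipeline

# Placeholder path -- point this at the fine-tuned checkpoint
classifier = pipeline("text-classification", model="path/to/xlm-roberta-targin-final")
print(classifier("An example sentence to score."))
```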
Cameron/BERT-SBIC-offensive
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- library_name: lucidrains/gated-state-spaces-pytorch license: mit datasets: - c4 pipeline_tag: text-generation tags: - text generation - pytorch - causal-lm - gated-state-space --- # [Gated State Space](https://arxiv.org/abs/2206.13947) This repo contains a pretrained model for the gated state space paper. The model has been trained on the [C4 dataset](https://huggingface.co/datasets/c4). I have used [Lucidrains' implementation](https://github.com/lucidrains/gated-state-spaces-pytorch) ([commit](https://github.com/lucidrains/gated-state-spaces-pytorch/tree/32cd036e775112cc469e94fa1165fe111393708b)) for the model. I think the main benefit of this model is the ability to scale beyond the training context length. As the authors noted in the paper, they trained the model on a 4k sequence length but it generalized beyond that length. I have written a **blog post on how I started the training [here](https://naxalpha.substack.com/p/devlog-experiment-a2a468-gated-state)**. **[Wandb Report is available at this link](https://wandb.ai/naxalpha/gated-state-space/reports/Gated-State-Space-Training-v1--VmlldzozMTYzMzY3?accessToken=zy10rrpofi9k7l52aqwiej8bk0ub302rdswfkxmf8y94dt2j6z4kxbca6ar3sc52)** ## How to use this Since it is not based on the [transformers](https://github.com/huggingface/transformers/) library, it is a bit tricky to use the model out of the box. Here are the general steps: 1. `pip install gated-state-spaces-pytorch` 2. Download the model weights from [here](https://huggingface.co/naxalpha/gated-state-space/raw/main/model.pt). 3. Download the config from [here](https://huggingface.co/naxalpha/gated-state-space/raw/main/config.json). 4. Use the following code to patch the original model (`config` is the dict loaded from config.json and `f_emb` is its embedding dimension; the import paths are assumed from the package layout): ```python import torch from torch import nn from gated_state_spaces_pytorch import GatedStateSpacesLM from gated_state_spaces_pytorch.autoregressive_wrapper import AutoregressiveWrapper model = AutoregressiveWrapper( GatedStateSpacesLM( **config ), ) model.net.to_logits = nn.Sequential( nn.LayerNorm(f_emb), model.net.to_logits, ) ``` 5. Load the state dict: `model.load_state_dict(torch.load('model.pt'))` 6. If you want to fine-tune the model, you can freeze the embeddings (`emb` here is the pretrained embedding matrix from the checkpoint): ```python model.net.token_emb.weight.requires_grad_(False) model.net.token_emb.weight.copy_(emb) model.net.to_logits[1].weight.requires_grad_(False) model.net.to_logits[1].weight.copy_(emb) ``` Training code is available in this repo. [Link to the training script](https://huggingface.co/naxalpha/gated-state-space/blob/main/app.py). ## Training Information Here are the details of the training: - Objective: `Alternate between simple cross entropy and GPT-2 XL distillation` - Gradient Accumulation: `4` - Batch Size: `8` - Sequence Length: `128` - Learning Rate: `2e-5` - Optimizer: `AdamW` - Gradient Norm Clipping: `1.0` - Hardware: `RTX 3090` on [vast.ai](https://vast.ai) - Training Cost: `~$20` - Training Time: `~3 days` - Number of steps: `557,000` - Tokens seen: `570 million` - Final loss: `~3.9` ## Fine-Tuning Info: [model2.pt](https://huggingface.co/naxalpha/gated-state-space/blob/main/) is available as a fine-tuned version with a longer context length. - Objective: `Simple Cross Entropy` - Gradient Accumulation: `4` - Batch Size: `1` - Sequence Length: `2048` - Learning Rate: `5e-6` - Embeddings: `unfrozen for fine-tuning` - Gradient Norm Clipping: `1.0` - Hardware: `2x3090` on vast.ai - Extra Tricks: `Used HuggingFace Accelerate with Full Sharding without CPU offload`
Cameron/BERT-jigsaw-identityhate
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
null
--- tags: - FrozenLake-v1-8x8 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-8x8-90000-steps results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-8x8 type: FrozenLake-v1-8x8 metrics: - type: mean_reward value: 0.18 +/- 0.38 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym model = load_from_hub(repo_id="duongkstn/q-FrozenLake-v1-8x8-90000-steps", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Cameron/BERT-mdgender-convai-binary
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 267.52 +/- 15.08 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
Canadiancaleb/jessebot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - billsum metrics: - rouge model-index: - name: bart-large-cnn-small-billsum-5epochs results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: billsum type: billsum config: default split: train[:1%] args: default metrics: - name: Rouge1 type: rouge value: 0.5406 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-cnn-small-billsum-5epochs This model is a fine-tuned version of [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on the billsum dataset. It achieves the following results on the evaluation set: - Loss: 1.7206 - Rouge1: 0.5406 - Rouge2: 0.312 - Rougel: 0.3945 - Rougelsum: 0.4566 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3.373e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 16 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | 2.3723 | 1.33 | 16 | 1.8534 | 0.5204 | 0.299 | 0.3893 | 0.4441 | | 1.6579 | 2.67 | 32 | 1.7208 | 0.5427 | 0.3143 | 0.3915 | 0.459 | | 1.2397 | 4.0 | 48 | 1.7206 | 0.5406 | 0.312 | 0.3945 | 0.4566 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
Capreolus/birch-bert-large-mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: t5-base-finetuned-youtube results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-base-finetuned-youtube This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.7643 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 4.0266 | 1.0 | 9057 | 3.7643 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
Capreolus/birch-bert-large-msmarco_mb
[ "pytorch", "tf", "jax", "bert", "next-sentence-prediction", "transformers" ]
null
{ "architectures": [ "BertForNextSentencePrediction" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage ```python import gym model = load_from_hub(repo_id="CreativeEvolution/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Captain-1337/CrudeBERT
[ "pytorch", "bert", "text-classification", "arxiv:1908.10063", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
Access to model anrdrew/kittyskit is restricted and you are not in the authorized list. Visit https://huggingface.co/anrdrew/kittyskit to ask for access.
Carlork314/Carlos
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - dataset/riksdagen metrics: - wer model-index: - name: whisper-small-sv results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: dataset/riksdagen audiofolder type: dataset/riksdagen config: test split: test args: audiofolder metrics: - name: WER type: wer value: 0.22405586116204554 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: sv-SE split: test args: language: sv-SE metrics: - name: WER type: wer value: 26.69 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # whisper-small-sv This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the dataset/riksdagen audiofolder dataset. It achieves the following results on the evaluation set: - Loss: 0.2917 - Wer: 0.2241 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - training_steps: 20000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 0.5023 | 0.04 | 250 | 0.5072 | 0.2949 | | 0.4678 | 0.08 | 500 | 0.4632 | 0.2780 | | 0.4233 | 0.12 | 750 | 0.4384 | 0.2749 | | 0.4113 | 0.17 | 1000 | 0.4205 | 0.2673 | | 0.3994 | 0.21 | 1250 | 0.4079 | 0.2649 | | 0.3841 | 0.25 | 1500 | 0.3947 | 0.2609 | | 0.3775 | 0.29 | 1750 | 0.3854 | 0.2564 | | 0.383 | 0.33 | 2000 | 0.3781 | 0.2540 | | 0.3651 | 0.37 | 2250 | 0.3721 | 0.2532 | | 0.3456 | 0.42 | 2500 | 0.3651 | 0.2517 | | 0.3719 | 0.46 | 2750 | 0.3612 | 0.2481 | | 0.3399 | 0.5 | 3000 | 0.3561 | 0.2437 | | 0.3428 | 0.54 | 3250 | 0.3522 | 0.2465 | | 0.3442 | 0.58 | 3500 | 0.3451 | 0.2399 | | 0.3315 | 0.62 | 3750 | 0.3431 | 0.2417 | | 0.3299 | 0.66 | 4000 | 0.3404 | 0.2428 | | 0.3417 | 0.71 | 4250 | 0.3373 | 0.2395 | | 0.3399 | 0.75 | 4500 | 0.3332 | 0.2390 | | 0.3222 | 0.79 | 4750 | 0.3310 | 0.2385 | | 0.3319 | 0.83 | 5000 | 0.3291 | 0.2372 | | 0.3188 | 0.87 | 5250 | 0.3265 | 0.2359 | | 0.3197 | 0.91 | 5500 | 0.3240 | 0.2378 | | 0.3099 | 0.96 | 5750 | 0.3215 | 0.2342 | | 0.3132 | 1.0 | 6000 | 0.3195 | 0.2374 | | 0.286 | 1.04 | 6250 | 0.3179 | 0.2348 | | 0.2765 | 1.08 | 6500 | 0.3166 | 0.2354 | | 0.2795 | 1.12 | 6750 | 0.3153 | 0.2324 | | 0.2825 | 1.16 | 7000 | 0.3145 | 0.2316 | | 0.2865 | 1.21 | 7250 | 0.3144 | 0.2329 | | 0.2703 | 1.25 | 7500 | 0.3126 | 0.2326 | | 0.2792 | 1.29 | 7750 | 0.3121 | 0.2324 | | 0.2749 | 1.33 | 8000 | 0.3106 | 0.2325 | | 0.2762 | 1.37 | 8250 | 0.3093 | 0.2315 | | 0.2813 | 1.41 | 8500 | 0.3080 | 0.2302 | | 0.2755 | 1.45 | 8750 | 0.3078 | 0.2321 | | 0.2779 | 1.5 | 9000 | 0.3062 | 0.2305 | | 0.2764 | 1.54 | 9250 | 0.3059 | 0.2336 | | 0.2763 | 1.58 | 9500 | 0.3041 | 0.2310 | | 0.2723 | 1.62 | 9750 | 0.3027 | 0.2292 | | 0.2756 | 1.66 | 10000 | 0.3026 | 0.2301 | | 0.2663 | 1.7 | 10250 | 0.3008 | 0.2262 | | 0.269 | 1.75 | 10500 | 0.3006 | 0.2280 | | 0.2682 | 1.79 | 10750 | 0.3002 | 0.2291 | | 0.2721 | 1.83 | 11000 | 0.2994 | 0.2267 | | 0.2681 | 1.87 | 11250 | 0.2987 | 0.2288 | | 0.278 | 1.91 | 11500 | 0.2978 | 0.2296 | | 0.2625 | 1.95 | 11750 | 0.2978 | 0.2278 | | 0.2583 | 1.99 | 12000 | 0.2967 | 0.2259 | | 0.2403 | 2.04 | 12250 | 0.2976 | 0.2276 | | 0.2414 | 2.08 | 12500 | 0.2972 | 0.2264 | | 0.251 | 2.12 | 12750 | 0.2969 | 0.2256 | | 0.2404 | 2.16 | 13000 | 0.2968 | 0.2253 | | 0.2473 | 2.2 | 13250 | 0.2966 | 0.2253 | | 0.2444 | 2.24 | 13500 | 0.2965 | 0.2262 | | 0.2512 | 2.29 | 13750 | 0.2962 | 0.2253 | | 0.2417 | 2.33 | 14000 | 0.2950 | 0.2280 | | 0.2445 | 2.37 | 14250 | 0.2950 | 0.2256 | | 0.2461 | 2.41 | 14500 | 0.2949 | 0.2262 | | 0.2496 | 2.45 | 14750 | 0.2944 | 0.2261 | | 0.2422 | 2.49 | 15000 | 0.2942 | 0.2248 | | 0.2415 | 2.53 | 15250 | 0.2940 | 0.2252 | | 0.2465 | 2.58 | 15500 | 0.2932 | 0.2269 | | 0.2508 | 2.62 | 15750 | 0.2931 | 0.2245 | | 0.2339 | 2.66 | 16000 | 0.2930 | 0.2257 | | 0.2441 | 2.7 | 16250 | 0.2923 | 0.2247 | | 0.2444 | 2.74 | 16500 | 0.2921 | 0.2246 | | 0.2416 | 2.78 | 16750 | 0.2918 | 0.2264 | | 0.2425 | 2.83 | 17000 | 0.2916 | 0.2251 | | 0.2404 | 2.87 | 17250 | 0.2916 | 0.2234 | | 0.2456 | 2.91 | 17500 | 0.2911 | 0.2238 | | 0.2384 | 2.95 | 17750 | 0.2908 | 0.2252 | | 0.244 | 2.99 | 18000 | 0.2905 | 0.2251 | | 0.2197 | 3.03 | 18250 | 0.2919 | 0.2239 | | 0.2194 | 3.08 | 18500 | 0.2919 | 0.2237 | | 0.2294 | 3.12 | 18750 | 0.2919 | 0.2243 | | 0.2225 | 3.16 | 19000 | 0.2918 | 0.2252 | | 0.2229 | 3.2 | 19250 | 0.2919 | 0.2242 | | 0.2153 | 3.24 | 19500 | 0.2917 | 0.2241 | | 0.2137 | 3.28 | 19750 | 0.2917 | 0.2239 | | 0.2194 | 3.32 | 20000 | 0.2917 | 0.2241 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.0a0+8a1a93a - Datasets 2.7.1 - Tokenizers 0.13.2
Carlork314/Xd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.8939393758773804 --- # rare-puppers Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
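A minimal inference sketch for this kind of HuggingPics image classifier; the checkpoint path and image path are illustrative assumptions, not taken from the card:

```python
from transformers import pipeline

# Placeholder path -- point this at the fine-tuned checkpoint
classifier = pipeline("image-classification", model="path/to/rare-puppers")
print(classifier("path/to/dog_photo.jpg"))
```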
Cdial/hausa-asr
[ "wav2vec2", "automatic-speech-recognition", "ha", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

model = load_from_hub(repo_id="JabrilJacobs/q-Taxi-v3", filename="q-learning.pkl")  # course notebook helper

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
dccuchile/albert-base-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
---
license: mit
tags:
- pytorch
- diffusers
- unconditional-image-generation
- diffusion-models-class
---

# Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class)

This model is a diffusion model for unconditional image generation of cute 🦋.

## Usage

```python
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained('zhow/sd-class-butterflies-32')
image = pipeline().images[0]
image
```
dccuchile/albert-base-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
---

# waynedsouza/phon4

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for tasks like clustering or semantic search.

<!--- Describe your model here -->

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer

sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('waynedsouza/phon4')
embeddings = model.encode(sentences)
print(embeddings)
```

## Evaluation Results

<!--- Describe how your model was evaluated -->

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=waynedsouza/phon4)

## Training

The model was trained with the parameters:

**DataLoader**:

`torch.utils.data.dataloader.DataLoader` of length 6957 with parameters:

```
{'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'}
```

**Loss**:

`sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss`

Parameters of the fit()-Method:

```
{
    "epochs": 15,
    "evaluation_steps": 0,
    "evaluator": "NoneType",
    "max_grad_norm": 1,
    "optimizer_class": "<class 'torch.optim.adamw.AdamW'>",
    "optimizer_params": {
        "lr": 2e-05
    },
    "scheduler": "WarmupLinear",
    "steps_per_epoch": null,
    "warmup_steps": 10,
    "weight_decay": 0.01
}
```

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: RobertaModel
  (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
  (2): Normalize()
)
```

## Citing & Authors

<!--- Describe where people can find more information -->
dccuchile/albert-base-spanish-finetuned-xnli
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- tags: - generated_from_trainer datasets: - nielsr/funsd-layoutlmv3 pipeline_tag: object-detection widget: - src: "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" example_title: invoice - src: "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/contract.jpeg" example_title: contract metrics: - precision - recall - f1 - accuracy model-index: - name: layoutlmv3-finetuned-funsd results: - task: name: Token Classification type: token-classification dataset: name: nielsr/funsd-layoutlmv3 type: nielsr/funsd-layoutlmv3 args: funsd metrics: - name: Precision type: precision value: 0.9026198714780029 - name: Recall type: recall value: 0.913 - name: F1 type: f1 value: 0.9077802634849614 - name: Accuracy type: accuracy value: 0.8330271015158475 duplicated_from: nielsr/layoutlmv3-finetuned-funsd --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # layoutlmv3-finetuned-funsd This model is a fine-tuned version of [microsoft/layoutlmv3-base](https://huggingface.co/microsoft/layoutlmv3-base) on the nielsr/funsd-layoutlmv3 dataset. It achieves the following results on the evaluation set: - Loss: 1.1164 - Precision: 0.9026 - Recall: 0.913 - F1: 0.9078 - Accuracy: 0.8330 The script for training can be found here: https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 1000 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 10.0 | 100 | 0.5238 | 0.8366 | 0.886 | 0.8606 | 0.8410 | | No log | 20.0 | 200 | 0.6930 | 0.8751 | 0.8965 | 0.8857 | 0.8322 | | No log | 30.0 | 300 | 0.7784 | 0.8902 | 0.908 | 0.8990 | 0.8414 | | No log | 40.0 | 400 | 0.9056 | 0.8916 | 0.905 | 0.8983 | 0.8364 | | 0.2429 | 50.0 | 500 | 1.0016 | 0.8954 | 0.9075 | 0.9014 | 0.8298 | | 0.2429 | 60.0 | 600 | 1.0097 | 0.8899 | 0.897 | 0.8934 | 0.8294 | | 0.2429 | 70.0 | 700 | 1.0722 | 0.9035 | 0.9085 | 0.9060 | 0.8315 | | 0.2429 | 80.0 | 800 | 1.0884 | 0.8905 | 0.9105 | 0.9004 | 0.8269 | | 0.2429 | 90.0 | 900 | 1.1292 | 0.8938 | 0.909 | 0.9013 | 0.8279 | | 0.0098 | 100.0 | 1000 | 1.1164 | 0.9026 | 0.913 | 0.9078 | 0.8330 | ### Framework versions - Transformers 4.19.0.dev0 - Pytorch 1.11.0+cu113 - Datasets 2.0.0 - Tokenizers 0.11.6
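## Usage

A minimal inference sketch, assuming the processor's built-in OCR (which needs `pytesseract` and the Tesseract binary installed); the fine-tuned repository id is a placeholder, while the processor is loaded from the known base checkpoint:

```python
import torch
from PIL import Image
from transformers import AutoModelForTokenClassification, AutoProcessor

# The processor ships with the base model; apply_ocr=True runs Tesseract
# to extract words and bounding boxes from the image.
processor = AutoProcessor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=True)

# Placeholder repo id: substitute the Hub path of this fine-tuned checkpoint.
model = AutoModelForTokenClassification.from_pretrained("your-username/layoutlmv3-finetuned-funsd")

image = Image.open("form.png").convert("RGB")
encoding = processor(image, return_tensors="pt")

with torch.no_grad():
    logits = model(**encoding).logits

predicted = logits.argmax(-1).squeeze().tolist()
print([model.config.id2label[i] for i in predicted])
```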
dccuchile/albert-tiny-spanish-finetuned-mldoc
[ "pytorch", "albert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
32
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

model = load_from_hub(repo_id="LucianoDeben/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")  # course notebook helper

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
dccuchile/albert-tiny-spanish-finetuned-ner
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - generated_from_trainer datasets: - funsd pipeline_tag: object-detection widget: - src: "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" example_title: invoice - src: "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/contract.jpeg" example_title: contract model_index: - name: layoutlmv2-finetuned-funsd results: - task: name: Token Classification type: token-classification dataset: name: funsd type: funsd args: funsd duplicated_from: nielsr/layoutlmv2-finetuned-funsd --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # layoutlmv2-finetuned-funsd This model is a fine-tuned version of [microsoft/layoutlmv2-base-uncased](https://huggingface.co/microsoft/layoutlmv2-base-uncased) on the funsd dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.9.0.dev0 - Pytorch 1.8.0+cu101 - Datasets 1.9.0 - Tokenizers 0.10.3
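## Usage

A minimal inference sketch that supplies words and boxes from your own OCR step rather than the processor's built-in Tesseract; note that LayoutLMv2 additionally depends on `detectron2`. The repository id, example words, and boxes are all placeholders:

```python
import torch
from PIL import Image
from transformers import (
    LayoutLMv2FeatureExtractor,
    LayoutLMv2ForTokenClassification,
    LayoutLMv2Processor,
    LayoutLMv2TokenizerFast,
)

# apply_ocr=False means we pass words and boxes ourselves.
feature_extractor = LayoutLMv2FeatureExtractor(apply_ocr=False)
tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
processor = LayoutLMv2Processor(feature_extractor, tokenizer)

# Placeholder repo id: substitute the Hub path of this fine-tuned checkpoint.
model = LayoutLMv2ForTokenClassification.from_pretrained("your-username/layoutlmv2-finetuned-funsd")

image = Image.open("form.png").convert("RGB")
# Example words with bounding boxes normalized to a 0-1000 scale.
words = ["Invoice", "Date:", "2022-01-01"]
boxes = [[60, 40, 200, 70], [60, 90, 130, 120], [140, 90, 300, 120]]

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
with torch.no_grad():
    logits = model(**encoding).logits

print([model.config.id2label[i] for i in logits.argmax(-1).squeeze().tolist()])
```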
dccuchile/albert-xlarge-spanish-finetuned-pos
[ "pytorch", "albert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "AlbertForTokenClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - generated_from_trainer metrics: - accuracy model-index: - name: mbti-classification-roberta-base-aug results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbti-classification-roberta-base-aug This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.1645 - Accuracy: 0.2834 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 2.1201 | 1.0 | 29900 | 2.1415 | 0.2833 | | 1.8733 | 2.0 | 59800 | 2.1235 | 0.2866 | | 1.7664 | 3.0 | 89700 | 2.1645 | 0.2834 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu102 - Datasets 2.7.1 - Tokenizers 0.13.2
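## Usage

A minimal inference sketch with the text-classification pipeline; the repository id and example sentence are placeholders:

```python
from transformers import pipeline

# Placeholder repo id: substitute the Hub path of this checkpoint.
classifier = pipeline(
    "text-classification",
    model="your-username/mbti-classification-roberta-base-aug",
)

# Returns the predicted MBTI label with its score.
print(classifier("I recharge by spending quiet evenings with a good book."))
```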
dccuchile/albert-xlarge-spanish-finetuned-qa-mlqa
[ "pytorch", "albert", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "AlbertForQuestionAnswering" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit tags: - generated_from_trainer metrics: - precision - recall - f1 - accuracy model-index: - name: gpt2_tryout results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2_tryout This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.2275 - Precision: 0.0 - Recall: 0.0 - F1: 0.0 - Accuracy: 0.9182 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:---:|:--------:| | No log | 1.0 | 174 | 0.2725 | 0.0 | 0.0 | 0.0 | 0.9019 | | No log | 2.0 | 348 | 0.2395 | 0.0 | 0.0 | 0.0 | 0.9141 | | 0.3173 | 3.0 | 522 | 0.2275 | 0.0 | 0.0 | 0.0 | 0.9182 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.7.1 - Tokenizers 0.13.2
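## Usage

The card does not state the task, but the precision/recall/F1/accuracy metric set suggests token classification; a sketch under that assumption, with a placeholder repository id:

```python
from transformers import pipeline

# Placeholder repo id: substitute the Hub path of this checkpoint.
# Token classification is an assumption inferred from the reported metrics.
tagger = pipeline("token-classification", model="your-username/gpt2_tryout")

print(tagger("Alice moved to Berlin in March 2021."))
```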
CennetOguz/distilbert-base-uncased-finetuned-recipe
[ "pytorch", "tensorboard", "distilbert", "fill-mask", "transformers", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-12-16T11:23:10Z
--- license: mit tags: - generated_from_trainer datasets: - snips_built_in_intents metrics: - accuracy model-index: - name: roberta-base-finetuned-intent-ipu results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base-finetuned-intent-ipu This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the snips_built_in_intents dataset. It achieves the following results on the evaluation set: - Loss: 0.1503 - Accuracy: 1.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - distributed_type: IPU - gradient_accumulation_steps: 8 - total_train_batch_size: 8 - total_eval_batch_size: 5 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - training precision: Mixed Precision ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.2478 | 1.0 | 75 | 0.6069 | 0.96 | | 0.2522 | 2.0 | 150 | 0.1503 | 1.0 | | 0.0903 | 3.0 | 225 | 0.0712 | 1.0 | | 0.0883 | 4.0 | 300 | 0.0350 | 1.0 | | 0.0491 | 5.0 | 375 | 0.0267 | 1.0 | | 0.0305 | 6.0 | 450 | 0.0218 | 1.0 | | 0.0461 | 7.0 | 525 | 0.0191 | 1.0 | | 0.039 | 8.0 | 600 | 0.0174 | 1.0 | | 0.0337 | 9.0 | 675 | 0.0166 | 1.0 | | 0.0164 | 10.0 | 750 | 0.0162 | 1.0 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.10.0+cpu - Datasets 2.7.1 - Tokenizers 0.12.0
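## Usage

A minimal inference sketch with the text-classification pipeline; the repository id and query are placeholders:

```python
from transformers import pipeline

# Placeholder repo id: substitute the Hub path of this checkpoint.
intent_classifier = pipeline(
    "text-classification",
    model="your-username/roberta-base-finetuned-intent-ipu",
)

# Predicts one of the snips_built_in_intents classes, e.g. GetWeather.
print(intent_classifier("Will it be sunny in Madrid this weekend?"))
```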
Chaewon/mmnt_decoder_en
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
2022-12-17T07:51:48Z
--- license: other tags: - generated_from_trainer datasets: - scene_parse_150 model-index: - name: segformer-b0-scene-parse-150 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # segformer-b0-scene-parse-150 This model is a fine-tuned version of [nvidia/mit-b0](https://huggingface.co/nvidia/mit-b0) on the scene_parse_150 dataset. It achieves the following results on the evaluation set: - Loss: 2.5335 - Mean Iou: 0.0707 - Mean Accuracy: 0.1187 - Overall Accuracy: 0.4355 - Per Category Iou: [0.1511520068309521, 0.024028425494115033, 0.6471288114704491, 0.8633333995549904, 0.0, 0.4401568297272759, 0.0, 0.5004964042839947, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, 0.0, 0.05898491083676269, nan, nan, nan, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] - Per Category Accuracy: [0.2971938080867454, 0.21660794362588084, 0.7656914729136355, 0.9259316472418128, nan, 0.6205679256230926, nan, 0.9121937727941797, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.05898491083676269, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 6e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy | Per Category Iou | Per Category Accuracy | 
|:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------:| | 4.7901 | 1.0 | 20 | 4.9509 | 0.0030 | 0.0206 | 0.0375 | [0.029116167766871537, 0.004850985134077815, 0.010220235449829852, 0.176688345237905, 0.0, 0.0, 0.0, 0.05940553801114108, 0.0, 0.0, 0.00016572239427429129, 0.0, 0.08801905697185249, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, 0.0, 0.0, 0.0017191038829917865, nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0] | [0.030754871821836956, 0.21844971172325434, 0.01025483284020298, 0.17669869814416295, nan, 0.0, nan, 0.061639212083141916, nan, nan, 0.00017035775127768313, nan, 0.15775515543610114, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0021147326373737035, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 4.4381 | 2.0 | 40 | 4.7442 | 0.0149 | 0.0434 | 0.1446 | [0.06435324947589098, 0.003068118402773655, 0.2344339472740397, 0.5981880470408828, 0.0, 0.059353779822521185, 0.0, 0.06952019603193424, 0.0, 0.0, 0.060148904738144854, nan, 0.05000837621155917, 0.0, 0.0198164193188642, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, 0.0, nan, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, 0.0, nan, 0.00019081541788576516, nan, 0.0, nan, nan, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, nan, 0.0, 0.0, nan, nan, nan, nan] | [0.11529180226891593, 0.10345932094811018, 0.2521263194898786, 0.6017727398632092, nan, 0.061828683300187356, nan, 0.0824575055549826, nan, nan, 0.06545996592844974, nan, 0.06212687010917394, 0.0, 0.044941634241245136, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.00046591085572293835, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 4.0262 | 3.0 | 60 | 4.4675 | 0.0241 | 0.0489 | 0.2002 | [0.05145163571782198, 0.0032922897214954917, 0.366799962987138, 0.7716318760666929, 0.0, 0.11978532604036282, 0.0, 0.011402374908747513, 0.0, 0.0, 0.00044637163627088384, nan, 0.00022845895646978192, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, 0.0, 0.0, 0.0, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.0983641345274469, 0.06590326713645099, 0.4774055158989783, 0.782700871455053, nan, 0.12377252855795083, nan, 0.015961785469853086, nan, nan, 0.00044718909710391824, nan, 0.00023190543015484148, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 3.9733 | 4.0 | 80 | 4.2808 | 0.0340 | 0.0587 | 0.2522 | [0.07639825877784764, 0.008412991091849679, 0.48690675102259723, 0.7764034751970135, 0.0, 0.1473653784027341, 0.0, 0.0004283060905949748, 0.0, 0.0, 0.0, nan, 8.916152501872393e-05, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.14099691831564637, 0.14285714285714285, 0.6530689633846645, 0.7887733577653251, nan, 0.15355518250955622, nan, 0.0004875258998134276, nan, nan, 0.0, nan, 8.919439621340056e-05, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 4.0878 | 5.0 | 100 | 4.1491 | 0.0433 | 0.0679 | 0.2898 | [0.07526457850850539, 0.0004202622436400314, 0.5540736322960325, 0.8224971012804998, 0.0, 0.326869718001924, 0.0, 0.07828407569303993, 0.0, 0.0, 0.0025872807998211264, nan, 0.0, 0.0, 0.00016777116013757235, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.15489548376434975, 0.006005765534913517, 0.6622512783956825, 0.8652973387595082, nan, 0.3642129787526188, nan, 0.1181219002259495, nan, nan, 0.0025873083475298126, nan, 0.0, 0.0, 0.0007782101167315176, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 
nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 3.525 | 6.0 | 120 | 4.0150 | 0.0384 | 0.0592 | 0.2488 | [0.06036469045395224, 0.004663630354229713, 0.5271274896779279, 0.8064062151088215, 0.0, 0.06768267258911324, 0.0, 0.14502974022622822, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.14297437939083682, 0.06157911595131326, 0.5855296350184395, 0.8271578633370975, nan, 0.07046140081233473, nan, 0.2081407449770769, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 3.3545 | 7.0 | 140 | 3.8528 | 0.0429 | 0.0716 | 0.2870 | [0.06992493583388786, 0.01633327662056729, 0.5623595923734023, 0.8359440948264968, 0.0, 0.17865304809128185, 0.0, 0.17977235726617743, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.15471332448201974, 0.16143497757847533, 0.6640643054999495, 0.8809313276372701, nan, 0.19182438687330855, nan, 0.23966585724866635, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, 
nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 3.2592 | 8.0 | 160 | 3.7682 | 0.0475 | 0.0751 | 0.2944 | [0.07113251877792341, 0.014610308936804615, 0.5911149499418694, 0.8310932389577597, 0.0, 0.15071444913579735, 0.0, 0.24217577606047563, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.16459499606573508, 0.21188340807174888, 0.67273884272234, 0.8906366522489506, nan, 0.16047818788230447, nan, 0.30335830340986863, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 2.9325 | 9.0 | 180 | 3.6503 | 0.0563 | 0.0904 | 0.3601 | [0.0916482919757952, 0.022215982028241335, 0.6687414532937238, 0.8182098589677884, 0.0, 0.22720486815415822, 0.0, 0.3099954101952888, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.18319590010159606, 0.22173286354900704, 0.8309197613444393, 0.9184529009438988, nan, 0.24806771523105475, nan, 0.4907463834016182, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 
nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 3.142 | 10.0 | 200 | 3.4945 | 0.0545 | 0.0827 | 0.3270 | [0.07943750220476259, 0.0053334057734796625, 0.5932404807736752, 0.8476477835608861, 0.0, 0.13782713351656795, 0.0, 0.40742487246538345, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.19875643426021736, 0.04716527866752082, 0.6475273500183907, 0.8978703683975028, nan, 0.1430400099216456, nan, 0.7128378695118178, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 2.7183 | 11.0 | 220 | 3.3061 | 0.0532 | 0.0884 | 0.3614 | [0.09437645330484863, 0.021453065703442915, 0.6285046308785182, 0.8407216519837672, 0.0, 0.13011486304142492, 0.0, 0.306993495672741, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.1923019862873497, 0.1878603459320948, 0.8531643344693234, 0.9236464747618947, nan, 0.13667509711253537, nan, 0.5354346949681702, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 
0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 2.6474 | 12.0 | 240 | 3.3685 | 0.0598 | 0.0988 | 0.3780 | [0.08757474413901047, 0.016069320742568945, 0.6390678180059792, 0.8533876232724623, 0.0, 0.28524329381122526, 0.0, 0.39065739112218667, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, 0.0, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.19596395124140611, 0.14389814221652786, 0.753887266820953, 0.9232789295378518, nan, 0.3065699896797169, nan, 0.8392242712894122, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | | 2.7719 | 13.0 | 260 | 3.3020 | 0.0568 | 0.0944 | 0.3649 | [0.09085569817556093, 0.027910377517060544, 0.6367747960613105, 0.8167292669770043, 0.0, 0.3113982529070008, 0.0, 0.3306482064191315, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, 0.0, nan, 0.0, 0.0, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan] | [0.1821480147455122, 0.26593529788597053, 0.8123028699210015, 0.8710235868152473, nan, 0.3337880754222236, nan, 0.5541622523696571, nan, nan, 0.0, nan, 0.0, 0.0, 0.0, nan, 0.0, nan, 0.0, nan, 0.0, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, nan, 0.0, nan, nan, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, nan, 0.0, nan, nan, nan, nan, nan, nan, nan, nan, 0.0, 0.0, nan, nan, nan, nan, 0.0, nan, nan, nan, 
| Training Loss | Epoch | Step | Validation Loss | Mean Iou | Mean Accuracy | Overall Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:-------------:|:----------------:|
| 2.6362 | 14.0 | 280 | 3.1347 | 0.0660 | 0.1044 | 0.4209 |
| 2.4505 | 15.0 | 300 | 3.1482 | 0.0646 | 0.1088 | 0.4078 |
| 1.9352 | 16.0 | 320 | 3.1472 | 0.0647 | 0.1032 | 0.3934 |
| 1.9071 | 17.0 | 340 | 3.1266 | 0.0662 | 0.1079 | 0.4028 |
| 2.5206 | 18.0 | 360 | 2.8962 | 0.0600 | 0.1020 | 0.4033 |
| 2.4567 | 19.0 | 380 | 3.0116 | 0.0642 | 0.1137 | 0.4058 |
| 2.3976 | 20.0 | 400 | 2.9132 | 0.0683 | 0.1047 | 0.4049 |
| 1.9075 | 21.0 | 420 | 2.8910 | 0.0687 | 0.1131 | 0.4278 |
| 1.4467 | 22.0 | 440 | 2.7984 | 0.0655 | 0.1052 | 0.4186 |
| 1.9577 | 23.0 | 460 | 2.8220 | 0.0672 | 0.1058 | 0.4280 |
| 2.3491 | 24.0 | 480 | 2.8165 | 0.0690 | 0.1150 | 0.4141 |
| 1.5423 | 25.0 | 500 | 2.7267 | 0.0703 | 0.1130 | 0.4399 |
| 2.4912 | 26.0 | 520 | 2.7238 | 0.0693 | 0.1146 | 0.4259 |
| 1.443  | 27.0 | 540 | 2.7617 | 0.0685 | 0.1094 | 0.4104 |
| 1.5431 | 28.0 | 560 | 2.7918 | 0.0658 | 0.1153 | 0.4150 |
| 1.7827 | 29.0 | 580 | 2.6662 | 0.0699 | 0.1202 | 0.4431 |
| 1.374  | 30.0 | 600 | 2.6959 | 0.0669 | 0.1105 | 0.4156 |
| 1.7638 | 31.0 | 620 | 2.6978 | 0.0664 | 0.1121 | 0.4143 |
| 1.9152 | 32.0 | 640 | 2.6263 | 0.0685 | 0.1114 | 0.4310 |
| 1.7425 | 33.0 | 660 | 2.6800 | 0.0692 | 0.1209 | 0.4216 |
| 1.6934 | 34.0 | 680 | 2.6082 | 0.0676 | 0.1095 | 0.4216 |
| 1.9336 | 35.0 | 700 | 2.5847 | 0.0706 | 0.1232 | 0.4368 |
| 2.0489 | 36.0 | 720 | 2.6122 | 0.0660 | 0.1109 | 0.4193 |
| 1.2949 | 37.0 | 740 | 2.5498 | 0.0708 | 0.1203 | 0.4450 |
| 2.2796 | 38.0 | 760 | 2.5707 | 0.0695 | 0.1128 | 0.4342 |
| 1.2894 | 39.0 | 780 | 2.5862 | 0.0698 | 0.1184 | 0.4313 |
| 1.9124 | 40.0 | 800 | 2.5373 | 0.0703 | 0.1171 | 0.4452 |
| 1.7589 | 41.0 | 820 | 2.5569 | 0.0677 | 0.1121 | 0.4318 |
| 1.7756 | 42.0 | 840 | 2.5152 | 0.0688 | 0.1173 | 0.4430 |
| 1.0187 | 43.0 | 860 | 2.4940 | 0.0707 | 0.1174 | 0.4457 |
| 2.1398 | 44.0 | 880 | 2.5248 | 0.0685 | 0.1145 | 0.4424 |
| 1.3554 | 45.0 | 900 | 2.5254 | 0.0707 | 0.1179 | 0.4400 |
| 1.8522 | 46.0 | 920 | 2.5307 | 0.0700 | 0.1141 | 0.4350 |
| 1.9358 | 47.0 | 940 | 2.5056 | 0.0703 | 0.1160 | 0.4430 |
| 1.5047 | 48.0 | 960 | 2.5735 | 0.0707 | 0.1197 | 0.4342 |
| 1.4503 | 49.0 | 980 | 2.5705 | 0.0710 | 0.1212 | 0.4361 |
| 1.8094 | 50.0 | 1000 | 2.5335 | 0.0707 | 0.1187 | 0.4355 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu116
- Datasets 2.7.1
- Tokenizers 0.13.2
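The mean IoU values in the table above are the kind of output produced by the `mean_iou` metric in the Hugging Face `evaluate` library, which also reports per-category IoU and accuracy; categories that never occur in the validation labels have an empty union, so their per-category scores come back as `nan` and are skipped when averaging. The snippet below is a minimal sketch under illustrative assumptions (toy 4x4 label maps, a 150-class label space); it is not code from the original training run.

```python
import numpy as np
import evaluate

# Load the semantic-segmentation metric used by most HF segmentation examples.
mean_iou = evaluate.load("mean_iou")

# Toy 4x4 label maps over a hypothetical 150-class label space; only classes
# 0 and 1 actually occur, so the other 148 per-category scores come back as nan.
predictions = [np.zeros((4, 4), dtype=np.int64)]
references = [np.zeros((4, 4), dtype=np.int64)]
references[0][2:, 2:] = 1  # a small region of class 1 that the "model" missed

results = mean_iou.compute(
    predictions=predictions,
    references=references,
    num_labels=150,
    ignore_index=255,
    reduce_labels=False,
)
print(results["mean_iou"])              # nan-aware mean over observed categories
print(results["per_category_iou"][:5])  # mostly nan for absent categories
```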
Chaima/TunBerto
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - ms license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - google/fleurs model-index: - name: Whisper Medium MS - FLEURS results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: google/fleurs type: google/fleurs config: ms_my split: test metrics: - type: wer value: 11.75 name: WER - type: cer value: 3.49 name: CER --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Medium MS - FLEURS This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the FLEURS dataset. It achieves the following results on the evaluation set: - eval_loss: 0.2941 - eval_wer: 10.2 - eval_runtime: 954.9 - eval_samples_per_second: 0.784 - eval_steps_per_second: 0.049 - epoch: 53.2 - step: 5000 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 1 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
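The WER/CER figures reported above can be reproduced with the `evaluate` library once transcriptions are generated; a minimal sketch (the prediction and reference strings are placeholders, not real model output):

```python
import evaluate

# Placeholder transcriptions; in practice these come from running the model
# over the FLEURS ms_my test split.
predictions = ["saya suka belajar bahasa"]
references = ["saya suka belajar bahasa melayu"]

wer = evaluate.load("wer")
cer = evaluate.load("cer")
print("WER:", 100 * wer.compute(predictions=predictions, references=references))
print("CER:", 100 * cer.compute(predictions=predictions, references=references))
```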
chainyo/speaker-recognition-meetup
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
2022-12-16T11:40:28Z
--- language: - en license: mit tags: - generated_from_trainer datasets: - tomekkorbak/detoxify-pile-chunk3-0-50000 - tomekkorbak/detoxify-pile-chunk3-50000-100000 - tomekkorbak/detoxify-pile-chunk3-100000-150000 - tomekkorbak/detoxify-pile-chunk3-150000-200000 - tomekkorbak/detoxify-pile-chunk3-200000-250000 - tomekkorbak/detoxify-pile-chunk3-250000-300000 - tomekkorbak/detoxify-pile-chunk3-300000-350000 - tomekkorbak/detoxify-pile-chunk3-350000-400000 - tomekkorbak/detoxify-pile-chunk3-400000-450000 - tomekkorbak/detoxify-pile-chunk3-450000-500000 - tomekkorbak/detoxify-pile-chunk3-500000-550000 - tomekkorbak/detoxify-pile-chunk3-550000-600000 - tomekkorbak/detoxify-pile-chunk3-600000-650000 - tomekkorbak/detoxify-pile-chunk3-650000-700000 - tomekkorbak/detoxify-pile-chunk3-700000-750000 - tomekkorbak/detoxify-pile-chunk3-750000-800000 - tomekkorbak/detoxify-pile-chunk3-800000-850000 - tomekkorbak/detoxify-pile-chunk3-850000-900000 - tomekkorbak/detoxify-pile-chunk3-900000-950000 - tomekkorbak/detoxify-pile-chunk3-950000-1000000 - tomekkorbak/detoxify-pile-chunk3-1000000-1050000 - tomekkorbak/detoxify-pile-chunk3-1050000-1100000 - tomekkorbak/detoxify-pile-chunk3-1100000-1150000 - tomekkorbak/detoxify-pile-chunk3-1150000-1200000 - tomekkorbak/detoxify-pile-chunk3-1200000-1250000 - tomekkorbak/detoxify-pile-chunk3-1250000-1300000 - tomekkorbak/detoxify-pile-chunk3-1300000-1350000 - tomekkorbak/detoxify-pile-chunk3-1350000-1400000 - tomekkorbak/detoxify-pile-chunk3-1400000-1450000 - tomekkorbak/detoxify-pile-chunk3-1450000-1500000 - tomekkorbak/detoxify-pile-chunk3-1500000-1550000 - tomekkorbak/detoxify-pile-chunk3-1550000-1600000 - tomekkorbak/detoxify-pile-chunk3-1600000-1650000 - tomekkorbak/detoxify-pile-chunk3-1650000-1700000 - tomekkorbak/detoxify-pile-chunk3-1700000-1750000 - tomekkorbak/detoxify-pile-chunk3-1750000-1800000 - tomekkorbak/detoxify-pile-chunk3-1800000-1850000 - tomekkorbak/detoxify-pile-chunk3-1850000-1900000 - tomekkorbak/detoxify-pile-chunk3-1900000-1950000 model-index: - name: dazzling_swirles results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # dazzling_swirles This model was trained from scratch on the tomekkorbak/detoxify-pile-chunk3-0-50000, the tomekkorbak/detoxify-pile-chunk3-50000-100000, the tomekkorbak/detoxify-pile-chunk3-100000-150000, the tomekkorbak/detoxify-pile-chunk3-150000-200000, the tomekkorbak/detoxify-pile-chunk3-200000-250000, the tomekkorbak/detoxify-pile-chunk3-250000-300000, the tomekkorbak/detoxify-pile-chunk3-300000-350000, the tomekkorbak/detoxify-pile-chunk3-350000-400000, the tomekkorbak/detoxify-pile-chunk3-400000-450000, the tomekkorbak/detoxify-pile-chunk3-450000-500000, the tomekkorbak/detoxify-pile-chunk3-500000-550000, the tomekkorbak/detoxify-pile-chunk3-550000-600000, the tomekkorbak/detoxify-pile-chunk3-600000-650000, the tomekkorbak/detoxify-pile-chunk3-650000-700000, the tomekkorbak/detoxify-pile-chunk3-700000-750000, the tomekkorbak/detoxify-pile-chunk3-750000-800000, the tomekkorbak/detoxify-pile-chunk3-800000-850000, the tomekkorbak/detoxify-pile-chunk3-850000-900000, the tomekkorbak/detoxify-pile-chunk3-900000-950000, the tomekkorbak/detoxify-pile-chunk3-950000-1000000, the tomekkorbak/detoxify-pile-chunk3-1000000-1050000, the tomekkorbak/detoxify-pile-chunk3-1050000-1100000, the tomekkorbak/detoxify-pile-chunk3-1100000-1150000, the tomekkorbak/detoxify-pile-chunk3-1150000-1200000, the tomekkorbak/detoxify-pile-chunk3-1200000-1250000, the tomekkorbak/detoxify-pile-chunk3-1250000-1300000, the tomekkorbak/detoxify-pile-chunk3-1300000-1350000, the tomekkorbak/detoxify-pile-chunk3-1350000-1400000, the tomekkorbak/detoxify-pile-chunk3-1400000-1450000, the tomekkorbak/detoxify-pile-chunk3-1450000-1500000, the tomekkorbak/detoxify-pile-chunk3-1500000-1550000, the tomekkorbak/detoxify-pile-chunk3-1550000-1600000, the tomekkorbak/detoxify-pile-chunk3-1600000-1650000, the tomekkorbak/detoxify-pile-chunk3-1650000-1700000, the tomekkorbak/detoxify-pile-chunk3-1700000-1750000, the tomekkorbak/detoxify-pile-chunk3-1750000-1800000, the tomekkorbak/detoxify-pile-chunk3-1800000-1850000, the tomekkorbak/detoxify-pile-chunk3-1850000-1900000 and the tomekkorbak/detoxify-pile-chunk3-1900000-1950000 datasets. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 25000 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0+cu113 - Datasets 2.5.1 - Tokenizers 0.11.6 # Full config {'dataset': {'datasets': ['tomekkorbak/detoxify-pile-chunk3-0-50000', 'tomekkorbak/detoxify-pile-chunk3-50000-100000', 'tomekkorbak/detoxify-pile-chunk3-100000-150000', 'tomekkorbak/detoxify-pile-chunk3-150000-200000', 'tomekkorbak/detoxify-pile-chunk3-200000-250000', 'tomekkorbak/detoxify-pile-chunk3-250000-300000', 'tomekkorbak/detoxify-pile-chunk3-300000-350000', 'tomekkorbak/detoxify-pile-chunk3-350000-400000', 'tomekkorbak/detoxify-pile-chunk3-400000-450000', 'tomekkorbak/detoxify-pile-chunk3-450000-500000', 'tomekkorbak/detoxify-pile-chunk3-500000-550000', 'tomekkorbak/detoxify-pile-chunk3-550000-600000', 'tomekkorbak/detoxify-pile-chunk3-600000-650000', 'tomekkorbak/detoxify-pile-chunk3-650000-700000', 'tomekkorbak/detoxify-pile-chunk3-700000-750000', 'tomekkorbak/detoxify-pile-chunk3-750000-800000', 'tomekkorbak/detoxify-pile-chunk3-800000-850000', 'tomekkorbak/detoxify-pile-chunk3-850000-900000', 'tomekkorbak/detoxify-pile-chunk3-900000-950000', 'tomekkorbak/detoxify-pile-chunk3-950000-1000000', 'tomekkorbak/detoxify-pile-chunk3-1000000-1050000', 'tomekkorbak/detoxify-pile-chunk3-1050000-1100000', 'tomekkorbak/detoxify-pile-chunk3-1100000-1150000', 'tomekkorbak/detoxify-pile-chunk3-1150000-1200000', 'tomekkorbak/detoxify-pile-chunk3-1200000-1250000', 'tomekkorbak/detoxify-pile-chunk3-1250000-1300000', 'tomekkorbak/detoxify-pile-chunk3-1300000-1350000', 'tomekkorbak/detoxify-pile-chunk3-1350000-1400000', 'tomekkorbak/detoxify-pile-chunk3-1400000-1450000', 'tomekkorbak/detoxify-pile-chunk3-1450000-1500000', 'tomekkorbak/detoxify-pile-chunk3-1500000-1550000', 'tomekkorbak/detoxify-pile-chunk3-1550000-1600000', 'tomekkorbak/detoxify-pile-chunk3-1600000-1650000', 'tomekkorbak/detoxify-pile-chunk3-1650000-1700000', 'tomekkorbak/detoxify-pile-chunk3-1700000-1750000', 'tomekkorbak/detoxify-pile-chunk3-1750000-1800000', 'tomekkorbak/detoxify-pile-chunk3-1800000-1850000', 'tomekkorbak/detoxify-pile-chunk3-1850000-1900000', 'tomekkorbak/detoxify-pile-chunk3-1900000-1950000'], 'filter_threshold': 0.00078, 'is_split_by_sentences': True, 'skip_tokens': 1661599744}, 'generation': {'metrics_configs': [{}, {'n': 1}, {'n': 2}, {'n': 5}], 'scenario_configs': [{'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_samples': 2048}, {'generate_kwargs': {'do_sample': True, 'max_length': 128, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'challenging_rtp', 'num_samples': 2048, 'prompts_path': 'resources/challenging_rtp.jsonl'}], 'scorer_config': {'device': 'cuda:0'}}, 'kl_gpt3_callback': {'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': False, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'model_kwargs': {'revision': 
'81a1701e025d2c65ae6e8c2103df559071523ee0'}, 'path_or_name': 'tomekkorbak/goofy_pasteur'}, 'objective': {'name': 'MLE'}, 'tokenizer': {'path_or_name': 'gpt2'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'dazzling_swirles', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0005, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000, 'output_dir': 'training_output104340', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25354, 'save_strategy': 'steps', 'seed': 42, 'tokens_already_seen': 1661599744, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/tomekkorbak/apo/runs/1957kpf2
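The `generate_kwargs` in the config above map directly onto a `transformers` sampling call; a minimal sketch, assuming the checkpoint is published under the `hub_model_id` from the config (`dazzling_swirles`; the full repo path is a guess):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # config: tokenizer path_or_name 'gpt2'
# Repo path is an assumption based on hub_model_id in the config above.
model = AutoModelForCausalLM.from_pretrained("tomekkorbak/dazzling_swirles")

inputs = tokenizer("The weather today", return_tensors="pt")
# Sampling parameters copied from the 'unconditional' scenario in the config.
outputs = model.generate(
    **inputs,
    do_sample=True, max_length=128, min_length=10,
    temperature=0.7, top_k=0, top_p=0.9,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```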
Chakita/KROBERT
[ "pytorch", "roberta", "fill-mask", "transformers", "masked-lm", "fill-in-the-blanks", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - unity-ml-agents - ml-agents - deep-reinforcement-learning - reinforcement-learning - ML-Agents-Huggy library_name: ml-agents --- # **ppo** Agent playing **Huggy** This is a trained model of a **ppo** agent playing **Huggy** using the [Unity ML-Agents Library](https://github.com/Unity-Technologies/ml-agents). ## Usage (with ML-Agents) The Documentation: https://github.com/huggingface/ml-agents#get-started We wrote a complete tutorial on training your first agent with ML-Agents and publishing it to the Hub: ### Resume the training ``` mlagents-learn <your_configuration_file_path.yaml> --run-id=<run_id> --resume ``` ### Watch your Agent play You can watch your agent **playing directly in your browser**: 1. Go to https://huggingface.co/spaces/unity/ML-Agents-Huggy 2. Write your model_id: iblub/ppo-Huggy 3. Select your *.nn /*.onnx file 4. Click on Watch the agent play 👀
CharlieChen/feedback-bigbird
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - generated_from_trainer datasets: - imagefolder model-index: - name: donut-base-sroie results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # donut-base-sroie This model is a fine-tuned version of [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) on the imagefolder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3.4707116138614145e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 40 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.27.0.dev0 - Pytorch 1.13.1+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
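For a Donut fine-tune like this, inference typically pairs `DonutProcessor` with `VisionEncoderDecoderModel`; a minimal sketch, where both the repo id and the `<s_sroie>` task prompt are assumptions not stated in the card:

```python
from PIL import Image
from transformers import DonutProcessor, VisionEncoderDecoderModel

# Repo id is a placeholder; the card does not state where this fine-tune is published.
processor = DonutProcessor.from_pretrained("<user>/donut-base-sroie")
model = VisionEncoderDecoderModel.from_pretrained("<user>/donut-base-sroie")

image = Image.open("receipt.png").convert("RGB")
pixel_values = processor(image, return_tensors="pt").pixel_values
task_prompt = "<s_sroie>"  # assumed task token for the SROIE fine-tune
decoder_input_ids = processor.tokenizer(
    task_prompt, add_special_tokens=False, return_tensors="pt"
).input_ids

outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=512)
sequence = processor.batch_decode(outputs)[0]
sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(processor.tokenizer.pad_token, "")
print(processor.token2json(sequence))  # structured key/value output
```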
Cheapestmedsshop/Buymodafinilus
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 267.44 +/- 25.14 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and filename are placeholders for this model's actual Hub repo: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
Cheatham/xlm-roberta-large-finetuned-d12
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
null
--- tags: - FrozenLake-v1-8x8-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-8x8-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-8x8-no_slippery type: FrozenLake-v1-8x8-no_slippery metrics: - type: mean_reward value: 0.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage (assuming the `load_from_hub` helper from the Deep RL course notebook) ```python import gym model = load_from_hub(repo_id="hendoo/q-FrozenLake-v1-8x8-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
Cheatham/xlm-roberta-large-finetuned-r01
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
23
2022-12-16T12:44:47Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - wer model-index: - name: openai/whisper-medium results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # openai/whisper-medium This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.2201 - Wer: 44.6966 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0566 | 6.02 | 1000 | 0.9354 | 47.1998 | | 0.0025 | 13.01 | 2000 | 1.0806 | 47.5605 | | 0.0012 | 19.03 | 3000 | 1.1642 | 47.6665 | | 0.0002 | 26.01 | 4000 | 1.1866 | 44.9724 | | 0.0001 | 33.0 | 5000 | 1.2201 | 44.6966 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
Cheatham/xlm-roberta-large-finetuned3
[ "pytorch", "xlm-roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
22
null
--- tags: - Taxi-v3 - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-Taxi-v3 results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: Taxi-v3 type: Taxi-v3 metrics: - type: mean_reward value: 7.56 +/- 2.71 name: mean_reward verified: false --- # **Q-Learning** Agent playing **Taxi-v3** This is a trained model of a **Q-Learning** agent playing **Taxi-v3**. ## Usage (assuming the `load_from_hub` helper from the Deep RL course notebook) ```python import gym model = load_from_hub(repo_id="hendoo/q-Taxi-v3", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
CheonggyeMountain-Sherpa/kogpt-trinity-poem
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
15
null
--- license: apache-2.0 --- This repository contains a custom handler of text encoder part of [KerasCV Stable Diffusion](https://github.com/keras-team/keras-cv/tree/master/keras_cv/models/stable_diffusion) for [Hugging Face Endpoint](https://huggingface.co/inference-endpoints).
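Custom handlers for Inference Endpoints follow a small documented contract: a `handler.py` exposing an `EndpointHandler` class with `__init__(path)` and `__call__(data)`. A minimal sketch of that shape; the Keras loading details are illustrative, not this repository's actual code:

```python
from typing import Any, Dict, List

class EndpointHandler:
    def __init__(self, path: str = ""):
        # Illustrative loading step; the real handler wraps KerasCV's
        # stable diffusion text encoder rather than a generic saved model.
        import tensorflow as tf
        self.model = tf.keras.models.load_model(path)

    def __call__(self, data: Dict[str, Any]) -> List:
        # Inference Endpoints pass the request payload under "inputs".
        inputs = data["inputs"]
        embeddings = self.model(inputs)
        return embeddings.numpy().tolist()
```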
Chester/traffic-rec
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-16T12:55:42Z
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 282.45 +/- 21.16 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and filename are placeholders for this model's actual Hub repo: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
Chiuchiyin/DialoGPT-small-Donald
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - FrozenLake-v1-8x8-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-8x8-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-8x8-no_slippery type: FrozenLake-v1-8x8-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage (assuming the `load_from_hub` helper from the Deep RL course notebook) ```python import gym model = load_from_hub(repo_id="ldaquan1996/q-FrozenLake-v1-8x8-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
ChukSamuels/DialoGPT-small-Dr.FauciBot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
Access to model jaimeguiAI/potitosai is restricted and you are not in the authorized list. Visit https://huggingface.co/jaimeguiAI/potitosai to ask for access.
Chun/DialoGPT-large-dailydialog
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - FrozenLake-v1-4x4-no_slippery - q-learning - reinforcement-learning - custom-implementation model-index: - name: q-FrozenLake-v1-4x4-noSlippery results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1-4x4-no_slippery type: FrozenLake-v1-4x4-no_slippery metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **Q-Learning** Agent playing **FrozenLake-v1** This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**. ## Usage (assuming the `load_from_hub` helper from the Deep RL course notebook) ```python import gym model = load_from_hub(repo_id="HayLahav/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl") # Don't forget to check if you need to add additional attributes (is_slippery=False etc) env = gym.make(model["env_id"]) ```
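Once loaded, the pickled dictionary can drive a greedy rollout; a short sketch, assuming the dictionary stores the table under a `qtable` key as in the Deep RL course notebooks:

```python
import numpy as np

# "qtable" is the key used by the Deep RL course notebooks; adjust if it differs.
qtable = model["qtable"]
state = env.reset()  # classic gym API (4-tuple step below); adjust for gymnasium
done = False
while not done:
    action = int(np.argmax(qtable[state]))  # greedy action from the learned table
    state, reward, done, info = env.step(action)
```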
Chun/DialoGPT-small-dailydialog
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- license: apache-2.0 tags: - summarization - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-mohamed-elmogy results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-mohamed-elmogy This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: nan - Rouge1: 0.0 - Rouge2: 0.0 - Rougel: 0.0 - Rougelsum: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:| | No log | 1.0 | 1141 | 9.4719 | 0.0 | 0.0 | 0.0 | 0.0 | | 40.4884 | 2.0 | 2282 | 78.1757 | 0.0 | 0.0 | 0.0 | 0.0 | | 40.4884 | 3.0 | 3423 | 54.3033 | 0.0 | 0.0 | 0.0 | 0.0 | | 72.4118 | 4.0 | 4564 | 75.8558 | 0.0 | 0.0 | 0.0 | 0.0 | | 72.4118 | 5.0 | 5705 | 12.4297 | 0.0 | 0.0 | 0.0 | 0.0 | | 24.3571 | 6.0 | 6846 | 12.4297 | 0.0 | 0.0 | 0.0 | 0.0 | | 24.3571 | 7.0 | 7987 | 12.4297 | 0.0 | 0.0 | 0.0 | 0.0 | | 16.5474 | 8.0 | 9128 | nan | 0.0 | 0.0 | 0.0 | 0.0 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.1+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
Chun/w-en2zh-hsk
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: apache-2.0 --- This repository contains a custom handler of diffusion model part of [KerasCV Stable Diffusion](https://github.com/keras-team/keras-cv/tree/master/keras_cv/models/stable_diffusion) for [Hugging Face Endpoint](https://huggingface.co/inference-endpoints).
Chun/w-en2zh-mtm
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### supastazzz Dreambooth model trained by supastazz with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept:
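A minimal sketch of the `diffusers` route mentioned above; the repo id and the instance token `supastazzz` are assumptions inferred from the concept name:

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id is an assumption; Dreambooth repos are usually <user>/<concept-name>.
pipe = StableDiffusionPipeline.from_pretrained("supastazz/supastazzz", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a portrait photo of supastazzz, studio lighting").images[0]
image.save("supastazzz.png")
```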
Chun/w-zh2en-hsk
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - da license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small da - Common Voice+FLEURS results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0, FLEURS type: mozilla-foundation/common_voice_11_0 config: da split: test args: da metrics: - name: Wer type: wer value: 138.8532351394003 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small da - Common Voice+FLEURS This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0, FLEURS dataset. It achieves the following results on the evaluation set: - Loss: 0.9242 - Wer: 138.8532 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-07 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 1.22 | 15.0 | 1000 | 1.2238 | 197.5434 | | 1.004 | 30.0 | 2000 | 1.0386 | 197.9221 | | 0.9104 | 45.01 | 3000 | 0.9704 | 156.6544 | | 0.8455 | 60.01 | 4000 | 0.9352 | 142.3619 | | 0.8237 | 75.01 | 5000 | 0.9242 | 138.8532 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.1+cu117 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
Chun/w-zh2en-mtm
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- tags: - autotrain - vision - image-classification datasets: - sasha/autotrain-data-butterfly_similarity_swin widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace co2_eq_emissions: emissions: 28.296015693616066 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2490776951 - CO2 Emissions (in grams): 28.2960 ## Validation Metrics - Loss: 1.385 - Accuracy: 0.689 - Macro F1: 0.488 - Micro F1: 0.689 - Weighted F1: 0.641 - Macro Precision: 0.483 - Micro Precision: 0.689 - Weighted Precision: 0.628 - Macro Recall: 0.528 - Micro Recall: 0.689 - Weighted Recall: 0.689
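AutoTrain image classifiers load like any other `transformers` image-classification checkpoint; a minimal sketch, where the repo id is a guess assembled from the username, dataset name, and model ID above:

```python
from transformers import pipeline

# Repo id is a guess from the AutoTrain naming convention; verify before use.
classifier = pipeline(
    "image-classification",
    model="sasha/autotrain-butterfly_similarity_swin-2490776951",
)
preds = classifier("butterfly.jpg")
print(preds)  # list of {"label": ..., "score": ...} dicts
```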
Chun/w-zh2en-mto
[ "pytorch", "mbart", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: sentiment_model results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sentiment_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.3852 - Accuracy: 0.8424 - F1: 0.8398 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
CleveGreen/FieldClassifier_v2_gpt
[ "pytorch", "gpt2", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "GPT2ForSequenceClassification" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
26
null
--- license: apache-2.0 tags: - vision - image-to-image inference: false --- # Swin2SR model (image super-resolution) Swin2SR model that upscales images x4. It was introduced in the paper [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345) by Conde et al. and first released in [this repository](https://github.com/mv-lab/swin2sr). # Intended use cases This model is intended for real-world image super resolution. # Usage Refer to the [documentation](https://huggingface.co/docs/transformers/main/en/model_doc/swin2sr#transformers.Swin2SRForImageSuperResolution.forward.example).
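A minimal upscaling sketch following the linked documentation; the checkpoint name is an assumption, so substitute this repository's actual id:

```python
import numpy as np
import torch
from PIL import Image
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor

# Checkpoint name is a placeholder assumption; use this repository's actual id.
processor = Swin2SRImageProcessor()
model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-realworld-sr-x4-64-bsrgan-psnr")

image = Image.open("low_res.png").convert("RGB")
inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert the reconstructed tensor back to an image.
output = outputs.reconstruction.squeeze().clamp(0, 1).numpy()
output = np.moveaxis(output, 0, -1)
Image.fromarray((output * 255.0).round().astype(np.uint8)).save("upscaled.png")
```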
CleveGreen/JobClassifier
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
Access to model entresan1/ugurcanyucedag is restricted and you are not in the authorized list. Visit https://huggingface.co/entresan1/ugurcanyucedag to ask for access.
CleveGreen/JobClassifier_v2_gpt
[ "pytorch", "gpt2", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "GPT2ForSequenceClassification" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
--- language: - hi license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Large V2 Hindi- Drishti Sharma results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: hi split: test args: hi metrics: - name: Wer type: wer value: 10.413423679150261 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Large V2 Hindi This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.2609 - Wer: 10.4134 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0005 | 6.11 | 5000 | 0.2609 | 10.4134 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
Cloudy/DialoGPT-CJ-large
[ "pytorch", "conversational" ]
conversational
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- license: apache-2.0 --- This repository contains a custom handler of decoder part of [KerasCV Stable Diffusion](https://github.com/keras-team/keras-cv/tree/master/keras_cv/models/stable_diffusion) for [Hugging Face Endpoint](https://huggingface.co/inference-endpoints).
CoachCarter/distilbert-base-uncased
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-16T14:32:37Z
--- language: - ml license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Large V2 Malayalam - Drishti Sharma results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 11.0 type: mozilla-foundation/common_voice_11_0 config: ml split: test args: ml metrics: - name: Wer type: wer value: 27.458492975734355 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Large V2 Malayalam - Drishti Sharma This model is a fine-tuned version of [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) on the Common Voice 11.0 dataset. It achieves the following results on the evaluation set: - Loss: 0.3510 - Wer: 27.4585 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - training_steps: 1000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0001 | 18.52 | 1000 | 0.3510 | 27.4585 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0+cu116 - Datasets 2.7.1.dev0 - Tokenizers 0.13.2
CodeNinja1126/bert-q-encoder
[ "pytorch" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 270.26 +/- 24.01 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and filename are placeholders for this model's actual Hub repo: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
CodeNinja1126/koelectra-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: stable-baselines3 tags: - LunarLander-v2 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: LunarLander-v2 type: LunarLander-v2 metrics: - type: mean_reward value: 263.63 +/- 38.36 name: mean_reward verified: false --- # **PPO** Agent playing **LunarLander-v2** This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) A minimal loading sketch; the repo id and filename are placeholders for this model's actual Hub repo: ```python from stable_baselines3 import PPO from huggingface_sb3 import load_from_hub checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-LunarLander-v2.zip") model = PPO.load(checkpoint) ```
CodeNinja1126/test-model
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
24
null
--- language: - te license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - IndicSUPERB_train_validation_splits metrics: - wer model-index: - name: Whisper Small Telugu - Naga Budigam results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: IndicSUPERB train and validation splits type: IndicSUPERB train and validation splits config: None split: None args: 'config: te, split: test' metrics: - name: Wer type: wer value: 38.14924740301039 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Telugu - Naga Budigam This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Chai_Bisket_Stories_16-08-2021_14-17 dataset. It achieves the following results on the evaluation set: - Loss: 0.2875 - Wer: 38.1492 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 15000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:-------:| | 0.2064 | 0.66 | 500 | 0.2053 | 60.1707 | | 0.1399 | 1.33 | 1000 | 0.1535 | 49.3269 | | 0.1093 | 1.99 | 1500 | 0.1365 | 44.5516 | | 0.0771 | 2.66 | 2000 | 0.1316 | 42.1136 | | 0.0508 | 3.32 | 2500 | 0.1395 | 41.1384 | | 0.0498 | 3.99 | 3000 | 0.1386 | 40.5395 | | 0.0302 | 4.65 | 3500 | 0.1529 | 40.9529 | | 0.0157 | 5.32 | 4000 | 0.1719 | 40.6667 | | 0.0183 | 5.98 | 4500 | 0.1723 | 40.3646 | | 0.0083 | 6.65 | 5000 | 0.1911 | 40.4335 | | 0.0061 | 7.31 | 5500 | 0.2109 | 40.4176 | | 0.0055 | 7.98 | 6000 | 0.2075 | 39.7021 | | 0.0039 | 8.64 | 6500 | 0.2186 | 40.2639 | | 0.0026 | 9.31 | 7000 | 0.2254 | 39.1032 | | 0.0035 | 9.97 | 7500 | 0.2289 | 39.2834 | | 0.0016 | 10.64 | 8000 | 0.2332 | 39.1456 | | 0.0016 | 11.3 | 8500 | 0.2395 | 39.4371 | | 0.0016 | 11.97 | 9000 | 0.2447 | 39.2410 | | 0.0009 | 12.63 | 9500 | 0.2548 | 38.7799 | | 0.0008 | 13.3 | 10000 | 0.2551 | 38.7481 | | 0.0008 | 13.96 | 10500 | 0.2621 | 38.8276 | | 0.0007 | 14.63 | 11000 | 0.2633 | 38.6686 | | 0.0003 | 15.29 | 11500 | 0.2711 | 38.4566 | | 0.0005 | 15.96 | 12000 | 0.2772 | 38.7852 | | 0.0001 | 16.62 | 12500 | 0.2771 | 38.2658 | | 0.0001 | 17.29 | 13000 | 0.2808 | 38.2393 | | 0.0001 | 17.95 | 13500 | 0.2815 | 38.1810 | | 0.0 | 18.62 | 14000 | 0.2854 | 38.2022 | | 0.0 | 19.28 | 14500 | 0.2872 | 38.1333 | | 0.0 | 19.95 | 15000 | 0.2875 | 38.1492 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.13.0 - Datasets 2.7.1 - Tokenizers 0.13.2
CodeNinja1126/xlm-roberta-large-kor-mrc
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
language:
- sv
license: apache-2.0
tags:
- hf-asr-leaderboard
- generated_from_trainer
metrics:
- wer
model-index:
- name: Whisper Small SV
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper Small SV

This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3516
- Wer: 23.0598

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 16
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 200
- training_steps: 1000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.3274 | 0.86 | 200 | 0.3552 | 24.7469 |
| 0.1395 | 1.72 | 400 | 0.3303 | 23.5038 |
| 0.074 | 2.59 | 600 | 0.3349 | 22.6603 |
| 0.0199 | 3.45 | 800 | 0.3451 | 22.7935 |
| 0.0089 | 4.31 | 1000 | 0.3516 | 23.0598 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu116
- Datasets 2.7.1
- Tokenizers 0.13.2
CoderEFE/DialoGPT-marxbot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational", "has_space" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
2022-12-16T14:53:17Z
---
language:
- fi
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
metrics:
- wer
model-index:
- name: Whisper medium Finnish CV
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: mozilla-foundation/common_voice_11_0 fi
      type: mozilla-foundation/common_voice_11_0
      config: fi
      split: test
      args: fi
    metrics:
    - name: Wer
      type: wer
      value: 15.718055032039203
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper medium Finnish CV

This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the mozilla-foundation/common_voice_11_0 fi dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3010
- Wer: 15.7181

## Model description

The model is fine-tuned for 1000 steps/updates on CV11 Finnish train+validation data.
- Zero-shot: 18.8 WER (CV9 test data; on CV11 the zero-shot WER is a bit higher than this)
- Fine-tuned: 15.71 WER (CV11 test data)

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 64
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 1000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.0009 | 19.01 | 1000 | 0.3010 | 15.7181 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
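The zero-shot vs. fine-tuned comparison above rests on WER; here is a minimal sketch of how that metric is computed with the `evaluate` library (the Finnish strings are made-up examples, not data from the evaluation set):

```python
import evaluate

wer = evaluate.load("wer")
references = ["hyvää huomenta kaikille", "kiitos paljon"]
predictions = ["hyvä huomenta kaikille", "kiitos paljon"]
# WER = (substitutions + insertions + deletions) / reference words; x100 gives the percentage
print(100 * wer.compute(references=references, predictions=predictions))  # 20.0 here
```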
CoderEFE/DialoGPT-medium-marx
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="CreativeEvolution/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CoffeeAddict93/gpt1-call-of-the-wild
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="iblub/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CoffeeAddict93/gpt1-modest-proposal
[ "pytorch", "openai-gpt", "text-generation", "transformers", "has_space" ]
text-generation
{ "architectures": [ "OpenAIGPTLMHeadModel" ], "model_type": "openai-gpt", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="Nnarruqt/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
CoffeeAddict93/gpt2-call-of-the-wild
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
license: mit
tags:
- generated_from_trainer
model-index:
- name: bert-base-german-cased-finetuned-200labels
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-base-german-cased-finetuned-200labels

This model is a fine-tuned version of [ogimgio/bert-base-german-cased-finetuned-7labels](https://huggingface.co/ogimgio/bert-base-german-cased-finetuned-7labels) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 0.0744
- Micro f1: 0.0894
- Macro f1: 0.0538

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-06
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: constant
- num_epochs: 50

### Training results

| Training Loss | Epoch | Step | Validation Loss | Micro f1 | Macro f1 |
|:-------------:|:-----:|:-----:|:---------------:|:--------:|:--------:|
| 0.8041 | 1.0 | 1380 | 0.7312 | 0.0422 | 0.0413 |
| 0.605 | 2.0 | 2760 | 0.5440 | 0.0436 | 0.0423 |
| 0.4455 | 3.0 | 4140 | 0.4026 | 0.0478 | 0.0449 |
| 0.3317 | 4.0 | 5520 | 0.3070 | 0.0574 | 0.0516 |
| 0.2553 | 5.0 | 6900 | 0.2432 | 0.0682 | 0.0599 |
| 0.2041 | 6.0 | 8280 | 0.1982 | 0.0759 | 0.0657 |
| 0.167 | 7.0 | 9660 | 0.1653 | 0.0798 | 0.0677 |
| 0.1403 | 8.0 | 11040 | 0.1417 | 0.0839 | 0.0693 |
| 0.1222 | 9.0 | 12420 | 0.1249 | 0.0865 | 0.0695 |
| 0.109 | 10.0 | 13800 | 0.1132 | 0.0880 | 0.0684 |
| 0.0999 | 11.0 | 15180 | 0.1052 | 0.0874 | 0.0661 |
| 0.0941 | 12.0 | 16560 | 0.0994 | 0.0878 | 0.0655 |
| 0.089 | 13.0 | 17940 | 0.0951 | 0.0876 | 0.0640 |
| 0.0854 | 14.0 | 19320 | 0.0917 | 0.0888 | 0.0638 |
| 0.0831 | 15.0 | 20700 | 0.0890 | 0.0889 | 0.0626 |
| 0.0804 | 16.0 | 22080 | 0.0869 | 0.0890 | 0.0616 |
| 0.0788 | 17.0 | 23460 | 0.0851 | 0.0890 | 0.0606 |
| 0.077 | 18.0 | 24840 | 0.0835 | 0.0894 | 0.0599 |
| 0.0759 | 19.0 | 26220 | 0.0822 | 0.0894 | 0.0593 |
| 0.0745 | 20.0 | 27600 | 0.0811 | 0.0896 | 0.0588 |
| 0.0735 | 21.0 | 28980 | 0.0800 | 0.0890 | 0.0573 |
| 0.0728 | 22.0 | 30360 | 0.0791 | 0.0888 | 0.0564 |
| 0.0716 | 23.0 | 31740 | 0.0783 | 0.0895 | 0.0559 |
| 0.0709 | 24.0 | 33120 | 0.0775 | 0.0900 | 0.0556 |
| 0.0698 | 25.0 | 34500 | 0.0768 | 0.0896 | 0.0550 |
| 0.0694 | 26.0 | 35880 | 0.0761 | 0.0897 | 0.0548 |
| 0.069 | 27.0 | 37260 | 0.0755 | 0.0892 | 0.0545 |
| 0.0684 | 28.0 | 38640 | 0.0750 | 0.0893 | 0.0541 |
| 0.0679 | 29.0 | 40020 | 0.0744 | 0.0894 | 0.0538 |

### Framework versions

- Transformers 4.19.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.8.0
- Tokenizers 0.12.1
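Micro/macro F1 over 200 labels suggests a multi-label head; the sketch below shows inference under that assumption. The repo id `ogimgio/bert-base-german-cased-finetuned-200labels` is inferred from the base model's owner and the card title, so treat it as a guess:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Inferred repo id (the card itself names only the base model) — may need correcting.
name = "ogimgio/bert-base-german-cased-finetuned-200labels"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name)

inputs = tokenizer("Ein kurzer deutscher Beispielsatz.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
probs = torch.sigmoid(logits)[0]            # multi-label: independent sigmoid per label
active = (probs > 0.5).nonzero().flatten()  # labels whose probability clears the threshold
print([(model.config.id2label[i.item()], round(probs[i].item(), 3)) for i in active])
```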
CoffeeAddict93/gpt2-medium-modest-proposal
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-12-16T15:07:09Z
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: bert-base-uncased-issues-128
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# bert-base-uncased-issues-128

This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the None dataset.
It achieves the following results on the evaluation set:
- Loss: 1.0940

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 16

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 2.1003 | 1.0 | 291 | 1.6578 |
| 1.6211 | 2.0 | 582 | 1.4140 |
| 1.4964 | 3.0 | 873 | 1.3040 |
| 1.41 | 4.0 | 1164 | 1.3011 |
| 1.336 | 5.0 | 1455 | 1.3095 |
| 1.2862 | 6.0 | 1746 | 1.3739 |
| 1.2271 | 7.0 | 2037 | 1.2743 |
| 1.2043 | 8.0 | 2328 | 1.2019 |
| 1.1701 | 9.0 | 2619 | 1.2696 |
| 1.1498 | 10.0 | 2910 | 1.2507 |
| 1.1194 | 11.0 | 3201 | 1.1398 |
| 1.1094 | 12.0 | 3492 | 1.1309 |
| 1.0913 | 13.0 | 3783 | 1.0740 |
| 1.0683 | 14.0 | 4074 | 1.1201 |
| 1.0607 | 15.0 | 4365 | 1.1690 |
| 1.0558 | 16.0 | 4656 | 1.0940 |

### Framework versions

- Transformers 4.21.2
- Pytorch 1.13.0+cu117
- Datasets 2.7.1
- Tokenizers 0.12.1
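Since this is a masked-LM fine-tune (on GitHub-issue text, judging by the name), a fill-mask call is the natural smoke test. The repo id below is a placeholder because the card does not state one:

```python
from transformers import pipeline

# Placeholder repo id — substitute the actual checkpoint location.
fill = pipeline("fill-mask", model="your-username/bert-base-uncased-issues-128")
for pred in fill("This issue is caused by a [MASK] in the tokenizer."):
    print(f"{pred['token_str']:>12}  {pred['score']:.3f}")
```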
CoffeeAddict93/gpt2-modest-proposal
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
---
language:
- en
library_name: transformers
tags:
- depression
- roberta
---

Fine-tuned [DepRoBERTa](https://huggingface.co/rafalposwiata/deproberta-large-v1) model for detecting the level of depression as **not depression**, **moderate** or **severe**, based on social media posts in English.

Model was part of the winning solution for [the Shared Task on Detecting Signs of Depression from Social Media Text](https://competitions.codalab.org/competitions/36410) at [LT-EDI-ACL2022](https://sites.google.com/view/lt-edi-2022/home).

More information can be found in the following paper: [OPI@LT-EDI-ACL2022: Detecting Signs of Depression from Social Media Text using RoBERTa Pre-trained Language Models](https://aclanthology.org/2022.ltedi-1.40/).

If you use this model, please cite:

```
@inproceedings{poswiata-perelkiewicz-2022-opi,
    title = "{OPI}@{LT}-{EDI}-{ACL}2022: Detecting Signs of Depression from Social Media Text using {R}o{BERT}a Pre-trained Language Models",
    author = "Po{\'s}wiata, Rafa{\l} and Pere{\l}kiewicz, Micha{\l}",
    booktitle = "Proceedings of the Second Workshop on Language Technology for Equality, Diversity and Inclusion",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.ltedi-1.40",
    doi = "10.18653/v1/2022.ltedi-1.40",
    pages = "276--282",
}
```
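A minimal classification sketch for this checkpoint. The repo id `rafalposwiata/deproberta-large-depression` is an assumption (the card links the base model but not itself), and the input sentence is invented:

```python
from transformers import pipeline

# Assumed repo id — the card does not name its own repository.
classifier = pipeline("text-classification", model="rafalposwiata/deproberta-large-depression")
result = classifier("I feel exhausted all the time and nothing seems to help.")
print(result)  # one of the three labels described above: not depression / moderate / severe
```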
CogComp/roberta-temporal-predictor
[ "pytorch", "roberta", "fill-mask", "arxiv:2202.00436", "transformers", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
---
language:
- el
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
- google/fleurs
metrics:
- wer
model-index:
- name: whisper-sm-el-intlv-xl
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: mozilla-foundation/common_voice_11_0
      type: mozilla-foundation/common_voice_11_0
      config: el
      split: test
    metrics:
    - name: Wer
      type: wer
      value: 19.48365527488856
---

# whisper-sm-el-intlv-xl

This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 (el) and the google/fleurs (el_gr) datasets.
It achieves the following results on the evaluation set:
- Loss: 0.4725
- Wer: 19.4837

## Model description

The model was trained over 10000 steps on translation from Greek to English.

## Intended uses & limitations

This model was part of the Whisper Finetuning Event (Dec 2022) and was used primarily to compare relative improvements between transcription and translation tasks.

## Training and evaluation data

The training data combined examples from both the train and evaluation splits; the train split of the mozilla-foundation/common_voice_11_0 (el) dataset was used for evaluation and selection of the best checkpoint.

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 8.5e-06
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 10000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:-----:|:---------------:|:-------:|
| 0.0545 | 2.49 | 1000 | 0.2891 | 22.4926 |
| 0.0093 | 4.98 | 2000 | 0.3927 | 20.1337 |
| 0.0018 | 7.46 | 3000 | 0.4031 | 20.1616 |
| 0.001 | 9.95 | 4000 | 0.4209 | 19.6880 |
| 0.0008 | 12.44 | 5000 | 0.4498 | 20.0966 |
| 0.0005 | 14.93 | 6000 | 0.4725 | 19.4837 |
| 0.0002 | 17.41 | 7000 | 0.4917 | 19.5951 |
| 0.0001 | 19.9 | 8000 | 0.5050 | 19.6230 |
| 0.0001 | 22.39 | 9000 | 0.5146 | 19.5672 |
| 0.0001 | 24.88 | 10000 | 0.5186 | 19.4837 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0
- Datasets 2.7.1.dev0
- Tokenizers 0.12.1
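Because this checkpoint was tuned on the translate task, decoding needs `task="translate"` rather than `"transcribe"`; a minimal sketch with a placeholder repo id and audio file:

```python
import torch
import librosa
from transformers import WhisperProcessor, WhisperForConditionalGeneration

# Placeholder repo id — the card does not state where whisper-sm-el-intlv-xl is published.
name = "your-username/whisper-sm-el-intlv-xl"
processor = WhisperProcessor.from_pretrained(name)
model = WhisperForConditionalGeneration.from_pretrained(name)

audio, sr = librosa.load("greek_speech.wav", sr=16000)
features = processor(audio, sampling_rate=sr, return_tensors="pt").input_features
# task="translate" makes Whisper emit English text for Greek input audio
forced_ids = processor.get_decoder_prompt_ids(language="el", task="translate")
with torch.no_grad():
    ids = model.generate(features, forced_decoder_ids=forced_ids)
print(processor.batch_decode(ids, skip_special_tokens=True)[0])
```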
CohleM/bert-nepali-tokenizer
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
language:
- is
license: apache-2.0
tags:
- whisper-event
- hf-asr-leaderboard
datasets:
- language-and-voice-lab/samromur_asr
- language-and-voice-lab/althingi_asr
- language-and-voice-lab/malromur_asr
- language-and-voice-lab/samromur_children
- language-and-voice-lab/raddromur_asr
metrics:
- wer
pinned: false
model-index:
- name: Whisper medium is
  results:
  - task:
      type: automatic-speech-recognition
      name: Automatic Speech Recognition
    dataset:
      name: samromur
      type: language-and-voice-lab/samromur_asr
      config: samromur_asr
      split: test
    metrics:
    - type: wer
      value: 10.08%
      name: WER
  - task:
      type: automatic-speech-recognition
      name: Automatic Speech Recognition
    dataset:
      name: google/fleurs
      type: google/fleurs
      config: is_is
      split: test
    metrics:
    - type: wer
      value: 13.94
      name: WER
---

# openai/whisper-medium

This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the samromur, malromur, raddromur and althingi datasets.
It achieves the following results on the evaluation set (output lowercased and punctuation removed):
- Google Fleurs: 13.94% WER
- Samrómur: 10.08% WER

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
Coldestadam/Breakout_Mentors_SpongeBob_Model
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
---
license: apache-2.0
tags:
- generated_from_trainer
model-index:
- name: basic_00
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# basic_00

This model is a fine-tuned version of [sanjin7/basic_00](https://huggingface.co/sanjin7/basic_00) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 30

### Framework versions

- Transformers 4.25.1
- Pytorch 1.13.1+cu117
- Datasets 2.8.0
- Tokenizers 0.13.2
ComCom/gpt2-medium
[ "pytorch", "gpt2", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "GPT2Model" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
language:
- en
library_name: transformers
tags:
- depression
- roberta
---

Fine-tuned [RoBERTa](https://huggingface.co/roberta-large) model for detecting the level of depression as **not depression**, **moderate** or **severe**, based on social media posts in English.

Model was part of the winning solution for [the Shared Task on Detecting Signs of Depression from Social Media Text](https://competitions.codalab.org/competitions/36410) at [LT-EDI-ACL2022](https://sites.google.com/view/lt-edi-2022/home).

More information can be found in the following paper: [OPI@LT-EDI-ACL2022: Detecting Signs of Depression from Social Media Text using RoBERTa Pre-trained Language Models](https://aclanthology.org/2022.ltedi-1.40/).

If you use this model, please cite:

```
@inproceedings{poswiata-perelkiewicz-2022-opi,
    title = "{OPI}@{LT}-{EDI}-{ACL}2022: Detecting Signs of Depression from Social Media Text using {R}o{BERT}a Pre-trained Language Models",
    author = "Po{\'s}wiata, Rafa{\l} and Pere{\l}kiewicz, Micha{\l}",
    booktitle = "Proceedings of the Second Workshop on Language Technology for Equality, Diversity and Inclusion",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.ltedi-1.40",
    doi = "10.18653/v1/2022.ltedi-1.40",
    pages = "276--282",
}
```
ComCom-Dev/gpt2-bible-test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: crazytaxi
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="iblub/crazytaxi", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Cometasonmi451/Mine
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="Jedalc/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
cometrain/neurotitle-rugpt3-small
[ "pytorch", "gpt2", "text-generation", "ru", "en", "dataset:All-NeurIPS-Papers-Scraper", "transformers", "Cometrain AutoCode", "Cometrain AlphaML", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
20
null
---
tags:
- FrozenLake-v1-4x4-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-4x4-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-4x4-no_slippery
      type: FrozenLake-v1-4x4-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="nelsonsilva/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Connor/DialoGPT-small-rick
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.54 +/- 2.73
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="Jedalc/Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Connor-tech/bert_cn_finetuning
[ "pytorch", "jax", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
null
---
tags:
- generated_from_keras_callback
model-index:
- name: BeardedJohn/bert-finetuned-joint_intent_slots_ubb_endava
  results: []
---

<!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. -->

# BeardedJohn/bert-finetuned-joint_intent_slots_ubb_endava

This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.
It achieves the following results on the evaluation set:
- Train Loss: 5.1282
- Train Output 1 Loss: 0.2030
- Train Output 2 Loss: 4.9252
- Train Output 1 Accuracy: 0.9719
- Train Output 2 Accuracy: 0.6116
- Validation Loss: 4.7179
- Validation Output 1 Loss: 0.0092
- Validation Output 2 Loss: 4.7087
- Validation Output 1 Accuracy: 0.9978
- Validation Output 2 Accuracy: 0.9997
- Epoch: 0

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- optimizer: {'name': 'Adam', 'learning_rate': 3e-05, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False}
- training_precision: float32

### Training results

| Train Loss | Train Output 1 Loss | Train Output 2 Loss | Train Output 1 Accuracy | Train Output 2 Accuracy | Validation Loss | Validation Output 1 Loss | Validation Output 2 Loss | Validation Output 1 Accuracy | Validation Output 2 Accuracy | Epoch |
|:----------:|:-------------------:|:-------------------:|:-----------------------:|:-----------------------:|:---------------:|:------------------------:|:------------------------:|:----------------------------:|:----------------------------:|:-----:|
| 5.1282 | 0.2030 | 4.9252 | 0.9719 | 0.6116 | 4.7179 | 0.0092 | 4.7087 | 0.9978 | 0.9997 | 0 |

### Framework versions

- Transformers 4.25.1
- TensorFlow 2.9.2
- Tokenizers 0.13.2
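The separate "Output 1" and "Output 2" losses and accuracies above suggest a two-headed Keras model: one sequence-level intent head and one token-level slot head. A minimal, hypothetical sketch of that pattern — the layer sizes and names are invented; only the two-output compile setup is the point:

```python
import tensorflow as tf

# Hypothetical shapes: 128 tokens of 768-dim encoder states (e.g. BERT outputs).
encoder_states = tf.keras.Input(shape=(128, 768), name="encoder_states")
pooled = tf.keras.layers.GlobalAveragePooling1D()(encoder_states)
intent = tf.keras.layers.Dense(10, activation="softmax", name="output_1")(pooled)          # one intent per utterance
slots = tf.keras.layers.Dense(20, activation="softmax", name="output_2")(encoder_states)   # one tag per token
model = tf.keras.Model(encoder_states, [intent, slots])

# Keras sums the per-output losses and reports accuracy per output,
# which is why the table above has separate "Output 1" and "Output 2" columns.
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=3e-05),
    loss="sparse_categorical_crossentropy",
    metrics=["accuracy"],
)
model.summary()
```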
Connorvr/TeachingGen
[ "pytorch", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
license: openrail
---

<h1>ITRobo2022 model. Trained on SD 1.5.</h1>

![bbb4.jpg](https://s3.amazonaws.com/moonup/production/uploads/1671440178373-630c71f215433862cfc241a9.jpeg)
<br>
I really like the Robo-Diffusion model (https://huggingface.co/nousr/robo-diffusion), but most of what you can get with it is robot heads. :)<br>
In my model I tried to emphasize full-length images of robots. I also get good results on a homogeneous background, which makes it easier to cut out objects for further work.<br>
However, good results are also obtained with mixed queries. Try it. Good luck!

!!! For best results, use this token at the beginning of the prompt: <b>itrobo2022</b><br><br>

<b>itrobo2022.ckpt</b> - base trained model. It's a little hard to control the result, but good for generating a variety of robots, and for working with img2img.<br>
<b>itrobo2022-40-with-v1-5-pruned-emaonly-60.ckpt</b> - 40% mixed with base SD1.5. Better manageability and control of results.

<b>Example:</b><br>
Prompt: <i>ITRobo2022 (a full body photo of pug)+, isolated, high resolution photo, cinematic lighting, trending on artstation, DOF, high resolution, 4 k, 8 k, solid background</i><br>
Negative prompt: <i>(duplicate)+++, deformed, no leg, blurry, no head, headless, watermarks, writings, text, marks, ugly, a lot of fingers, mutation, too many legs</i><br>

![2022-12-16-22-03-57-1-289133679-ddim-itrobo2022-30-with-v.png](https://s3.amazonaws.com/moonup/production/uploads/1671430421748-630c71f215433862cfc241a9.png)
<br>
Prompt: <i>A realistic photograph of a 3d robot in a modern city. A glossy white and orange robot.</i><br>
Negative prompt: <i>black and white robot, picture frame, a children's drawing in crayon. #Wholesale, Abstract Metal Sculpture. i'm leaving a bad review.</i><br>

![2022-12-17-00-52-37-1-145827469-ddim-itrobo2022.png](https://s3.amazonaws.com/moonup/production/uploads/1671431407866-630c71f215433862cfc241a9.png)

Best results on:<br>
DDIM<br>
steps: 20<br>
CFG scale 7<br>
512x512

![bbb3.jpg](https://s3.amazonaws.com/moonup/production/uploads/1671440275603-630c71f215433862cfc241a9.jpeg)
<br>
img2img:<br>
![bbb2.jpg](https://s3.amazonaws.com/moonup/production/uploads/1671440303278-630c71f215433862cfc241a9.jpeg)
<br>
NSFW :)<br>
![bbb.jpg](https://s3.amazonaws.com/moonup/production/uploads/1671440350414-630c71f215433862cfc241a9.jpeg)
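A diffusers sketch of the recommended settings above (DDIM, 20 steps, CFG scale 7, 512x512). The repo id is a placeholder, and it assumes the .ckpt has first been converted to the diffusers format — the card only ships raw checkpoints:

```python
import torch
from diffusers import StableDiffusionPipeline, DDIMScheduler

# Placeholder repo id; assumes a diffusers-format conversion of itrobo2022.ckpt.
pipe = StableDiffusionPipeline.from_pretrained(
    "your-username/itrobo2022-diffusers", torch_dtype=torch.float16
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)  # DDIM sampler, as recommended

prompt = "itrobo2022 a glossy white and orange robot, full body, solid background"
image = pipe(prompt, num_inference_steps=20, guidance_scale=7, height=512, width=512).images[0]
image.save("robot.png")
```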
ConstellationBoi/Oop
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
- automatic-speech-recognition
- hf-asr-leaderboard
- pashto
- ps
datasets:
- google/fleurs
metrics:
- wer
model-index:
- name: Whisper Small Pashto
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: google/fleurs ps_af
      type: google/fleurs
      args: 'config: ps_af, split: test'
    metrics:
    - name: Wer
      type: wer
      value: 56.651029055690074
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper Small Pashto

This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the google/fleurs ps_af dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2273
- Wer: 56.6510

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-07
- train_batch_size: 64
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 800
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:------:|:----:|:---------------:|:-------:|
| 2.1183 | 3.7 | 100 | 1.3170 | 76.9522 |
| 0.8565 | 7.41 | 200 | 0.9367 | 61.9930 |
| 0.2246 | 11.11 | 300 | 0.9642 | 58.8302 |
| 0.054 | 14.81 | 400 | 1.0876 | 57.9903 |
| 0.0159 | 18.52 | 500 | 1.1798 | 57.8768 |
| 0.0045 | 22.22 | 600 | 1.2309 | 56.6510 |
| 0.0026 | 100.0 | 700 | 1.2581 | 56.8478 |
| 0.0023 | 114.29 | 800 | 1.2710 | 56.7570 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.1+cu117
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
Contrastive-Tension/BERT-Base-CT
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
2022-12-16T15:54:34Z
---
tags:
- Taxi-v3
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-Taxi-v3
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: Taxi-v3
      type: Taxi-v3
    metrics:
    - type: mean_reward
      value: 7.56 +/- 2.71
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="nelsonsilva/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Contrastive-Tension/BERT-Base-NLI-CT
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
---
tags:
- FrozenLake-v1-8x8-no_slippery
- q-learning
- reinforcement-learning
- custom-implementation
model-index:
- name: q-FrozenLake-v1-8x8-noSlippery
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: FrozenLake-v1-8x8-no_slippery
      type: FrozenLake-v1-8x8-no_slippery
    metrics:
    - type: mean_reward
      value: 1.00 +/- 0.00
      name: mean_reward
      verified: false
---

# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gym

# load_from_hub is the helper defined in the accompanying training notebook
# (it downloads and unpickles the saved Q-table); it is not a library import.
model = load_from_hub(repo_id="eduyio/q-FrozenLake-v1-8x8-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
Contrastive-Tension/BERT-Base-Swe-CT-STSb
[ "pytorch", "tf", "jax", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
126
2022-12-16T15:56:12Z
---
language:
- as
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
datasets:
- mozilla-foundation/common_voice_11_0
- google/fleurs
metrics:
- wer
model-index:
- name: kpriyanshu256/whisper-large-v2-as-600-32-1e-05-bn-Assamese
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: Common Voice 11.0
      type: mozilla-foundation/common_voice_11_0
      config: as
      split: test
      args: as
    metrics:
    - name: Wer
      type: wer
      value: 17.560007218913555
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: FLEURS
      type: google/fleurs
    metrics:
    - name: Wer
      type: wer
      value:
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# kpriyanshu256/whisper-large-v2-as-600-32-1e-05-bn-Assamese

This model is a fine-tuned version of [kpriyanshu256/whisper-large-v2-as-600-32-1e-05-bn](https://huggingface.co/kpriyanshu256/whisper-large-v2-as-600-32-1e-05-bn) on the Common Voice 11.0 and the FLEURS datasets.
It achieves the following results on the evaluation set:
- Loss: 0.2486
- Wer: 17.5600

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- distributed_type: multi-GPU
- gradient_accumulation_steps: 2
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 50
- training_steps: 1000

### Training results

| Training Loss | Epoch | Step | Validation Loss | Wer |
|:-------------:|:-----:|:----:|:---------------:|:-------:|
| 0.1273 | 0.1 | 100 | 0.1737 | 20.8988 |
| 0.0811 | 0.2 | 200 | 0.1739 | 19.0038 |
| 0.0638 | 0.3 | 300 | 0.1823 | 18.4804 |
| 0.0404 | 1.05 | 400 | 0.1893 | 17.1810 |
| 0.0316 | 1.15 | 500 | 0.2067 | 17.0186 |
| 0.027 | 1.25 | 600 | 0.2081 | 17.7405 |
| 0.025 | 2.01 | 700 | 0.2213 | 17.7585 |
| 0.0213 | 2.11 | 800 | 0.2237 | 17.8488 |
| 0.0176 | 2.21 | 900 | 0.2390 | 16.7479 |
| 0.0184 | 2.31 | 1000 | 0.2486 | 17.5600 |

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.0+cu117
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
Contrastive-Tension/BERT-Distil-CT
[ "pytorch", "tf", "distilbert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
Access to model aiot/mt5-small-finetuned-amazon-en-es is restricted and you are not in the authorized list. Visit https://huggingface.co/aiot/mt5-small-finetuned-amazon-en-es to ask for access.
Cool/Demo
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
library_name: stable-baselines3
tags:
- LunarLander-v2
- deep-reinforcement-learning
- reinforcement-learning
- stable-baselines3
model-index:
- name: PPO
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: LunarLander-v2
      type: LunarLander-v2
    metrics:
    - type: mean_reward
      value: 270.37 +/- 20.86
      name: mean_reward
      verified: false
---

# **PPO** Agent playing **LunarLander-v2**

This is a trained model of a **PPO** agent playing **LunarLander-v2** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)

A minimal loading sketch (the repo id and filename below are placeholders — the card does not name them):

```python
from stable_baselines3 import PPO
from huggingface_sb3 import load_from_hub

# Replace both values with the actual repository and checkpoint filename.
checkpoint = load_from_hub(repo_id="your-username/ppo-LunarLander-v2", filename="ppo-LunarLander-v2.zip")
model = PPO.load(checkpoint)
```
Coolhand/Abuela
[ "en", "image_restoration", "superresolution", "license:mit" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- CartPole-v1
- deep-reinforcement-learning
- reinforcement-learning
- custom-implementation
library_name: cleanrl
model-index:
- name: DQN
  results:
  - task:
      type: reinforcement-learning
      name: reinforcement-learning
    dataset:
      name: CartPole-v1
      type: CartPole-v1
    metrics:
    - type: mean_reward
      value: 36.50 +/- 11.32
      name: mean_reward
      verified: false
---

# (CleanRL) **DQN** Agent Playing **CartPole-v1**

This is a trained model of a DQN agent playing CartPole-v1.
The model was trained by using [CleanRL](https://github.com/vwxyzjn/cleanrl) and the most up-to-date training code can be found [here](https://github.com/vwxyzjn/cleanrl/blob/master/cleanrl/dqn_jax.py).

## Command to reproduce the training

```bash
curl -OL https://huggingface.co/vwxyzjn/CartPole-v1-dqn_jax-seed1/raw/main/dqn.py
curl -OL https://huggingface.co/vwxyzjn/CartPole-v1-dqn_jax-seed1/raw/main/pyproject.toml
curl -OL https://huggingface.co/vwxyzjn/CartPole-v1-dqn_jax-seed1/raw/main/poetry.lock
poetry install --all-extras
python dqn_jax.py --save-model --upload-model --hf-entity vwxyzjn --total-timesteps 1000
```

# Hyperparameters

```python
{'batch_size': 128,
 'buffer_size': 10000,
 'capture_video': False,
 'end_e': 0.05,
 'env_id': 'CartPole-v1',
 'exp_name': 'dqn_jax',
 'exploration_fraction': 0.5,
 'gamma': 0.99,
 'hf_entity': 'vwxyzjn',
 'learning_rate': 0.00025,
 'learning_starts': 10000,
 'save_model': True,
 'seed': 1,
 'start_e': 1,
 'target_network_frequency': 500,
 'total_timesteps': 1000,
 'track': False,
 'train_frequency': 10,
 'upload_model': True,
 'wandb_entity': None,
 'wandb_project_name': 'cleanRL'}
```
CouchCat/ma_ner_v7_distil
[ "pytorch", "distilbert", "token-classification", "en", "transformers", "ner", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
---
language:
- id
license: apache-2.0
tags:
- whisper-event
- generated_from_trainer
model-index:
- name: Whisper Medium ID - FLEURS-CV
  results:
  - task:
      type: automatic-speech-recognition
      name: Automatic Speech Recognition
    dataset:
      name: google/fleurs
      type: google/fleurs
      config: id_id
      split: test
    metrics:
    - type: wer
      value: 7.8
      name: WER
    - type: cer
      value: 2.43
      name: CER
  - task:
      type: automatic-speech-recognition
      name: Automatic Speech Recognition
    dataset:
      name: mozilla-foundation/common_voice_11_0
      type: mozilla-foundation/common_voice_11_0
      config: id
      split: test
    metrics:
    - type: wer
      value: 8.67
      name: WER
    - type: cer
      value: 2.71
      name: CER
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# Whisper Medium ID - FLEURS-CV

This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the None dataset.
It achieves the following results on the evaluation set:
- eval_loss: 0.2563
- eval_wer: 8.4690
- eval_runtime: 2961.9108
- eval_samples_per_second: 1.453
- eval_steps_per_second: 0.091
- epoch: 14.29
- step: 5000

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1e-05
- train_batch_size: 32
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 10000
- mixed_precision_training: Native AMP

### Framework versions

- Transformers 4.26.0.dev0
- Pytorch 1.13.1+cu117
- Datasets 2.7.1.dev0
- Tokenizers 0.13.2
CouchCat/ma_sa_v7_distil
[ "pytorch", "distilbert", "text-classification", "en", "transformers", "sentiment-analysis", "license:mit" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38
null
---
tags:
- autotrain
- text-classification
language:
- en
widget:
- text: "I love AutoTrain 🤗"
datasets:
- huggingface/autotrain-data-test-c3f6d546
co2_eq_emissions:
  emissions: 6.167593303372823
---

# Model Trained Using AutoTrain

- Problem type: Binary Classification
- Model ID: 15006427
- CO2 Emissions (in grams): 6.1676

## Validation Metrics

- Loss: 0.138
- Accuracy: 0.945
- Precision: 0.925
- Recall: 0.970
- AUC: 0.993
- F1: 0.947

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/huggingface/autotrain-test-15006427
```

Or Python API:

```
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model = AutoModelForSequenceClassification.from_pretrained("huggingface/autotrain-test-15006427", use_auth_token=True)
tokenizer = AutoTokenizer.from_pretrained("huggingface/autotrain-test-15006427", use_auth_token=True)

inputs = tokenizer("I love AutoTrain", return_tensors="pt")
outputs = model(**inputs)
```