modelId: string (lengths 4–81)
tags: list
pipeline_tag: string (17 distinct values)
config: dict
downloads: int64 (0–59.7M)
first_commit: timestamp[ns, tz=UTC]
card: string (lengths 51–438k)
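A minimal sketch of working with rows in this schema, assuming the dump has been exported to a local Parquet file (the `models.parquet` filename and the use of pandas are assumptions, not part of the dump):

```python
import pandas as pd

# Hypothetical local export of the rows shown below; adjust the path to your copy.
df = pd.read_parquet("models.parquet")

# Each row carries: modelId, tags, pipeline_tag, config, downloads, first_commit, card.
fill_mask = df[df["pipeline_tag"] == "fill-mask"]

# Most-downloaded fill-mask checkpoints in the dump.
print(
    fill_mask.sort_values("downloads", ascending=False)[["modelId", "downloads"]].head()
)
```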
Davlan/mbart50-large-yor-eng-mt
[ "pytorch", "mbart", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MBartForConditionalGeneration" ], "model_type": "mbart", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
Access to model ThomasGerald/MBARTHEZ-QG is restricted and you are not in the authorized list. Visit https://huggingface.co/ThomasGerald/MBARTHEZ-QG to ask for access.
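The `config` column in each row corresponds to fields exposed by the checkpoint's configuration; a minimal sketch of reproducing such an entry (it uses the first row's model id, requires network access, and `task_specific_params` may simply be absent for some checkpoints):

```python
from transformers import AutoConfig

# Fetch the configuration for the first row's checkpoint.
config = AutoConfig.from_pretrained("Davlan/mbart50-large-yor-eng-mt")

# The same fields captured in the `config` column of this dump.
print(config.architectures)                           # e.g. ['MBartForConditionalGeneration']
print(config.model_type)                              # e.g. 'mbart'
print(getattr(config, "task_specific_params", None))  # per-task generation defaults, often None
```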
Davlan/mt5_base_eng_yor_mt
[ "pytorch", "mt5", "text2text-generation", "arxiv:2103.08647", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MT5ForConditionalGeneration" ], "model_type": "mt5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 384 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 433 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 433, "warmup_steps": 44, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Davlan/naija-twitter-sentiment-afriberta-large
[ "pytorch", "tf", "xlm-roberta", "text-classification", "arxiv:2201.08277", "transformers", "has_space" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
61
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: DLL888/bert-base-uncased-squad results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # DLL888/bert-base-uncased-squad This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on [SQuAD](https://huggingface.co/datasets/squad) dataset. It achieves the following results on the evaluation set: - Exact Match: 80.21759697256385 - F1: 87.77849998885436 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training Machine Trained in Google Colab Pro with the following specs: - A100-SXM4-40GB - NVIDIA-SMI 460.32.03 - Driver Version: 460.32.03 - CUDA Version: 11.2 Training took about 26 minutes for two epochs. ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': {'class_name': 'WarmUp', 'config': {'initial_learning_rate': 2e-05, 'decay_schedule_fn': {'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 2e-05, 'decay_steps': 10564, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, '__passive_serialization__': True}, 'warmup_steps': 500, 'power': 1.0, 'name': None}}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False} - training_precision: mixed_float16 ### Training results | Train Loss | Train End Logits Accuracy | Train Start Logits Accuracy | Validation Loss | Validation End Logits Accuracy | Validation Start Logits Accuracy | Epoch | |:----------:|:-------------------------:|:---------------------------:|:---------------:|:------------------------------:|:--------------------------------:|:-----:| | 1.4348 | 0.6368 | 0.5974 | 1.0155 | 0.7193 | 0.6825 | 0 | | 0.8072 | 0.7735 | 0.7320 | 0.9990 | 0.7302 | 0.6983 | 1 | ### Framework versions - Transformers 4.24.0 - TensorFlow 2.9.2 - Datasets 2.7.1 - Tokenizers 0.13.2
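The card above reports SQuAD scores but ships no inference snippet; a minimal usage sketch, assuming the standard `transformers` question-answering pipeline can load this checkpoint (the example question and context are illustrative, not from the card):

```python
from transformers import pipeline

# Load the fine-tuned checkpoint into a question-answering pipeline.
qa = pipeline("question-answering", model="DLL888/bert-base-uncased-squad")

result = qa(
    question="Who wrote Hamlet?",
    context="The play Hamlet was written by William Shakespeare around 1600.",
)
print(result["answer"], round(result["score"], 3))
```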
Davlan/xlm-roberta-base-finetuned-amharic
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
401
2022-12-01T14:31:20Z
--- library_name: stable-baselines3 tags: - SpaceInvadersNoFrameskip-v4 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: DQN results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: SpaceInvadersNoFrameskip-v4 type: SpaceInvadersNoFrameskip-v4 metrics: - type: mean_reward value: 653.50 +/- 137.33 name: mean_reward verified: false --- # **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sayby -f logs/ python enjoy.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga sayby -f logs/ rl_zoo3 enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python train.py --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga sayby ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ```
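Besides the RL Zoo command line shown above, the checkpoint can be loaded programmatically; a minimal sketch, where the repo id and zip filename follow the usual RL Zoo naming conventions and are assumptions rather than something the card states:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import DQN

# Download the zipped agent from the Hub (assumed repo/file names).
checkpoint = load_from_hub(
    repo_id="sayby/dqn-SpaceInvadersNoFrameskip-v4",
    filename="dqn-SpaceInvadersNoFrameskip-v4.zip",
)

# Load the trained DQN agent; evaluating it still requires an Atari env
# wrapped the same way as during training (AtariWrapper, frame stacking).
model = DQN.load(checkpoint)
print(model.policy)
```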
Davlan/xlm-roberta-base-finetuned-chichewa
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- tags: - generated_from_trainer datasets: - imdb model-index: - name: enlm-roberta-81-imdb results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # enlm-roberta-81-imdb This model is a fine-tuned version of [manirai91/enlm-r](https://huggingface.co/manirai91/enlm-r) on the imdb dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.06 - num_epochs: 10 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.11.0 - Datasets 2.7.0 - Tokenizers 0.13.2
Davlan/xlm-roberta-base-finetuned-english
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "license:apache-2.0", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
2022-12-01T14:34:41Z
--- license: creativeml-openrail-m tags: - stable-diffusion - text-to-image --- The embeddings in this repository were trained for the 768px [Stable Diffusion v2.0](https://huggingface.co/stabilityai/stable-diffusion-2) model. The embeddings should work on any model that uses SD v2.0 as a base. Currently the kc32-v4-5000.pt & kc16-v4-5000.pt embeddings seem to perform the best. **Knollingcase v1** The v1 embeddings were trained for 4000 iterations with a batch size of 2, a text dropout of 10%, & 16 vectors using Automatic1111's WebUI. A total of 69 training images with high quality captions were used. **Knollingcase v2** The v2 embeddings were trained for 5000 iterations with a batch size of 4 and a text dropout of 10%, & 16 vectors using Automatic1111's WebUI. A total of 78 training images with high quality captions were used. **Knollingcase v3** The v3 embeddings were trained for 4000-6250 iterations with a batch size of 4 and a text dropout of 10%, & 16 vectors using Automatic1111's WebUI. A total of 86 training images with high quality captions were used. <div align="center"> <img src="https://huggingface.co/ProGamerGov/knollingcase-embeddings-sd-v2-0/resolve/main/cruise_ship_on_wave_kc16-v3-6250.png"> </div> * [Full Image](https://huggingface.co/ProGamerGov/knollingcase-embeddings-sd-v2-0/resolve/main/cruise_ship_on_wave_kc16-v3-6250.png) **Knollingcase v4** The v4 embeddings were trained for 4000-6250 iterations with a batch size of 4 and a text dropout of 10%, using Automatic1111's WebUI. A total of 116 training images with high quality captions were used. <div align="center"> <img src="https://huggingface.co/ProGamerGov/knollingcase-embeddings-sd-v2-0/resolve/main/v4_size_768_t4x11.jpg"> </div> * [Full Image](https://huggingface.co/ProGamerGov/knollingcase-embeddings-sd-v2-0/resolve/main/v4_size_768_t4x11.jpg) **Usage** To use the embeddings, download and then rename the files to whatever trigger word you want to use. They were trained with kc8, kc16, kc32, but any trigger word should work. The knollingcase style is considered to be a concept inside a sleek (sometimes scifi) display case with transparent walls, and a minimalistic background. Suggested prompts: ``` <concept>, micro-details, photorealism, photorealistic, <kc-vx-iter> photorealistic <concept>, very detailed, scifi case, <kc-vx-iter> <concept>, very detailed, scifi transparent case, <kc-vx-iter> ``` Suggested negative prompts: ``` blurry, toy, cartoon, animated, underwater, photoshop ``` Suggested samplers: DPM++ SDE Karras (used for the example images) or DPM++ 2S a Karras
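The card describes usage only through the Automatic1111 WebUI; a rough equivalent with 🧨 Diffusers is sketched below (the `load_textual_inversion` call, the chosen `weight_name`, and the trigger token are assumptions, not something the card documents):

```python
import torch
from diffusers import StableDiffusionPipeline

# Base model the embeddings were trained against (SD v2.0, 768px).
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", torch_dtype=torch.float16
).to("cuda")

# Hypothetical: load one embedding file under a trigger token of your choice.
pipe.load_textual_inversion(
    "ProGamerGov/knollingcase-embeddings-sd-v2-0",
    weight_name="kc32-v4-5000.pt",
    token="kc32",
)

prompt = "a tiny cruise ship, very detailed, scifi transparent case, kc32"
negative = "blurry, toy, cartoon, animated, underwater, photoshop"
image = pipe(prompt, negative_prompt=negative).images[0]
image.save("knollingcase.png")
```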
Davlan/xlm-roberta-base-finetuned-igbo
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68
null
--- language: - en license: creativeml-openrail-m thumbnail: "https://huggingface.co/Guizmus/MosaicArt/resolve/main/showcase.jpg" tags: - stable-diffusion - text-to-image - image-to-image --- # Mosaic Art ## Details ![Showcase](https://huggingface.co/Guizmus/MosaicArt/resolve/main/showcase.jpg) This is a Dreamboothed Stable Diffusion model trained on pictures of mosaic art. The total dataset is made of 46 pictures. V2 was trained on [Stable diffusion 2.1 768](https://huggingface.co/stabilityai/stable-diffusion-2-1). I used [StableTuner](https://github.com/devilismyfriend/StableTuner) to do the training, using full captions on the pictures with almost no recurring word outside the main concept, so that no additional regularisation was needed. 6 epochs of 40 repeats at LR 1e-6 were used, with prior preservation. V1 was trained on [runwayml 1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5) and the [new VAE](https://huggingface.co/stabilityai/sd-vae-ft-mse). I used [EveryDream](https://github.com/victorchall/EveryDream-trainer) to do the training, using full captions on the pictures with almost no recurring word outside the main concept, so that no additional regularisation was needed. Out of epochs e0 to e11, e8 was selected as the best application of the style without overtraining. Prior preservation was confirmed to be good. A total of 9 epochs of 40 repeats with a learning rate of 1e-6 was used. The token "Mosaic Art" will bring in the new concept, trained as a style. The recommended sampling is k_Euler_a or DPM++ 2M Karras at 20 steps, CFG scale 7.5. ## Model v2 [CKPT v2](https://huggingface.co/Guizmus/MosaicArt/resolve/main/MosaicArt_v2.ckpt) [YAML v2](https://huggingface.co/Guizmus/MosaicArt/resolve/main/MosaicArt_v2.yaml) ## Model v1 ![Showcase](https://huggingface.co/Guizmus/MosaicArt/resolve/main/showcase.png) [CKPT v1](https://huggingface.co/Guizmus/MosaicArt/resolve/main/MosaicArt_v1.ckpt) [CKPT v1 with ema weights](https://huggingface.co/Guizmus/MosaicArt/resolve/main/MosaicArt_v1_ema.ckpt) [Dataset](https://huggingface.co/Guizmus/MosaicArt/resolve/main/dataset_v1.zip) ## 🧨 Diffusers This model can be used just like any other Stable Diffusion model. For more information, please have a look at the [Stable Diffusion documentation](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion). You can also export the model to [ONNX](https://huggingface.co/docs/diffusers/optimization/onnx), [MPS](https://huggingface.co/docs/diffusers/optimization/mps) and/or [FLAX/JAX](). ```python from diffusers import StableDiffusionPipeline import torch model_id = "Guizmus/MosaicArt" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe = pipe.to("cuda") prompt = "Mosaic Art dog on the moon" image = pipe(prompt).images[0] image.save("./MosaicArt.png") ```
Davlan/xlm-roberta-base-finetuned-kinyarwanda
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
61
null
Access to model corumba/desenhista is restricted and you are not in the authorized list. Visit https://huggingface.co/corumba/desenhista to ask for access.
Davlan/xlm-roberta-base-finetuned-luganda
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 26560 with parameters: ``` {'batch_size': 8, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 26560, "warmup_steps": 2656, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Davlan/xlm-roberta-base-finetuned-yoruba
[ "pytorch", "xlm-roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "XLMRobertaForMaskedLM" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
This model is said to be a high-quality female anatomy model, though its exact design aesthetic (i.e. anime or semi-realistic) remains unclear. The only known fact is that the renowned Berry Mix model, which produces realistic humans, is reportedly built with this model as its base.
Dayout/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m --- ### MODEL_CONFIG_DDIM_TRAIN ``` MODEL_CONFIG_DDIM = { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.6.0", "scheduler": [ "diffusers", "DDIMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` ### MODEL_CONFIG_DDIM_SAVE ``` MODEL_CONFIG_DDIM_SAVE = { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.9.0.dev0", "scheduler": [ "diffusers", "DDIMScheduler" ], "text_encoder": [ "transformers", "CLIPTextModel" ], "tokenizer": [ "transformers", "CLIPTokenizer" ], "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` ### SCHEDULER_CONFIG_DDIM_TRAIN ``` SCHEDULER_CONFIG_DDIM_TRAIN = { "_class_name": "DDIMScheduler", "_diffusers_version": "0.6.0", "beta_end": 0.012, "beta_schedule": "scaled_linear", "beta_start": 0.00085, "clip_sample": false, "num_train_timesteps": 1000, "set_alpha_to_one": false, "skip_prk_steps": true, "steps_offset": 1, "trained_betas": null } ``` ### SCHEDULER_CONFIG_DDIM_SAVE ``` SCHEDULER_CONFIG_DDIM_SAVE = { "_class_name": "DDIMScheduler", "_diffusers_version": "0.9.0.dev0", "beta_end": 0.012, "beta_schedule": "scaled_linear", "beta_start": 0.00085, "clip_sample": false, "num_train_timesteps": 1000, "prediction_type": "epsilon", "set_alpha_to_one": false, "skip_prk_steps": true, "steps_offset": 1, "trained_betas": null } ```
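A minimal sketch (not part of the card) of how a scheduler config like `SCHEDULER_CONFIG_DDIM_SAVE` maps onto a live scheduler object in 🧨 Diffusers; only the keys that are also `DDIMScheduler` arguments are passed here:

```python
from diffusers import DDIMScheduler

# Subset of SCHEDULER_CONFIG_DDIM_SAVE that corresponds to DDIMScheduler arguments.
scheduler_config = {
    "beta_start": 0.00085,
    "beta_end": 0.012,
    "beta_schedule": "scaled_linear",
    "clip_sample": False,
    "num_train_timesteps": 1000,
    "prediction_type": "epsilon",
    "set_alpha_to_one": False,
    "steps_offset": 1,
    "trained_betas": None,
}

# Instantiate the scheduler directly from the config dict.
scheduler = DDIMScheduler.from_config(scheduler_config)
print(scheduler.config.prediction_type)
```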
Declan/Breitbart_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-12-01T16:36:39Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - squad_v2 model-index: - name: distilbert-base-uncased-finetuned-sngp-for-qa-squad-seed-999 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-sngp-for-qa-squad-seed-999 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the squad_v2 dataset. It achieves the following results on the evaluation set: - Loss: 6.0586 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 7 | 5.9735 | | No log | 2.0 | 14 | 6.0586 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
Declan/ChicagoTribune_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('VlakoResker/sd-class-butterflies-32') image = pipeline().images[0] image ```
Declan/FoxNews_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('bowwwave/sd-class-butterflies-64') image = pipeline().images[0] image ```
Declan/HuffPost_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- datasets: - tweet_eval metrics: - f1 - accuracy model-index: - name: cardiffnlp/roberta-base-hate results: - task: type: text-classification name: Text Classification dataset: name: tweet_eval type: hate split: test metrics: - name: Micro F1 (tweet_eval/hate) type: micro_f1_tweet_eval/hate value: 0.5131313131313131 - name: Macro F1 (tweet_eval/hate) type: micro_f1_tweet_eval/hate value: 0.4634195952763752 - name: Accuracy (tweet_eval/hate) type: accuracy_tweet_eval/hate value: 0.5131313131313131 pipeline_tag: text-classification widget: - text: Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}} example_title: "topic_classification 1" - text: Yes, including Medicare and social security saving👍 example_title: "sentiment 1" - text: All two of them taste like ass. example_title: "offensive 1" - text: If you wanna look like a badass, have drama on social media example_title: "irony 1" - text: Whoever just unfollowed me you a bitch example_title: "hate 1" - text: I love swimming for the same reason I love meditating...the feeling of weightlessness. example_title: "emotion 1" - text: Beautiful sunset last night from the pontoon @TupperLakeNY example_title: "emoji 1" --- # cardiffnlp/roberta-base-hate This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the [`tweet_eval (hate)`](https://huggingface.co/datasets/tweet_eval) via [`tweetnlp`](https://github.com/cardiffnlp/tweetnlp). Training split is `train` and parameters have been tuned on the validation split `validation`. Following metrics are achieved on the test split `test` ([link](https://huggingface.co/cardiffnlp/roberta-base-hate/raw/main/metric.json)). - F1 (micro): 0.5131313131313131 - F1 (macro): 0.4634195952763752 - Accuracy: 0.5131313131313131 ### Usage Install tweetnlp via pip. ```shell pip install tweetnlp ``` Load the model in python. ```python import tweetnlp model = tweetnlp.Classifier("cardiffnlp/roberta-base-hate", max_length=128) model.predict('Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}}') ``` ### Reference ``` @inproceedings{camacho-collados-etal-2022-tweetnlp, title={{T}weet{NLP}: {C}utting-{E}dge {N}atural {L}anguage {P}rocessing for {S}ocial {M}edia}, author={Camacho-Collados, Jose and Rezaee, Kiamehr and Riahi, Talayeh and Ushio, Asahi and Loureiro, Daniel and Antypas, Dimosthenis and Boisson, Joanne and Espinosa-Anke, Luis and Liu, Fangyu and Mart{'\i}nez-C{'a}mara, Eugenio and others}, author = "Ushio, Asahi and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
Declan/NPR_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- datasets: - tweet_eval metrics: - f1 - accuracy model-index: - name: cardiffnlp/twitter-roberta-base-2021-124m-emoji results: - task: type: text-classification name: Text Classification dataset: name: tweet_eval type: emoji split: test metrics: - name: Micro F1 (tweet_eval/emoji) type: micro_f1_tweet_eval/emoji value: 0.46162 - name: Macro F1 (tweet_eval/emoji) type: micro_f1_tweet_eval/emoji value: 0.34612351090521765 - name: Accuracy (tweet_eval/emoji) type: accuracy_tweet_eval/emoji value: 0.46162 pipeline_tag: text-classification widget: - text: Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}} example_title: "topic_classification 1" - text: Yes, including Medicare and social security saving👍 example_title: "sentiment 1" - text: All two of them taste like ass. example_title: "offensive 1" - text: If you wanna look like a badass, have drama on social media example_title: "irony 1" - text: Whoever just unfollowed me you a bitch example_title: "hate 1" - text: I love swimming for the same reason I love meditating...the feeling of weightlessness. example_title: "emotion 1" - text: Beautiful sunset last night from the pontoon @TupperLakeNY example_title: "emoji 1" --- # cardiffnlp/twitter-roberta-base-2021-124m-emoji This model is a fine-tuned version of [cardiffnlp/twitter-roberta-base-2021-124m](https://huggingface.co/cardiffnlp/twitter-roberta-base-2021-124m) on the [`tweet_eval (emoji)`](https://huggingface.co/datasets/tweet_eval) via [`tweetnlp`](https://github.com/cardiffnlp/tweetnlp). Training split is `train` and parameters have been tuned on the validation split `validation`. Following metrics are achieved on the test split `test` ([link](https://huggingface.co/cardiffnlp/twitter-roberta-base-2021-124m-emoji/raw/main/metric.json)). - F1 (micro): 0.46162 - F1 (macro): 0.34612351090521765 - Accuracy: 0.46162 ### Usage Install tweetnlp via pip. ```shell pip install tweetnlp ``` Load the model in python. ```python import tweetnlp model = tweetnlp.Classifier("cardiffnlp/twitter-roberta-base-2021-124m-emoji", max_length=128) model.predict('Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}}') ``` ### Reference ``` @inproceedings{camacho-collados-etal-2022-tweetnlp, title={{T}weet{NLP}: {C}utting-{E}dge {N}atural {L}anguage {P}rocessing for {S}ocial {M}edia}, author={Camacho-Collados, Jose and Rezaee, Kiamehr and Riahi, Talayeh and Ushio, Asahi and Loureiro, Daniel and Antypas, Dimosthenis and Boisson, Joanne and Espinosa-Anke, Luis and Liu, Fangyu and Mart{'\i}nez-C{'a}mara, Eugenio and others}, author = "Ushio, Asahi and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
Declan/NPR_model_v6
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- language: - gl license: apache-2.0 tags: - hf-asr-leaderboard - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 model-index: - name: Whisper Small gl - Galician results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small gl - Galician This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the Common Voice 11.0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.25.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
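The card lists the fine-tuning setup but no transcription example; a minimal sketch follows, where the checkpoint id is a placeholder because the dump does not say under which repo this Whisper Small Galician model is published:

```python
from transformers import pipeline

# Placeholder repo id; substitute the actual location of the fine-tuned checkpoint.
asr = pipeline(
    "automatic-speech-recognition",
    model="<namespace>/whisper-small-gl",
)

# Transcribe a local Galician audio clip (path is illustrative).
print(asr("sample_gl.wav")["text"])
```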
Declan/WallStreetJournal_model_v1
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('shahukareem/sd-class-butterflies-64') image = pipeline().images[0] image ```
Declan/WallStreetJournal_model_v2
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-12-01T19:09:34Z
--- license: mit tags: - generated_from_trainer model-index: - name: mdeberta_all results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mdeberta_all This model is a fine-tuned version of [microsoft/mdeberta-v3-base](https://huggingface.co/microsoft/mdeberta-v3-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2148 - Aerospacemanufacturer Precision: 0.7073 - Aerospacemanufacturer Recall: 0.8406 - Aerospacemanufacturer F1: 0.7682 - Aerospacemanufacturer Number: 138 - Anatomicalstructure Precision: 0.6762 - Anatomicalstructure Recall: 0.7269 - Anatomicalstructure F1: 0.7006 - Anatomicalstructure Number: 227 - Artwork Precision: 0.5802 - Artwork Recall: 0.5802 - Artwork F1: 0.5802 - Artwork Number: 131 - Artist Precision: 0.7565 - Artist Recall: 0.7938 - Artist F1: 0.7747 - Artist Number: 1722 - Athlete Precision: 0.7195 - Athlete Recall: 0.7636 - Athlete F1: 0.7409 - Athlete Number: 719 - Carmanufacturer Precision: 0.6806 - Carmanufacturer Recall: 0.8176 - Carmanufacturer F1: 0.7429 - Carmanufacturer Number: 159 - Cleric Precision: 0.6867 - Cleric Recall: 0.5124 - Cleric F1: 0.5869 - Cleric Number: 201 - Clothing Precision: 0.5797 - Clothing Recall: 0.625 - Clothing F1: 0.6015 - Clothing Number: 128 - Disease Precision: 0.6262 - Disease Recall: 0.6768 - Disease F1: 0.6505 - Disease Number: 198 - Drink Precision: 0.7296 - Drink Recall: 0.8112 - Drink F1: 0.7682 - Drink Number: 143 - Facility Precision: 0.6439 - Facility Recall: 0.7203 - Facility F1: 0.6800 - Facility Number: 497 - Food Precision: 0.6786 - Food Recall: 0.5327 - Food F1: 0.5969 - Food Number: 214 - Humansettlement Precision: 0.8594 - Humansettlement Recall: 0.8792 - Humansettlement F1: 0.8692 - Humansettlement Number: 1689 - Medicalprocedure Precision: 0.6545 - Medicalprocedure Recall: 0.7606 - Medicalprocedure F1: 0.7036 - Medicalprocedure Number: 142 - Medication/vaccine Precision: 0.7183 - Medication/vaccine Recall: 0.765 - Medication/vaccine F1: 0.7409 - Medication/vaccine Number: 200 - Musicalgrp Precision: 0.7132 - Musicalgrp Recall: 0.7688 - Musicalgrp F1: 0.7400 - Musicalgrp Number: 372 - Musicalwork Precision: 0.7513 - Musicalwork Recall: 0.7052 - Musicalwork F1: 0.7275 - Musicalwork Number: 407 - Org Precision: 0.6335 - Org Recall: 0.6117 - Org F1: 0.6224 - Org Number: 667 - Otherloc Precision: 0.7514 - Otherloc Recall: 0.6205 - Otherloc F1: 0.6797 - Otherloc Number: 224 - Otherper Precision: 0.4558 - Otherper Recall: 0.5821 - Otherper F1: 0.5112 - Otherper Number: 859 - Otherprod Precision: 0.6076 - Otherprod Recall: 0.5543 - Otherprod F1: 0.5797 - Otherprod Number: 433 - Politician Precision: 0.6228 - Politician Recall: 0.4793 - Politician F1: 0.5417 - Politician Number: 603 - Privatecorp Precision: 0.7159 - Privatecorp Recall: 0.4884 - Privatecorp F1: 0.5806 - Privatecorp Number: 129 - Publiccorp Precision: 0.56 - Publiccorp Recall: 0.6914 - Publiccorp F1: 0.6188 - Publiccorp Number: 243 - Scientist Precision: 0.4545 - Scientist Recall: 0.4497 - Scientist F1: 0.4521 - Scientist Number: 189 - Software Precision: 0.7159 - Software Recall: 0.8046 - Software F1: 0.7577 - Software Number: 307 - Sportsgrp Precision: 0.7845 - Sportsgrp Recall: 0.8701 - Sportsgrp F1: 0.8251 - Sportsgrp Number: 385 - Sportsmanager Precision: 0.6667 - Sportsmanager Recall: 0.5361 - Sportsmanager F1: 0.5943 - Sportsmanager Number: 194 - Station 
Precision: 0.7406 - Station Recall: 0.8093 - Station F1: 0.7734 - Station Number: 194 - Symptom Precision: 0.6316 - Symptom Recall: 0.5581 - Symptom F1: 0.5926 - Symptom Number: 129 - Vehicle Precision: 0.5514 - Vehicle Recall: 0.6505 - Vehicle F1: 0.5969 - Vehicle Number: 206 - Visualwork Precision: 0.7538 - Visualwork Recall: 0.7951 - Visualwork F1: 0.7739 - Visualwork Number: 693 - Writtenwork Precision: 0.6913 - Writtenwork Recall: 0.6803 - Writtenwork F1: 0.6858 - Writtenwork Number: 563 - Overall Precision: 0.6928 - Overall Recall: 0.7142 - Overall F1: 0.7033 - Overall Accuracy: 0.9355 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Aerospacemanufacturer Precision | Aerospacemanufacturer Recall | Aerospacemanufacturer F1 | Aerospacemanufacturer Number | Anatomicalstructure Precision | Anatomicalstructure Recall | Anatomicalstructure F1 | Anatomicalstructure Number | Artwork Precision | Artwork Recall | Artwork F1 | Artwork Number | Artist Precision | Artist Recall | Artist F1 | Artist Number | Athlete Precision | Athlete Recall | Athlete F1 | Athlete Number | Carmanufacturer Precision | Carmanufacturer Recall | Carmanufacturer F1 | Carmanufacturer Number | Cleric Precision | Cleric Recall | Cleric F1 | Cleric Number | Clothing Precision | Clothing Recall | Clothing F1 | Clothing Number | Disease Precision | Disease Recall | Disease F1 | Disease Number | Drink Precision | Drink Recall | Drink F1 | Drink Number | Facility Precision | Facility Recall | Facility F1 | Facility Number | Food Precision | Food Recall | Food F1 | Food Number | Humansettlement Precision | Humansettlement Recall | Humansettlement F1 | Humansettlement Number | Medicalprocedure Precision | Medicalprocedure Recall | Medicalprocedure F1 | Medicalprocedure Number | Medication/vaccine Precision | Medication/vaccine Recall | Medication/vaccine F1 | Medication/vaccine Number | Musicalgrp Precision | Musicalgrp Recall | Musicalgrp F1 | Musicalgrp Number | Musicalwork Precision | Musicalwork Recall | Musicalwork F1 | Musicalwork Number | Org Precision | Org Recall | Org F1 | Org Number | Otherloc Precision | Otherloc Recall | Otherloc F1 | Otherloc Number | Otherper Precision | Otherper Recall | Otherper F1 | Otherper Number | Otherprod Precision | Otherprod Recall | Otherprod F1 | Otherprod Number | Politician Precision | Politician Recall | Politician F1 | Politician Number | Privatecorp Precision | Privatecorp Recall | Privatecorp F1 | Privatecorp Number | Publiccorp Precision | Publiccorp Recall | Publiccorp F1 | Publiccorp Number | Scientist Precision | Scientist Recall | Scientist F1 | Scientist Number | Software Precision | Software Recall | Software F1 | Software Number | Sportsgrp Precision | Sportsgrp Recall | Sportsgrp F1 | Sportsgrp Number | Sportsmanager Precision | Sportsmanager Recall | Sportsmanager F1 | Sportsmanager Number | Station Precision | Station Recall | Station F1 | Station Number | Symptom Precision | Symptom Recall | Symptom F1 | Symptom Number | Vehicle Precision | Vehicle Recall | Vehicle F1 | Vehicle Number | Visualwork 
Precision | Visualwork Recall | Visualwork F1 | Visualwork Number | Writtenwork Precision | Writtenwork Recall | Writtenwork F1 | Writtenwork Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:------:|:---------------:|:-------------------------------:|:----------------------------:|:------------------------:|:----------------------------:|:-----------------------------:|:--------------------------:|:----------------------:|:--------------------------:|:-----------------:|:--------------:|:----------:|:--------------:|:----------------:|:-------------:|:---------:|:-------------:|:-----------------:|:--------------:|:----------:|:--------------:|:-------------------------:|:----------------------:|:------------------:|:----------------------:|:----------------:|:-------------:|:---------:|:-------------:|:------------------:|:---------------:|:-----------:|:---------------:|:-----------------:|:--------------:|:----------:|:--------------:|:---------------:|:------------:|:--------:|:------------:|:------------------:|:---------------:|:-----------:|:---------------:|:--------------:|:-----------:|:-------:|:-----------:|:-------------------------:|:----------------------:|:------------------:|:----------------------:|:--------------------------:|:-----------------------:|:-------------------:|:-----------------------:|:----------------------------:|:-------------------------:|:---------------------:|:-------------------------:|:--------------------:|:-----------------:|:-------------:|:-----------------:|:---------------------:|:------------------:|:--------------:|:------------------:|:-------------:|:----------:|:------:|:----------:|:------------------:|:---------------:|:-----------:|:---------------:|:------------------:|:---------------:|:-----------:|:---------------:|:-------------------:|:----------------:|:------------:|:----------------:|:--------------------:|:-----------------:|:-------------:|:-----------------:|:---------------------:|:------------------:|:--------------:|:------------------:|:--------------------:|:-----------------:|:-------------:|:-----------------:|:-------------------:|:----------------:|:------------:|:----------------:|:------------------:|:---------------:|:-----------:|:---------------:|:-------------------:|:----------------:|:------------:|:----------------:|:-----------------------:|:--------------------:|:----------------:|:--------------------:|:-----------------:|:--------------:|:----------:|:--------------:|:-----------------:|:--------------:|:----------:|:--------------:|:-----------------:|:--------------:|:----------:|:--------------:|:--------------------:|:-----------------:|:-------------:|:-----------------:|:---------------------:|:------------------:|:--------------:|:------------------:|:-----------------:|:--------------:|:----------:|:----------------:| | 0.2881 | 1.0 | 21353 | 0.2534 | 0.5191 | 0.6884 | 0.5919 | 138 | 0.5693 | 0.6872 | 0.6228 | 227 | 0.4366 | 0.4733 | 0.4542 | 131 | 0.7109 | 0.8055 | 0.7552 | 1722 | 0.6782 | 0.6801 | 0.6792 | 719 | 0.6552 | 0.7170 | 0.6847 | 159 | 0.4874 | 0.4826 | 0.485 | 201 | 0.4639 | 0.6016 | 0.5238 | 128 | 0.4799 | 0.7222 | 0.5766 | 198 | 0.55 | 0.6923 | 0.6130 | 143 | 0.5305 | 0.6640 | 0.5898 | 497 | 0.4042 | 0.7196 | 0.5176 | 214 | 0.8164 | 0.8792 | 0.8466 | 1689 | 0.5799 | 0.6901 | 0.6302 | 142 | 0.5945 | 0.755 | 0.6652 | 200 | 0.7445 | 0.6425 | 0.6898 | 372 | 0.6667 | 0.6634 | 0.6650 | 407 | 0.5585 | 0.5652 | 0.5618 | 667 | 0.6724 
| 0.5223 | 0.5879 | 224 | 0.3835 | 0.4773 | 0.4253 | 859 | 0.5237 | 0.4596 | 0.4895 | 433 | 0.5176 | 0.4643 | 0.4895 | 603 | 0.4688 | 0.1163 | 0.1863 | 129 | 0.4207 | 0.6008 | 0.4949 | 243 | 0.4099 | 0.3492 | 0.3771 | 189 | 0.6686 | 0.7362 | 0.7008 | 307 | 0.7688 | 0.7948 | 0.7816 | 385 | 0.5136 | 0.5825 | 0.5459 | 194 | 0.6667 | 0.8041 | 0.7290 | 194 | 0.5588 | 0.1473 | 0.2331 | 129 | 0.4910 | 0.5291 | 0.5093 | 206 | 0.6662 | 0.7489 | 0.7052 | 693 | 0.6180 | 0.5861 | 0.6016 | 563 | 0.6136 | 0.6640 | 0.6378 | 0.9220 | | 0.2195 | 2.0 | 42706 | 0.2288 | 0.6409 | 0.8406 | 0.7273 | 138 | 0.6908 | 0.6300 | 0.6590 | 227 | 0.5625 | 0.5496 | 0.5560 | 131 | 0.7336 | 0.8124 | 0.7710 | 1722 | 0.6466 | 0.8067 | 0.7178 | 719 | 0.6095 | 0.8050 | 0.6938 | 159 | 0.5848 | 0.4975 | 0.5376 | 201 | 0.5260 | 0.6328 | 0.5745 | 128 | 0.5427 | 0.6414 | 0.5880 | 198 | 0.7042 | 0.6993 | 0.7018 | 143 | 0.6157 | 0.7123 | 0.6604 | 497 | 0.6140 | 0.4907 | 0.5455 | 214 | 0.8454 | 0.8745 | 0.8597 | 1689 | 0.6125 | 0.6901 | 0.6490 | 142 | 0.6898 | 0.745 | 0.7163 | 200 | 0.7299 | 0.7339 | 0.7319 | 372 | 0.6901 | 0.7224 | 0.7059 | 407 | 0.6224 | 0.5907 | 0.6062 | 667 | 0.7312 | 0.6071 | 0.6634 | 224 | 0.4851 | 0.4750 | 0.4800 | 859 | 0.5994 | 0.4804 | 0.5333 | 433 | 0.5675 | 0.5091 | 0.5367 | 603 | 0.6905 | 0.4496 | 0.5446 | 129 | 0.5516 | 0.6379 | 0.5916 | 243 | 0.4570 | 0.3651 | 0.4059 | 189 | 0.7508 | 0.7459 | 0.7484 | 307 | 0.7833 | 0.8260 | 0.8040 | 385 | 0.7071 | 0.5103 | 0.5928 | 194 | 0.6920 | 0.7990 | 0.7416 | 194 | 0.4590 | 0.4341 | 0.4462 | 129 | 0.4717 | 0.7282 | 0.5725 | 206 | 0.7275 | 0.7821 | 0.7538 | 693 | 0.6618 | 0.6430 | 0.6523 | 563 | 0.6711 | 0.6946 | 0.6827 | 0.9317 | | 0.1965 | 3.0 | 64059 | 0.2148 | 0.7073 | 0.8406 | 0.7682 | 138 | 0.6762 | 0.7269 | 0.7006 | 227 | 0.5802 | 0.5802 | 0.5802 | 131 | 0.7565 | 0.7938 | 0.7747 | 1722 | 0.7195 | 0.7636 | 0.7409 | 719 | 0.6806 | 0.8176 | 0.7429 | 159 | 0.6867 | 0.5124 | 0.5869 | 201 | 0.5797 | 0.625 | 0.6015 | 128 | 0.6262 | 0.6768 | 0.6505 | 198 | 0.7296 | 0.8112 | 0.7682 | 143 | 0.6439 | 0.7203 | 0.6800 | 497 | 0.6786 | 0.5327 | 0.5969 | 214 | 0.8594 | 0.8792 | 0.8692 | 1689 | 0.6545 | 0.7606 | 0.7036 | 142 | 0.7183 | 0.765 | 0.7409 | 200 | 0.7132 | 0.7688 | 0.7400 | 372 | 0.7513 | 0.7052 | 0.7275 | 407 | 0.6335 | 0.6117 | 0.6224 | 667 | 0.7514 | 0.6205 | 0.6797 | 224 | 0.4558 | 0.5821 | 0.5112 | 859 | 0.6076 | 0.5543 | 0.5797 | 433 | 0.6228 | 0.4793 | 0.5417 | 603 | 0.7159 | 0.4884 | 0.5806 | 129 | 0.56 | 0.6914 | 0.6188 | 243 | 0.4545 | 0.4497 | 0.4521 | 189 | 0.7159 | 0.8046 | 0.7577 | 307 | 0.7845 | 0.8701 | 0.8251 | 385 | 0.6667 | 0.5361 | 0.5943 | 194 | 0.7406 | 0.8093 | 0.7734 | 194 | 0.6316 | 0.5581 | 0.5926 | 129 | 0.5514 | 0.6505 | 0.5969 | 206 | 0.7538 | 0.7951 | 0.7739 | 693 | 0.6913 | 0.6803 | 0.6858 | 563 | 0.6928 | 0.7142 | 0.7033 | 0.9355 | | 0.1665 | 4.0 | 85412 | 0.2193 | 0.7917 | 0.8261 | 0.8085 | 138 | 0.7069 | 0.7225 | 0.7146 | 227 | 0.5867 | 0.6718 | 0.6263 | 131 | 0.7710 | 0.7938 | 0.7823 | 1722 | 0.6962 | 0.7650 | 0.7290 | 719 | 0.7904 | 0.8302 | 0.8098 | 159 | 0.6221 | 0.5323 | 0.5737 | 201 | 0.5743 | 0.6641 | 0.6159 | 128 | 0.5966 | 0.7172 | 0.6514 | 198 | 0.7914 | 0.7692 | 0.7801 | 143 | 0.6395 | 0.7103 | 0.6730 | 497 | 0.6422 | 0.6121 | 0.6268 | 214 | 0.8338 | 0.9059 | 0.8683 | 1689 | 0.6711 | 0.7183 | 0.6939 | 142 | 0.7635 | 0.775 | 0.7692 | 200 | 0.7669 | 0.7608 | 0.7638 | 372 | 0.6872 | 0.7936 | 0.7366 | 407 | 0.7100 | 0.5982 | 0.6493 | 667 | 0.7181 | 0.7277 | 0.7228 | 224 | 0.4765 | 0.5553 | 0.5129 | 859 | 0.6225 | 0.5866 | 
0.6040 | 433 | 0.6078 | 0.5191 | 0.5599 | 603 | 0.7222 | 0.5039 | 0.5936 | 129 | 0.6065 | 0.7737 | 0.6799 | 243 | 0.4783 | 0.5238 | 0.5 | 189 | 0.7313 | 0.7980 | 0.7632 | 307 | 0.8401 | 0.8597 | 0.8498 | 385 | 0.6058 | 0.6495 | 0.6269 | 194 | 0.7512 | 0.7938 | 0.7719 | 194 | 0.6983 | 0.6279 | 0.6612 | 129 | 0.5804 | 0.7184 | 0.6421 | 206 | 0.7571 | 0.8052 | 0.7804 | 693 | 0.6916 | 0.6892 | 0.6904 | 563 | 0.7012 | 0.7309 | 0.7158 | 0.9375 | | 0.1314 | 5.0 | 106765 | 0.2272 | 0.7707 | 0.8768 | 0.8203 | 138 | 0.7137 | 0.7577 | 0.7350 | 227 | 0.6058 | 0.6336 | 0.6194 | 131 | 0.7229 | 0.8513 | 0.7819 | 1722 | 0.7361 | 0.7761 | 0.7556 | 719 | 0.6839 | 0.8302 | 0.7500 | 159 | 0.5845 | 0.6020 | 0.5931 | 201 | 0.6148 | 0.6484 | 0.6312 | 128 | 0.6121 | 0.7172 | 0.6605 | 198 | 0.6970 | 0.8042 | 0.7468 | 143 | 0.6438 | 0.6982 | 0.6699 | 497 | 0.6197 | 0.6776 | 0.6473 | 214 | 0.8390 | 0.8887 | 0.8631 | 1689 | 0.7333 | 0.6972 | 0.7148 | 142 | 0.7443 | 0.815 | 0.7780 | 200 | 0.7217 | 0.7876 | 0.7532 | 372 | 0.7113 | 0.7568 | 0.7333 | 407 | 0.6682 | 0.6522 | 0.6601 | 667 | 0.7136 | 0.7009 | 0.7072 | 224 | 0.5351 | 0.4796 | 0.5058 | 859 | 0.5930 | 0.6259 | 0.6090 | 433 | 0.6112 | 0.5240 | 0.5643 | 603 | 0.7767 | 0.6202 | 0.6897 | 129 | 0.6254 | 0.7284 | 0.6730 | 243 | 0.4815 | 0.4815 | 0.4815 | 189 | 0.7654 | 0.8078 | 0.7861 | 307 | 0.7611 | 0.8935 | 0.8220 | 385 | 0.6667 | 0.6082 | 0.6361 | 194 | 0.7828 | 0.7990 | 0.7908 | 194 | 0.6692 | 0.6899 | 0.6794 | 129 | 0.5983 | 0.6942 | 0.6427 | 206 | 0.7584 | 0.8153 | 0.7858 | 693 | 0.6740 | 0.7052 | 0.6892 | 563 | 0.7006 | 0.7401 | 0.7198 | 0.9378 | | 0.1224 | 6.0 | 128118 | 0.2275 | 0.8286 | 0.8406 | 0.8345 | 138 | 0.6898 | 0.7445 | 0.7161 | 227 | 0.6013 | 0.7252 | 0.6574 | 131 | 0.7574 | 0.8415 | 0.7972 | 1722 | 0.7400 | 0.7483 | 0.7441 | 719 | 0.8084 | 0.8491 | 0.8282 | 159 | 0.7055 | 0.5721 | 0.6319 | 201 | 0.6061 | 0.625 | 0.6154 | 128 | 0.7090 | 0.6768 | 0.6925 | 198 | 0.7868 | 0.7483 | 0.7670 | 143 | 0.6454 | 0.7545 | 0.6957 | 497 | 0.6287 | 0.6963 | 0.6608 | 214 | 0.8548 | 0.8851 | 0.8697 | 1689 | 0.7669 | 0.7183 | 0.7418 | 142 | 0.75 | 0.825 | 0.7857 | 200 | 0.7130 | 0.8280 | 0.7662 | 372 | 0.6848 | 0.8059 | 0.7404 | 407 | 0.7112 | 0.6462 | 0.6771 | 667 | 0.7879 | 0.6964 | 0.7393 | 224 | 0.5378 | 0.5378 | 0.5378 | 859 | 0.6554 | 0.5797 | 0.6152 | 433 | 0.5946 | 0.5887 | 0.5917 | 603 | 0.8131 | 0.6744 | 0.7373 | 129 | 0.6483 | 0.7737 | 0.7054 | 243 | 0.5537 | 0.5185 | 0.5355 | 189 | 0.7704 | 0.7980 | 0.784 | 307 | 0.8415 | 0.8961 | 0.8679 | 385 | 0.6566 | 0.6701 | 0.6633 | 194 | 0.7879 | 0.8041 | 0.7959 | 194 | 0.6159 | 0.7829 | 0.6894 | 129 | 0.5887 | 0.7087 | 0.6432 | 206 | 0.7864 | 0.8023 | 0.7943 | 693 | 0.7388 | 0.6732 | 0.7045 | 563 | 0.7221 | 0.7475 | 0.7346 | 0.9406 | | 0.0964 | 7.0 | 149471 | 0.2456 | 0.7947 | 0.8696 | 0.8304 | 138 | 0.7107 | 0.7577 | 0.7335 | 227 | 0.6522 | 0.6870 | 0.6691 | 131 | 0.7780 | 0.8182 | 0.7976 | 1722 | 0.7546 | 0.7483 | 0.7514 | 719 | 0.7870 | 0.8365 | 0.8110 | 159 | 0.6020 | 0.6020 | 0.6020 | 201 | 0.58 | 0.6797 | 0.6259 | 128 | 0.6129 | 0.7677 | 0.6816 | 198 | 0.7468 | 0.8252 | 0.7841 | 143 | 0.6642 | 0.7284 | 0.6948 | 497 | 0.6840 | 0.6776 | 0.6808 | 214 | 0.8586 | 0.8810 | 0.8697 | 1689 | 0.7836 | 0.7394 | 0.7609 | 142 | 0.7082 | 0.825 | 0.7621 | 200 | 0.7731 | 0.7876 | 0.7803 | 372 | 0.7606 | 0.7494 | 0.7550 | 407 | 0.6726 | 0.6837 | 0.6781 | 667 | 0.7581 | 0.7277 | 0.7426 | 224 | 0.5176 | 0.5634 | 0.5396 | 859 | 0.6599 | 0.6005 | 0.6288 | 433 | 0.5938 | 0.5672 | 0.5802 | 603 | 0.8776 | 0.6667 | 0.7577 | 129 | 
0.7198 | 0.7613 | 0.74 | 243 | 0.5078 | 0.5185 | 0.5131 | 189 | 0.7933 | 0.7752 | 0.7842 | 307 | 0.8033 | 0.8909 | 0.8448 | 385 | 0.6071 | 0.7010 | 0.6507 | 194 | 0.7429 | 0.8041 | 0.7723 | 194 | 0.7321 | 0.6357 | 0.6805 | 129 | 0.5775 | 0.7233 | 0.6422 | 206 | 0.7858 | 0.7994 | 0.7926 | 693 | 0.6678 | 0.7282 | 0.6967 | 563 | 0.7199 | 0.7475 | 0.7334 | 0.9403 | | 0.0838 | 8.0 | 170824 | 0.2562 | 0.7722 | 0.8841 | 0.8243 | 138 | 0.6929 | 0.7753 | 0.7318 | 227 | 0.6483 | 0.7176 | 0.6812 | 131 | 0.7859 | 0.8101 | 0.7978 | 1722 | 0.7419 | 0.7316 | 0.7367 | 719 | 0.7389 | 0.8365 | 0.7847 | 159 | 0.5797 | 0.5970 | 0.5882 | 201 | 0.5878 | 0.6797 | 0.6304 | 128 | 0.6574 | 0.7172 | 0.6860 | 198 | 0.7597 | 0.8182 | 0.7879 | 143 | 0.7108 | 0.7123 | 0.7116 | 497 | 0.6511 | 0.7150 | 0.6815 | 214 | 0.8791 | 0.8822 | 0.8806 | 1689 | 0.75 | 0.7606 | 0.7552 | 142 | 0.7594 | 0.805 | 0.7816 | 200 | 0.7842 | 0.8011 | 0.7926 | 372 | 0.7395 | 0.7813 | 0.7599 | 407 | 0.6965 | 0.6777 | 0.6869 | 667 | 0.7179 | 0.75 | 0.7336 | 224 | 0.5081 | 0.5809 | 0.5421 | 859 | 0.6327 | 0.6166 | 0.6246 | 433 | 0.6094 | 0.5821 | 0.5954 | 603 | 0.8776 | 0.6667 | 0.7577 | 129 | 0.7059 | 0.7407 | 0.7229 | 243 | 0.5444 | 0.5185 | 0.5312 | 189 | 0.7722 | 0.7948 | 0.7833 | 307 | 0.8067 | 0.8779 | 0.8408 | 385 | 0.6408 | 0.6804 | 0.6600 | 194 | 0.7546 | 0.8402 | 0.7951 | 194 | 0.6831 | 0.7519 | 0.7159 | 129 | 0.6255 | 0.7136 | 0.6667 | 206 | 0.7392 | 0.8427 | 0.7876 | 693 | 0.7289 | 0.7069 | 0.7178 | 563 | 0.7242 | 0.7514 | 0.7376 | 0.9414 | | 0.0753 | 9.0 | 192177 | 0.2708 | 0.8026 | 0.8841 | 0.8414 | 138 | 0.7054 | 0.8018 | 0.7505 | 227 | 0.6277 | 0.6565 | 0.6418 | 131 | 0.7762 | 0.8380 | 0.8059 | 1722 | 0.7552 | 0.7552 | 0.7552 | 719 | 0.7701 | 0.8428 | 0.8048 | 159 | 0.6610 | 0.5821 | 0.6190 | 201 | 0.5915 | 0.6562 | 0.6222 | 128 | 0.6575 | 0.7273 | 0.6906 | 198 | 0.7887 | 0.7832 | 0.7860 | 143 | 0.7050 | 0.7163 | 0.7106 | 497 | 0.6270 | 0.7383 | 0.6781 | 214 | 0.8441 | 0.8881 | 0.8656 | 1689 | 0.7589 | 0.7535 | 0.7562 | 142 | 0.7125 | 0.855 | 0.7773 | 200 | 0.755 | 0.8118 | 0.7824 | 372 | 0.7512 | 0.8010 | 0.7753 | 407 | 0.6788 | 0.6972 | 0.6879 | 667 | 0.7830 | 0.7411 | 0.7615 | 224 | 0.5155 | 0.5821 | 0.5467 | 859 | 0.6386 | 0.6490 | 0.6438 | 433 | 0.6629 | 0.5804 | 0.6189 | 603 | 0.8598 | 0.7132 | 0.7797 | 129 | 0.6667 | 0.7490 | 0.7054 | 243 | 0.4787 | 0.5344 | 0.505 | 189 | 0.7610 | 0.7883 | 0.7744 | 307 | 0.8285 | 0.8909 | 0.8586 | 385 | 0.7027 | 0.6701 | 0.6860 | 194 | 0.7778 | 0.8299 | 0.8030 | 194 | 0.6923 | 0.7674 | 0.7279 | 129 | 0.6396 | 0.6893 | 0.6636 | 206 | 0.7879 | 0.8095 | 0.7986 | 693 | 0.7110 | 0.7123 | 0.7116 | 563 | 0.7258 | 0.7593 | 0.7422 | 0.9424 | | 0.0574 | 10.0 | 213530 | 0.2862 | 0.8 | 0.8986 | 0.8464 | 138 | 0.7375 | 0.7797 | 0.7580 | 227 | 0.6471 | 0.6718 | 0.6592 | 131 | 0.7831 | 0.8008 | 0.7918 | 1722 | 0.6997 | 0.7747 | 0.7353 | 719 | 0.7714 | 0.8491 | 0.8084 | 159 | 0.6091 | 0.5970 | 0.6030 | 201 | 0.6357 | 0.6406 | 0.6381 | 128 | 0.7 | 0.7071 | 0.7035 | 198 | 0.7436 | 0.8112 | 0.7759 | 143 | 0.6729 | 0.7243 | 0.6977 | 497 | 0.6830 | 0.7150 | 0.6986 | 214 | 0.8627 | 0.8857 | 0.8741 | 1689 | 0.7483 | 0.7535 | 0.7509 | 142 | 0.7611 | 0.86 | 0.8075 | 200 | 0.7846 | 0.8226 | 0.8031 | 372 | 0.7640 | 0.7715 | 0.7677 | 407 | 0.6921 | 0.6942 | 0.6931 | 667 | 0.7478 | 0.7545 | 0.7511 | 224 | 0.5079 | 0.5960 | 0.5485 | 859 | 0.6457 | 0.6397 | 0.6427 | 433 | 0.6223 | 0.5821 | 0.6015 | 603 | 0.8704 | 0.7287 | 0.7932 | 129 | 0.7041 | 0.7737 | 0.7373 | 243 | 0.5073 | 0.5503 | 0.5279 | 189 | 0.7680 | 0.7980 | 
0.7827 | 307 | 0.8658 | 0.8883 | 0.8769 | 385 | 0.7111 | 0.6598 | 0.6845 | 194 | 0.7681 | 0.8196 | 0.7930 | 194 | 0.7197 | 0.7364 | 0.7280 | 129 | 0.6192 | 0.7184 | 0.6652 | 206 | 0.7922 | 0.8196 | 0.8057 | 693 | 0.7206 | 0.7194 | 0.72 | 563 | 0.7282 | 0.7572 | 0.7424 | 0.9424 | | 0.0568 | 11.0 | 234883 | 0.2951 | 0.8026 | 0.8841 | 0.8414 | 138 | 0.7458 | 0.7753 | 0.7603 | 227 | 0.6241 | 0.6718 | 0.6471 | 131 | 0.7737 | 0.8240 | 0.7981 | 1722 | 0.7646 | 0.7455 | 0.7549 | 719 | 0.8121 | 0.8428 | 0.8272 | 159 | 0.6685 | 0.5920 | 0.6280 | 201 | 0.6870 | 0.6172 | 0.6502 | 128 | 0.7150 | 0.6970 | 0.7059 | 198 | 0.7872 | 0.7762 | 0.7817 | 143 | 0.6631 | 0.7485 | 0.7032 | 497 | 0.6842 | 0.6682 | 0.6761 | 214 | 0.8594 | 0.8828 | 0.8709 | 1689 | 0.7863 | 0.7254 | 0.7546 | 142 | 0.7824 | 0.845 | 0.8125 | 200 | 0.7628 | 0.8038 | 0.7827 | 372 | 0.7664 | 0.7740 | 0.7702 | 407 | 0.7232 | 0.6777 | 0.6997 | 667 | 0.7820 | 0.7366 | 0.7586 | 224 | 0.5362 | 0.5949 | 0.5640 | 859 | 0.6306 | 0.6467 | 0.6385 | 433 | 0.6472 | 0.5871 | 0.6157 | 603 | 0.8857 | 0.7209 | 0.7949 | 129 | 0.7138 | 0.7901 | 0.7500 | 243 | 0.5075 | 0.5397 | 0.5231 | 189 | 0.7834 | 0.8013 | 0.7923 | 307 | 0.8561 | 0.8961 | 0.8756 | 385 | 0.6809 | 0.6598 | 0.6702 | 194 | 0.7656 | 0.8247 | 0.7940 | 194 | 0.6736 | 0.7519 | 0.7106 | 129 | 0.6262 | 0.6505 | 0.6381 | 206 | 0.7892 | 0.8211 | 0.8048 | 693 | 0.7561 | 0.7105 | 0.7326 | 563 | 0.7390 | 0.7548 | 0.7468 | 0.9431 | | 0.0465 | 12.0 | 256236 | 0.3103 | 0.8194 | 0.9203 | 0.8669 | 138 | 0.7031 | 0.7930 | 0.7453 | 227 | 0.5867 | 0.6718 | 0.6263 | 131 | 0.7829 | 0.8211 | 0.8016 | 1722 | 0.7582 | 0.7413 | 0.7496 | 719 | 0.8059 | 0.8616 | 0.8328 | 159 | 0.6648 | 0.5920 | 0.6263 | 201 | 0.6385 | 0.6484 | 0.6434 | 128 | 0.6827 | 0.7172 | 0.6995 | 198 | 0.7778 | 0.8322 | 0.8041 | 143 | 0.6679 | 0.7324 | 0.6987 | 497 | 0.6864 | 0.7056 | 0.6959 | 214 | 0.8473 | 0.8905 | 0.8684 | 1689 | 0.7552 | 0.7606 | 0.7579 | 142 | 0.7362 | 0.865 | 0.7954 | 200 | 0.7487 | 0.8011 | 0.7740 | 372 | 0.7470 | 0.7764 | 0.7614 | 407 | 0.7042 | 0.7031 | 0.7037 | 667 | 0.7435 | 0.7634 | 0.7533 | 224 | 0.5438 | 0.5856 | 0.5639 | 859 | 0.6261 | 0.6536 | 0.6395 | 433 | 0.6442 | 0.6186 | 0.6311 | 603 | 0.8482 | 0.7364 | 0.7884 | 129 | 0.7283 | 0.7613 | 0.7445 | 243 | 0.5075 | 0.5397 | 0.5231 | 189 | 0.7915 | 0.7915 | 0.7915 | 307 | 0.8564 | 0.8987 | 0.8771 | 385 | 0.6211 | 0.7268 | 0.6698 | 194 | 0.7633 | 0.8144 | 0.7880 | 194 | 0.7313 | 0.7597 | 0.7452 | 129 | 0.6450 | 0.7233 | 0.6819 | 206 | 0.7758 | 0.8139 | 0.7944 | 693 | 0.7189 | 0.7176 | 0.7182 | 563 | 0.7312 | 0.7621 | 0.7464 | 0.9428 | | 0.0459 | 13.0 | 277589 | 0.3141 | 0.8267 | 0.8986 | 0.8611 | 138 | 0.7254 | 0.7797 | 0.7516 | 227 | 0.6099 | 0.6565 | 0.6324 | 131 | 0.7929 | 0.8182 | 0.8054 | 1722 | 0.7562 | 0.7677 | 0.7619 | 719 | 0.8084 | 0.8491 | 0.8282 | 159 | 0.6302 | 0.6020 | 0.6158 | 201 | 0.6412 | 0.6562 | 0.6486 | 128 | 0.6931 | 0.7071 | 0.7000 | 198 | 0.7770 | 0.8042 | 0.7904 | 143 | 0.6834 | 0.7384 | 0.7099 | 497 | 0.6967 | 0.6869 | 0.6918 | 214 | 0.8631 | 0.8845 | 0.8737 | 1689 | 0.7939 | 0.7324 | 0.7619 | 142 | 0.7830 | 0.83 | 0.8058 | 200 | 0.7822 | 0.8011 | 0.7915 | 372 | 0.7482 | 0.7740 | 0.7609 | 407 | 0.6982 | 0.6972 | 0.6977 | 667 | 0.7867 | 0.7411 | 0.7632 | 224 | 0.5323 | 0.5856 | 0.5576 | 859 | 0.6469 | 0.6559 | 0.6514 | 433 | 0.6512 | 0.6036 | 0.6265 | 603 | 0.8611 | 0.7209 | 0.7848 | 129 | 0.7287 | 0.7737 | 0.7505 | 243 | 0.5185 | 0.5185 | 0.5185 | 189 | 0.7910 | 0.8013 | 0.7961 | 307 | 0.8715 | 0.8987 | 0.8849 | 385 | 0.7283 | 0.6907 | 
0.7090 | 194 | 0.7512 | 0.7938 | 0.7719 | 194 | 0.7313 | 0.7597 | 0.7452 | 129 | 0.6147 | 0.6893 | 0.6499 | 206 | 0.7947 | 0.8268 | 0.8105 | 693 | 0.7170 | 0.7247 | 0.7208 | 563 | 0.7406 | 0.7588 | 0.7496 | 0.9436 | | 0.0386 | 14.0 | 298942 | 0.3268 | 0.8333 | 0.9058 | 0.8681 | 138 | 0.7092 | 0.7841 | 0.7448 | 227 | 0.6028 | 0.6489 | 0.625 | 131 | 0.7848 | 0.8153 | 0.7998 | 1722 | 0.7701 | 0.7594 | 0.7647 | 719 | 0.8047 | 0.8553 | 0.8293 | 159 | 0.6373 | 0.6119 | 0.6244 | 201 | 0.6204 | 0.6641 | 0.6415 | 128 | 0.6794 | 0.7172 | 0.6978 | 198 | 0.7986 | 0.8042 | 0.8014 | 143 | 0.6691 | 0.7324 | 0.6993 | 497 | 0.7109 | 0.7009 | 0.7059 | 214 | 0.8624 | 0.8834 | 0.8728 | 1689 | 0.7754 | 0.7535 | 0.7643 | 142 | 0.7757 | 0.83 | 0.8019 | 200 | 0.7712 | 0.8065 | 0.7884 | 372 | 0.7621 | 0.7715 | 0.7668 | 407 | 0.6782 | 0.7076 | 0.6926 | 667 | 0.7661 | 0.7455 | 0.7557 | 224 | 0.5417 | 0.5669 | 0.5540 | 859 | 0.6388 | 0.6536 | 0.6461 | 433 | 0.6160 | 0.6119 | 0.6140 | 603 | 0.8889 | 0.7442 | 0.8101 | 129 | 0.7431 | 0.7737 | 0.7581 | 243 | 0.5025 | 0.5397 | 0.5204 | 189 | 0.7915 | 0.7915 | 0.7915 | 307 | 0.8618 | 0.8909 | 0.8761 | 385 | 0.6699 | 0.7113 | 0.6900 | 194 | 0.7621 | 0.8093 | 0.7850 | 194 | 0.7226 | 0.7674 | 0.7444 | 129 | 0.6384 | 0.6942 | 0.6651 | 206 | 0.7872 | 0.8167 | 0.8017 | 693 | 0.7140 | 0.7229 | 0.7184 | 563 | 0.7358 | 0.7585 | 0.7470 | 0.9435 | | 0.037 | 15.0 | 320295 | 0.3308 | 0.8117 | 0.9058 | 0.8562 | 138 | 0.716 | 0.7885 | 0.7505 | 227 | 0.6028 | 0.6489 | 0.625 | 131 | 0.7838 | 0.8252 | 0.8040 | 1722 | 0.7835 | 0.7552 | 0.7691 | 719 | 0.7953 | 0.8553 | 0.8242 | 159 | 0.6630 | 0.5970 | 0.6283 | 201 | 0.6296 | 0.6641 | 0.6464 | 128 | 0.6961 | 0.7172 | 0.7065 | 198 | 0.8 | 0.8112 | 0.8056 | 143 | 0.6849 | 0.7304 | 0.7069 | 497 | 0.7156 | 0.7056 | 0.7106 | 214 | 0.8631 | 0.8845 | 0.8737 | 1689 | 0.7852 | 0.7465 | 0.7653 | 142 | 0.7602 | 0.84 | 0.7981 | 200 | 0.7601 | 0.8091 | 0.7839 | 372 | 0.7506 | 0.7617 | 0.7561 | 407 | 0.6943 | 0.7151 | 0.7046 | 667 | 0.7767 | 0.7455 | 0.7608 | 224 | 0.5351 | 0.5588 | 0.5467 | 859 | 0.6453 | 0.6513 | 0.6483 | 433 | 0.6277 | 0.6153 | 0.6214 | 603 | 0.8981 | 0.7519 | 0.8186 | 129 | 0.7362 | 0.7695 | 0.7525 | 243 | 0.5178 | 0.5397 | 0.5285 | 189 | 0.7806 | 0.7883 | 0.7844 | 307 | 0.8804 | 0.8987 | 0.8895 | 385 | 0.6863 | 0.7216 | 0.7035 | 194 | 0.7696 | 0.8093 | 0.7889 | 194 | 0.7333 | 0.7674 | 0.7500 | 129 | 0.6471 | 0.6942 | 0.6698 | 206 | 0.7917 | 0.8225 | 0.8068 | 693 | 0.7255 | 0.7229 | 0.7242 | 563 | 0.7404 | 0.7600 | 0.7501 | 0.9437 | ### Framework versions - Transformers 4.26.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
Declan/WallStreetJournal_model_v3
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-12-01T19:21:45Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cryptopunks. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained("juancopi81/sd-class-cryptopunks-64") image = pipeline().images[0] image ```
Declan/WallStreetJournal_model_v4
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
Access to model centenocode/faceHug is restricted and you are not in the authorized list. Visit https://huggingface.co/centenocode/faceHug to ask for access.
Declan/WallStreetJournal_model_v5
[ "pytorch", "bert", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- datasets: - cardiffnlp/tweet_topic_single metrics: - f1 - accuracy model-index: - name: cardiffnlp/twitter-roberta-base-dec2021-topic-single results: - task: type: text-classification name: Text Classification dataset: name: cardiffnlp/tweet_topic_single type: cardiffnlp/tweet_topic_single split: test_2021 metrics: - name: Micro F1 (cardiffnlp/tweet_topic_single) type: micro_f1_cardiffnlp/tweet_topic_single value: 0.896042528056704 - name: Macro F1 (cardiffnlp/tweet_topic_single) type: micro_f1_cardiffnlp/tweet_topic_single value: 0.7861641383871055 - name: Accuracy (cardiffnlp/tweet_topic_single) type: accuracy_cardiffnlp/tweet_topic_single value: 0.896042528056704 pipeline_tag: text-classification widget: - text: Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}} example_title: "topic_classification 1" - text: Yes, including Medicare and social security saving👍 example_title: "sentiment 1" - text: All two of them taste like ass. example_title: "offensive 1" - text: If you wanna look like a badass, have drama on social media example_title: "irony 1" - text: Whoever just unfollowed me you a bitch example_title: "hate 1" - text: I love swimming for the same reason I love meditating...the feeling of weightlessness. example_title: "emotion 1" - text: Beautiful sunset last night from the pontoon @TupperLakeNY example_title: "emoji 1" --- # cardiffnlp/twitter-roberta-base-dec2021-topic-single This model is a fine-tuned version of [cardiffnlp/twitter-roberta-base-dec2021](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021) on the [`cardiffnlp/tweet_topic_single`](https://huggingface.co/datasets/cardiffnlp/tweet_topic_single) via [`tweetnlp`](https://github.com/cardiffnlp/tweetnlp). Training split is `train_all` and parameters have been tuned on the validation split `validation_2021`. Following metrics are achieved on the test split `test_2021` ([link](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021-topic-single/raw/main/metric.json)). - F1 (micro): 0.896042528056704 - F1 (macro): 0.7861641383871055 - Accuracy: 0.896042528056704 ### Usage Install tweetnlp via pip. ```shell pip install tweetnlp ``` Load the model in python. ```python import tweetnlp model = tweetnlp.Classifier("cardiffnlp/twitter-roberta-base-dec2021-topic-single", max_length=128) model.predict('Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}}') ``` ### Reference ``` @inproceedings{camacho-collados-etal-2022-tweetnlp, title={{T}weet{NLP}: {C}utting-{E}dge {N}atural {L}anguage {P}rocessing for {S}ocial {M}edia}, author={Camacho-Collados, Jose and Rezaee, Kiamehr and Riahi, Talayeh and Ushio, Asahi and Loureiro, Daniel and Antypas, Dimosthenis and Boisson, Joanne and Espinosa-Anke, Luis and Liu, Fangyu and Mart{'\i}nez-C{'a}mara, Eugenio and others}, author = "Ushio, Asahi and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
Declan/WallStreetJournal_model_v6
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - autotrain - text-classification language: - en widget: - text: "I love AutoTrain 🤗" datasets: - alanila/autotrain-data-training co2_eq_emissions: emissions: 1.2620473255629743 --- # Model Trained Using AutoTrain - Problem type: Multi-class Classification - Model ID: 2307973004 - CO2 Emissions (in grams): 1.2620 ## Validation Metrics - Loss: 1.279 - Accuracy: 0.517 - Macro F1: 0.549 - Micro F1: 0.517 - Weighted F1: 0.443 - Macro Precision: 0.585 - Micro Precision: 0.517 - Weighted Precision: 0.480 - Macro Recall: 0.572 - Micro Recall: 0.517 - Weighted Recall: 0.517 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/alanila/autotrain-training-2307973004 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("alanila/auto_train", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("alanila/auto_train", use_auth_token=True) inputs = tokenizer("I love AutoTrain", return_tensors="pt") outputs = model(**inputs) ```
Declan/test_push
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - gigaword metrics: - rouge model-index: - name: t5-small-finetuned-giga results: - task: name: Sequence-to-sequence Language Modeling type: text2text-generation dataset: name: gigaword type: gigaword config: default split: train[:10%] args: default metrics: - name: Rouge1 type: rouge value: 26.6579 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-giga This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the gigaword dataset. It achieves the following results on the evaluation set: - Loss: 3.2594 - Rouge1: 26.6579 - Rouge2: 9.5505 - Rougel: 24.4987 - Rougelsum: 24.5146 - Gen Len: 13.5436 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 1.8512 | 1.0 | 23775 | 3.2594 | 26.6579 | 9.5505 | 24.4987 | 24.5146 | 13.5436 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
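A minimal sketch of how a gigaword-style headline model like this can be queried, assuming the fine-tuned checkpoint has been pushed to the Hub (the repo id below is a placeholder, not stated in the card):

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual fine-tuned checkpoint.
summarizer = pipeline("summarization", model="your-username/t5-small-finetuned-giga")

article = (
    "japan 's nec corp. and computer corp. of the united states said wednesday "
    "they had agreed to join forces in supercomputer sales ."
)
# Gigaword targets are short headlines, hence the small max_length.
print(summarizer(article, max_length=16, min_length=4)[0]["summary_text"])
```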
DeepChem/ChemBERTa-10M-MTR
[ "pytorch", "roberta", "arxiv:1910.09700", "transformers" ]
null
{ "architectures": [ "RobertaForRegression" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
708
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 53 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 20, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 1060, "warmup_steps": 106, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
DeepChem/ChemBERTa-5M-MTR
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "RobertaForRegression" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - common_voice model-index: - name: wav2vec2-large-xls-r-300m-dutch-baseline results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-dutch-baseline This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice dataset. It achieves the following results on the evaluation set: - Loss: 0.5107 - Wer: 0.2674 - Cer: 0.0863 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - total_eval_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | Cer | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:| | 3.655 | 1.31 | 400 | 0.9337 | 0.7332 | 0.2534 | | 0.42 | 2.61 | 800 | 0.5018 | 0.4115 | 0.1374 | | 0.2267 | 3.92 | 1200 | 0.4776 | 0.3791 | 0.1259 | | 0.1624 | 5.23 | 1600 | 0.4807 | 0.3590 | 0.1208 | | 0.135 | 6.54 | 2000 | 0.4899 | 0.3417 | 0.1121 | | 0.1179 | 7.84 | 2400 | 0.5096 | 0.3445 | 0.1133 | | 0.1035 | 9.15 | 2800 | 0.4563 | 0.3455 | 0.1129 | | 0.092 | 10.46 | 3200 | 0.5061 | 0.3382 | 0.1127 | | 0.0804 | 11.76 | 3600 | 0.4969 | 0.3285 | 0.1088 | | 0.0748 | 13.07 | 4000 | 0.5274 | 0.3380 | 0.1114 | | 0.0669 | 14.38 | 4400 | 0.5201 | 0.3115 | 0.1028 | | 0.0588 | 15.69 | 4800 | 0.5238 | 0.3212 | 0.1054 | | 0.0561 | 16.99 | 5200 | 0.5273 | 0.3185 | 0.1044 | | 0.0513 | 18.3 | 5600 | 0.5577 | 0.3032 | 0.1010 | | 0.0476 | 19.61 | 6000 | 0.5298 | 0.3050 | 0.1008 | | 0.0408 | 20.91 | 6400 | 0.5725 | 0.2982 | 0.0984 | | 0.0376 | 22.22 | 6800 | 0.5605 | 0.2953 | 0.0966 | | 0.0339 | 23.53 | 7200 | 0.5419 | 0.2865 | 0.0938 | | 0.0315 | 24.84 | 7600 | 0.5530 | 0.2782 | 0.0915 | | 0.0286 | 26.14 | 8000 | 0.5354 | 0.2788 | 0.0917 | | 0.0259 | 27.45 | 8400 | 0.5245 | 0.2715 | 0.0878 | | 0.0231 | 28.76 | 8800 | 0.5107 | 0.2674 | 0.0863 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.12.0+cu102 - Datasets 2.7.1 - Tokenizers 0.13.2
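A minimal transcription sketch, assuming the checkpoint is available on the Hub under a placeholder repo id (the card does not give one):

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual fine-tuned checkpoint.
asr = pipeline(
    "automatic-speech-recognition",
    model="your-username/wav2vec2-large-xls-r-300m-dutch-baseline",
)

# XLS-R models expect 16 kHz mono audio; the pipeline decodes audio files at that rate.
print(asr("dutch_sample.wav")["text"])
```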
DeepChem/ChemBERTa-77M-MLM
[ "pytorch", "roberta", "fill-mask", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "RobertaForMaskedLM" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,416
2022-12-01T19:47:03Z
--- datasets: - tweet_eval metrics: - f1 - accuracy model-index: - name: cardiffnlp/roberta-base-offensive results: - task: type: text-classification name: Text Classification dataset: name: tweet_eval type: offensive split: test metrics: - name: Micro F1 (tweet_eval/offensive) type: micro_f1_tweet_eval/offensive value: 0.8441860465116279 - name: Macro F1 (tweet_eval/offensive) type: micro_f1_tweet_eval/offensive value: 0.8038468085106383 - name: Accuracy (tweet_eval/offensive) type: accuracy_tweet_eval/offensive value: 0.8441860465116279 pipeline_tag: text-classification widget: - text: Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}} example_title: "topic_classification 1" - text: Yes, including Medicare and social security saving👍 example_title: "sentiment 1" - text: All two of them taste like ass. example_title: "offensive 1" - text: If you wanna look like a badass, have drama on social media example_title: "irony 1" - text: Whoever just unfollowed me you a bitch example_title: "hate 1" - text: I love swimming for the same reason I love meditating...the feeling of weightlessness. example_title: "emotion 1" - text: Beautiful sunset last night from the pontoon @TupperLakeNY example_title: "emoji 1" --- # cardiffnlp/roberta-base-offensive This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the [`tweet_eval (offensive)`](https://huggingface.co/datasets/tweet_eval) via [`tweetnlp`](https://github.com/cardiffnlp/tweetnlp). Training split is `train` and parameters have been tuned on the validation split `validation`. Following metrics are achieved on the test split `test` ([link](https://huggingface.co/cardiffnlp/roberta-base-offensive/raw/main/metric.json)). - F1 (micro): 0.8441860465116279 - F1 (macro): 0.8038468085106383 - Accuracy: 0.8441860465116279 ### Usage Install tweetnlp via pip. ```shell pip install tweetnlp ``` Load the model in python. ```python import tweetnlp model = tweetnlp.Classifier("cardiffnlp/roberta-base-offensive", max_length=128) model.predict('Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}}') ``` ### Reference ``` @inproceedings{camacho-collados-etal-2022-tweetnlp, title={{T}weet{NLP}: {C}utting-{E}dge {N}atural {L}anguage {P}rocessing for {S}ocial {M}edia}, author={Camacho-Collados, Jose and Rezaee, Kiamehr and Riahi, Talayeh and Ushio, Asahi and Loureiro, Daniel and Antypas, Dimosthenis and Boisson, Joanne and Espinosa-Anke, Luis and Liu, Fangyu and Mart{'\i}nez-C{'a}mara, Eugenio and others}, author = "Ushio, Asahi and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
DeepChem/ChemBERTa-77M-MTR
[ "pytorch", "roberta", "transformers" ]
null
{ "architectures": [ "RobertaForRegression" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7,169
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: ameeeer results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.8656716346740723 --- # ameeeer Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
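A minimal usage sketch, assuming the classifier is published under a placeholder repo id:

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual HuggingPics checkpoint.
classifier = pipeline("image-classification", model="your-username/ameeeer")

# Returns the candidate labels (corgi / samoyed / shiba inu) ranked by score.
for prediction in classifier("my_dog_photo.jpg"):
    print(prediction["label"], round(prediction["score"], 3))
```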
DeepESP/gpt2-spanish-medium
[ "pytorch", "tf", "jax", "gpt2", "text-generation", "es", "dataset:ebooks", "transformers", "GPT-2", "Spanish", "ebooks", "nlg", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
340
null
--- tags: - conversational --- # RickandMorty DialoGPT Model
DeepPavlov/rubert-base-cased-conversational
[ "pytorch", "jax", "bert", "feature-extraction", "ru", "transformers", "has_space" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17,362
null
--- license: creativeml-openrail-m tags: - text-to-image --- ### nicobooth1 Dreambooth model trained by klemkeni with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the v1-5 base model. You can run your new concept via the `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! Sample pictures of: sdss (use that in your prompt)
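For inference outside the Colab notebook, a minimal `diffusers` sketch looks like this; the repo id is assumed from the concept and author names and may differ:

```python
import torch
from diffusers import StableDiffusionPipeline

# Assumed repo id ("klemkeni/nicobooth1"); substitute the actual one if it differs.
pipe = StableDiffusionPipeline.from_pretrained(
    "klemkeni/nicobooth1", torch_dtype=torch.float16
).to("cuda")

# "sdss" is the concept token mentioned above - keep it in the prompt.
image = pipe("a portrait photo of sdss, studio lighting").images[0]
image.save("nicobooth1_sample.png")
```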
DeepPavlov/rubert-base-cased-sentence
[ "pytorch", "jax", "bert", "feature-extraction", "ru", "arxiv:1508.05326", "arxiv:1809.05053", "arxiv:1908.10084", "transformers", "has_space" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
46,991
null
--- license: mit tags: - generated_from_trainer model-index: - name: taleoftwocities results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # taleoftwocities This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results ### Framework versions - Transformers 4.24.0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
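A minimal generation sketch, assuming the fine-tuned checkpoint is published under a placeholder repo id:

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual fine-tuned checkpoint.
generator = pipeline("text-generation", model="your-username/taleoftwocities")

print(generator(
    "It was the best of times,",
    max_length=60, do_sample=True, top_p=0.9,
)[0]["generated_text"])
```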
DeepPavlov/xlm-roberta-large-en-ru-mnli
[ "pytorch", "xlm-roberta", "text-classification", "en", "ru", "dataset:glue", "dataset:mnli", "transformers", "xlm-roberta-large", "xlm-roberta-large-en-ru", "xlm-roberta-large-en-ru-mnli", "has_space" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
227
2022-12-01T20:55:19Z
--- license: other tags: - generated_from_trainer metrics: - accuracy model-index: - name: 6.7b-ri-reproduce-combined-4-gpu-20-val-v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 6.7b-ri-reproduce-combined-4-gpu-20-val-v2 This model is a fine-tuned version of [facebook/opt-6.7b](https://huggingface.co/facebook/opt-6.7b) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.9434 - Accuracy: 0.0329 - Perplexity: 51.5916 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 9e-07 - train_batch_size: 1 - eval_batch_size: 8 - seed: 100 - distributed_type: multi-GPU - num_devices: 4 - total_train_batch_size: 4 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 15.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Perplexity | |:-------------:|:-----:|:----:|:---------------:|:--------:|:----------:| | 2.5731 | 1.0 | 79 | 2.6113 | 0.0317 | 13.6171 | | 2.206 | 2.0 | 158 | 2.4805 | 0.0328 | 11.9469 | | 1.9105 | 3.0 | 237 | 2.4512 | 0.0333 | 11.6019 | | 1.6301 | 4.0 | 316 | 2.5078 | 0.0345 | 12.2780 | | 1.3733 | 5.0 | 395 | 2.6816 | 0.0342 | 14.6090 | | 1.1337 | 6.0 | 474 | 3.0078 | 0.0330 | 20.2431 | | 0.9619 | 7.0 | 553 | 3.1777 | 0.0330 | 23.9923 | | 0.798 | 8.0 | 632 | 3.2559 | 0.0330 | 25.9419 | | 0.6653 | 9.0 | 711 | 3.4277 | 0.0331 | 30.8068 | | 0.552 | 10.0 | 790 | 3.5566 | 0.0333 | 35.0453 | | 0.4568 | 11.0 | 869 | 3.7324 | 0.0324 | 41.7802 | | 0.3756 | 12.0 | 948 | 3.8184 | 0.0328 | 45.5295 | | 0.3119 | 13.0 | 1027 | 3.8477 | 0.0331 | 46.8831 | | 0.2448 | 14.0 | 1106 | 3.9062 | 0.0329 | 49.7122 | | 0.1986 | 15.0 | 1185 | 3.9434 | 0.0329 | 51.5916 | ### Framework versions - Transformers 4.25.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
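The reported perplexity is simply the exponential of the evaluation loss, which the final row of the table confirms:

```python
import math

eval_loss = 3.9434                # final validation loss from the table above
perplexity = math.exp(eval_loss)  # ~51.59, matching the reported 51.5916 (the loss shown is rounded)
print(round(perplexity, 2))
```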
DeividasM/wav2vec2-large-xlsr-53-lithuanian
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "lt", "dataset:common_voice", "transformers", "audio", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2022-12-01T20:56:07Z
--- license: other tags: - generated_from_trainer datasets: - ChaiML/dalio_combined_v1 model-index: - name: 6.7b-ri-reproduce-combined-4-gpu-0-val-v2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 6.7b-ri-reproduce-combined-4-gpu-0-val-v2 This model is a fine-tuned version of [facebook/opt-6.7b](https://huggingface.co/facebook/opt-6.7b) on the ChaiML/dalio_combined_v1 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 9e-07 - train_batch_size: 1 - eval_batch_size: 8 - seed: 100 - distributed_type: multi-GPU - num_devices: 4 - total_train_batch_size: 4 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 15.0 ### Training results ### Framework versions - Transformers 4.25.0.dev0 - Pytorch 1.12.1+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
DeltaHub/adapter_t5-3b_mrpc
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-mini-mlm-finetuned-emotion results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-mini-mlm-finetuned-emotion This model is a fine-tuned version of [google/bert_uncased_L-4_H-256_A-4](https://huggingface.co/google/bert_uncased_L-4_H-256_A-4) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.0041 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 3.3034 | 22.73 | 500 | 3.0423 | | 3.0612 | 45.45 | 1000 | 3.0242 | | 2.9507 | 68.18 | 1500 | 2.9806 | | 2.8609 | 90.91 | 2000 | 3.0442 | | 2.7887 | 113.64 | 2500 | 3.0179 | | 2.7104 | 136.36 | 3000 | 3.0041 | ### Framework versions - Transformers 4.25.0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
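A minimal fill-mask sketch, assuming the checkpoint is published under a placeholder repo id:

```python
from transformers import pipeline

# Placeholder repo id - replace with the actual fine-tuned checkpoint.
fill_mask = pipeline("fill-mask", model="your-username/bert-mini-mlm-finetuned-emotion")

# BERT-style checkpoints use the [MASK] token.
for prediction in fill_mask("i feel so [MASK] about the news."):
    print(prediction["token_str"], round(prediction["score"], 3))
```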
DeskDown/MarianMixFT_en-fil
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- datasets: - tweet_eval metrics: - f1 - accuracy model-index: - name: cardiffnlp/roberta-base-emoji results: - task: type: text-classification name: Text Classification dataset: name: tweet_eval type: emoji split: test metrics: - name: Micro F1 (tweet_eval/emoji) type: micro_f1_tweet_eval/emoji value: 0.46048 - name: Macro F1 (tweet_eval/emoji) type: micro_f1_tweet_eval/emoji value: 0.34017715040391805 - name: Accuracy (tweet_eval/emoji) type: accuracy_tweet_eval/emoji value: 0.46048 pipeline_tag: text-classification widget: - text: Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}} example_title: "topic_classification 1" - text: Yes, including Medicare and social security saving👍 example_title: "sentiment 1" - text: All two of them taste like ass. example_title: "offensive 1" - text: If you wanna look like a badass, have drama on social media example_title: "irony 1" - text: Whoever just unfollowed me you a bitch example_title: "hate 1" - text: I love swimming for the same reason I love meditating...the feeling of weightlessness. example_title: "emotion 1" - text: Beautiful sunset last night from the pontoon @TupperLakeNY example_title: "emoji 1" --- # cardiffnlp/roberta-base-emoji This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the [`tweet_eval (emoji)`](https://huggingface.co/datasets/tweet_eval) via [`tweetnlp`](https://github.com/cardiffnlp/tweetnlp). Training split is `train` and parameters have been tuned on the validation split `validation`. Following metrics are achieved on the test split `test` ([link](https://huggingface.co/cardiffnlp/roberta-base-emoji/raw/main/metric.json)). - F1 (micro): 0.46048 - F1 (macro): 0.34017715040391805 - Accuracy: 0.46048 ### Usage Install tweetnlp via pip. ```shell pip install tweetnlp ``` Load the model in python. ```python import tweetnlp model = tweetnlp.Classifier("cardiffnlp/roberta-base-emoji", max_length=128) model.predict('Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}}') ``` ### Reference ``` @inproceedings{camacho-collados-etal-2022-tweetnlp, title={{T}weet{NLP}: {C}utting-{E}dge {N}atural {L}anguage {P}rocessing for {S}ocial {M}edia}, author={Camacho-Collados, Jose and Rezaee, Kiamehr and Riahi, Talayeh and Ushio, Asahi and Loureiro, Daniel and Antypas, Dimosthenis and Boisson, Joanne and Espinosa-Anke, Luis and Liu, Fangyu and Mart{'\i}nez-C{'a}mara, Eugenio and others}, author = "Ushio, Asahi and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
DeskDown/MarianMixFT_en-hi
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-12-01T21:46:24Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 1024 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
DeskDown/MarianMixFT_en-ja
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
9
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-small-mlm-finetuned-emotion results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-small-mlm-finetuned-emotion This model is a fine-tuned version of [google/bert_uncased_L-4_H-512_A-8](https://huggingface.co/google/bert_uncased_L-4_H-512_A-8) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.7413 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 2.8418 | 22.73 | 500 | 2.7035 | | 2.5706 | 45.45 | 1000 | 2.6968 | | 2.4199 | 68.18 | 1500 | 2.6595 | | 2.2901 | 90.91 | 2000 | 2.7323 | | 2.1793 | 113.64 | 2500 | 2.7560 | | 2.0651 | 136.36 | 3000 | 2.7413 | ### Framework versions - Transformers 4.25.0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
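A minimal usage sketch for masked-language-model inference (the repository id below is inferred from the model name and may differ from the actual Hub path):

```python
from transformers import pipeline

# Hypothetical Hub id based on the model name; replace with the actual repository path.
fill_mask = pipeline("fill-mask", model="bert-small-mlm-finetuned-emotion")

# The model was trained with masked-language modelling, so it predicts the [MASK] token.
print(fill_mask("i feel so [MASK] today"))
```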
DeskDown/MarianMixFT_en-ms
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - kejian/codeparrot-train-more-filter-3.3b-cleaned model-index: - name: mighty-ul results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mighty-ul This model was trained from scratch on the kejian/codeparrot-train-more-filter-3.3b-cleaned dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0008 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.23.0 - Pytorch 1.13.0+cu116 - Datasets 2.0.0 - Tokenizers 0.12.1 # Full config {'dataset': {'datasets': ['kejian/codeparrot-train-more-filter-3.3b-cleaned'], 'is_split_by_sentences': True}, 'generation': {'batch_size': 128, 'metrics_configs': [{}, {'n': 1}, {}], 'scenario_configs': [{'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 640, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_hits_threshold': 0, 'num_samples': 2048}, {'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 272, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'functions', 'num_hits_threshold': 0, 'num_samples': 2048, 'prompts_path': 'resources/functions_csnet.jsonl', 'use_prompt_for_scoring': True}], 'scorer_config': {}}, 'kl_gpt3_callback': {'gpt3_kwargs': {'model_name': 'code-cushman-001'}, 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'path_or_name': 'codeparrot/codeparrot-small'}, 'objective': {'alpha': 0.01, 'name': 'Unlikelihood', 'score_threshold': 0}, 'tokenizer': {'path_or_name': 'codeparrot/codeparrot-small'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'mighty-ul', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0008, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000.0, 'output_dir': 'training_output', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/kejian/uncategorized/runs/36fxn8ex
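A minimal generation sketch (the Hub id below is an assumption pieced together from the `hub_model_id` and the Wandb account in the config above; adjust it if the repository lives elsewhere):

```python
from transformers import pipeline

# Assumed repository path; the config only states hub_model_id: 'mighty-ul'.
generator = pipeline("text-generation", model="kejian/mighty-ul")

# Sample a Python completion with settings similar to the card's generation config.
out = generator("def fibonacci(n):", max_length=64, do_sample=True, top_p=0.9, temperature=0.7)
print(out[0]["generated_text"])
```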
DeskDown/MarianMixFT_en-my
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('Belyaev/sd-class-butterflies-32') image = pipeline().images[0] image ```
DeskDown/MarianMixFT_en-th
[ "pytorch", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- thumbnail: https://huggingface.co/front/thumbnails/google.png license: apache-2.0 --- BERT Miniatures === This is the set of 24 BERT models referenced in [Well-Read Students Learn Better: On the Importance of Pre-training Compact Models](https://arxiv.org/abs/1908.08962) (English only, uncased, trained with WordPiece masking). We have shown that the standard BERT recipe (including model architecture and training objective) is effective on a wide range of model sizes, beyond BERT-Base and BERT-Large. The smaller BERT models are intended for environments with restricted computational resources. They can be fine-tuned in the same manner as the original BERT models. However, they are most effective in the context of knowledge distillation, where the fine-tuning labels are produced by a larger and more accurate teacher. Our goal is to enable research in institutions with fewer computational resources and encourage the community to seek directions of innovation alternative to increasing model capacity. You can download the 24 BERT miniatures either from the [official BERT Github page](https://github.com/google-research/bert/), or via HuggingFace from the links below: | |H=128|H=256|H=512|H=768| |---|:---:|:---:|:---:|:---:| | **L=2** |[**2/128 (BERT-Tiny)**][2_128]|[2/256][2_256]|[2/512][2_512]|[2/768][2_768]| | **L=4** |[4/128][4_128]|[**4/256 (BERT-Mini)**][4_256]|[**4/512 (BERT-Small)**][4_512]|[4/768][4_768]| | **L=6** |[6/128][6_128]|[6/256][6_256]|[6/512][6_512]|[6/768][6_768]| | **L=8** |[8/128][8_128]|[8/256][8_256]|[**8/512 (BERT-Medium)**][8_512]|[8/768][8_768]| | **L=10** |[10/128][10_128]|[10/256][10_256]|[10/512][10_512]|[10/768][10_768]| | **L=12** |[12/128][12_128]|[12/256][12_256]|[12/512][12_512]|[**12/768 (BERT-Base)**][12_768]| Note that the BERT-Base model in this release is included for completeness only; it was re-trained under the same regime as the original model. 
Here are the corresponding GLUE scores on the test set: |Model|Score|CoLA|SST-2|MRPC|STS-B|QQP|MNLI-m|MNLI-mm|QNLI(v2)|RTE|WNLI|AX| |---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| |BERT-Tiny|64.2|0.0|83.2|81.1/71.1|74.3/73.6|62.2/83.4|70.2|70.3|81.5|57.2|62.3|21.0| |BERT-Mini|65.8|0.0|85.9|81.1/71.8|75.4/73.3|66.4/86.2|74.8|74.3|84.1|57.9|62.3|26.1| |BERT-Small|71.2|27.8|89.7|83.4/76.2|78.8/77.0|68.1/87.0|77.6|77.0|86.4|61.8|62.3|28.6| |BERT-Medium|73.5|38.0|89.6|86.6/81.6|80.4/78.4|69.6/87.9|80.0|79.1|87.7|62.2|62.3|30.5| For each task, we selected the best fine-tuning hyperparameters from the lists below, and trained for 4 epochs: - batch sizes: 8, 16, 32, 64, 128 - learning rates: 3e-4, 1e-4, 5e-5, 3e-5 If you use these models, please cite the following paper: ``` @article{turc2019, title={Well-Read Students Learn Better: On the Importance of Pre-training Compact Models}, author={Turc, Iulia and Chang, Ming-Wei and Lee, Kenton and Toutanova, Kristina}, journal={arXiv preprint arXiv:1908.08962v2 }, year={2019} } ``` [2_128]: https://huggingface.co/google/bert_uncased_L-2_H-128_A-2 [2_256]: https://huggingface.co/google/bert_uncased_L-2_H-256_A-4 [2_512]: https://huggingface.co/google/bert_uncased_L-2_H-512_A-8 [2_768]: https://huggingface.co/google/bert_uncased_L-2_H-768_A-12 [4_128]: https://huggingface.co/google/bert_uncased_L-4_H-128_A-2 [4_256]: https://huggingface.co/google/bert_uncased_L-4_H-256_A-4 [4_512]: https://huggingface.co/google/bert_uncased_L-4_H-512_A-8 [4_768]: https://huggingface.co/google/bert_uncased_L-4_H-768_A-12 [6_128]: https://huggingface.co/google/bert_uncased_L-6_H-128_A-2 [6_256]: https://huggingface.co/google/bert_uncased_L-6_H-256_A-4 [6_512]: https://huggingface.co/google/bert_uncased_L-6_H-512_A-8 [6_768]: https://huggingface.co/google/bert_uncased_L-6_H-768_A-12 [8_128]: https://huggingface.co/google/bert_uncased_L-8_H-128_A-2 [8_256]: https://huggingface.co/google/bert_uncased_L-8_H-256_A-4 [8_512]: https://huggingface.co/google/bert_uncased_L-8_H-512_A-8 [8_768]: https://huggingface.co/google/bert_uncased_L-8_H-768_A-12 [10_128]: https://huggingface.co/google/bert_uncased_L-10_H-128_A-2 [10_256]: https://huggingface.co/google/bert_uncased_L-10_H-256_A-4 [10_512]: https://huggingface.co/google/bert_uncased_L-10_H-512_A-8 [10_768]: https://huggingface.co/google/bert_uncased_L-10_H-768_A-12 [12_128]: https://huggingface.co/google/bert_uncased_L-12_H-128_A-2 [12_256]: https://huggingface.co/google/bert_uncased_L-12_H-256_A-4 [12_512]: https://huggingface.co/google/bert_uncased_L-12_H-512_A-8 [12_768]: https://huggingface.co/google/bert_uncased_L-12_H-768_A-12
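As a sketch of how one of these checkpoints can be fine-tuned with the Transformers library (BERT-Small shown; any of the checkpoints linked above loads the same way):

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# BERT-Small (L=4, H=512, A=8) loaded with a fresh two-class classification head.
model_id = "google/bert_uncased_L-4_H-512_A-8"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id, num_labels=2)

inputs = tokenizer("Well-read students learn better.", return_tensors="pt")
logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, 2])
```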
DeskDown/MarianMix_en-zh_to_vi-ms-hi-ja
[ "pytorch", "tensorboard", "marian", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "MarianMTModel" ], "model_type": "marian", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 53 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 20, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 1060, "warmup_steps": 106, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Despin89/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Finetuned model (01/12/2022) · finetuned from jonatasgrosman/wav2vec2-large-xlsr-53-spanish · 150 audios (120 train, 30 test)
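A minimal transcription sketch (the card does not give the fine-tuned checkpoint's Hub id, so the base model named above is used as a stand-in; substitute the fine-tuned repository path):

```python
from transformers import pipeline

# Stand-in checkpoint: the base model named in the card. Replace with the fine-tuned repository id.
asr = pipeline("automatic-speech-recognition", model="jonatasgrosman/wav2vec2-large-xlsr-53-spanish")

# Path to a local 16 kHz audio file (assumed example input).
print(asr("sample.wav")["text"])
```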
Dev-DGT/food-dbert-multiling
[ "pytorch", "distilbert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "DistilBertForTokenClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
Access to model Lariatty/ian is restricted and you are not in the authorized list. Visit https://huggingface.co/Lariatty/ian to ask for access.
DevsIA/imagenes
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
Access to model Gabriel9408/spacegabriel is restricted and you are not in the authorized list. Visit https://huggingface.co/Gabriel9408/spacegabriel to ask for access.
DewiBrynJones/wav2vec2-large-xlsr-welsh
[ "cy", "dataset:common_voice", "audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-01T22:42:59Z
--- datasets: - cardiffnlp/tweet_topic_multi metrics: - f1 - accuracy model-index: - name: cardiffnlp/twitter-roberta-base-dec2021-topic-multi results: - task: type: text-classification name: Text Classification dataset: name: cardiffnlp/tweet_topic_multi type: cardiffnlp/tweet_topic_multi split: test_2021 metrics: - name: Micro F1 (cardiffnlp/tweet_topic_multi) type: micro_f1_cardiffnlp/tweet_topic_multi value: 0.7531433361274099 - name: Macro F1 (cardiffnlp/tweet_topic_multi) type: micro_f1_cardiffnlp/tweet_topic_multi value: 0.562239700912053 - name: Accuracy (cardiffnlp/tweet_topic_multi) type: accuracy_cardiffnlp/tweet_topic_multi value: 0.5288862418106015 pipeline_tag: text-classification widget: - text: Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}} example_title: "topic_classification 1" - text: Yes, including Medicare and social security saving👍 example_title: "sentiment 1" - text: All two of them taste like ass. example_title: "offensive 1" - text: If you wanna look like a badass, have drama on social media example_title: "irony 1" - text: Whoever just unfollowed me you a bitch example_title: "hate 1" - text: I love swimming for the same reason I love meditating...the feeling of weightlessness. example_title: "emotion 1" - text: Beautiful sunset last night from the pontoon @TupperLakeNY example_title: "emoji 1" --- # cardiffnlp/twitter-roberta-base-dec2021-topic-multi This model is a fine-tuned version of [cardiffnlp/twitter-roberta-base-dec2021](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021) on the [`cardiffnlp/tweet_topic_multi`](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi) via [`tweetnlp`](https://github.com/cardiffnlp/tweetnlp). Training split is `train_all` and parameters have been tuned on the validation split `validation_2021`. Following metrics are achieved on the test split `test_2021` ([link](https://huggingface.co/cardiffnlp/twitter-roberta-base-dec2021-topic-multi/raw/main/metric.json)). - F1 (micro): 0.7531433361274099 - F1 (macro): 0.562239700912053 - Accuracy: 0.5288862418106015 ### Usage Install tweetnlp via pip. ```shell pip install tweetnlp ``` Load the model in python. ```python import tweetnlp model = tweetnlp.Classifier("cardiffnlp/twitter-roberta-base-dec2021-topic-multi", max_length=128) model.predict('Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}}') ``` ### Reference ``` @inproceedings{camacho-collados-etal-2022-tweetnlp, title={{T}weet{NLP}: {C}utting-{E}dge {N}atural {L}anguage {P}rocessing for {S}ocial {M}edia}, author={Camacho-Collados, Jose and Rezaee, Kiamehr and Riahi, Talayeh and Ushio, Asahi and Loureiro, Daniel and Antypas, Dimosthenis and Boisson, Joanne and Espinosa-Anke, Luis and Liu, Fangyu and Mart{'\i}nez-C{'a}mara, Eugenio and others}, author = "Ushio, Asahi and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
Dhruva/Interstellar
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- datasets: - cardiffnlp/tweet_topic_multi metrics: - f1 - accuracy model-index: - name: cardiffnlp/twitter-roberta-base-2021-124m-topic-multi results: - task: type: text-classification name: Text Classification dataset: name: cardiffnlp/tweet_topic_multi type: cardiffnlp/tweet_topic_multi split: test_2021 metrics: - name: Micro F1 (cardiffnlp/tweet_topic_multi) type: micro_f1_cardiffnlp/tweet_topic_multi value: 0.7528230865746549 - name: Macro F1 (cardiffnlp/tweet_topic_multi) type: micro_f1_cardiffnlp/tweet_topic_multi value: 0.5564228688431104 - name: Accuracy (cardiffnlp/tweet_topic_multi) type: accuracy_cardiffnlp/tweet_topic_multi value: 0.535437760571769 pipeline_tag: text-classification widget: - text: Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}} example_title: "topic_classification 1" - text: Yes, including Medicare and social security saving👍 example_title: "sentiment 1" - text: All two of them taste like ass. example_title: "offensive 1" - text: If you wanna look like a badass, have drama on social media example_title: "irony 1" - text: Whoever just unfollowed me you a bitch example_title: "hate 1" - text: I love swimming for the same reason I love meditating...the feeling of weightlessness. example_title: "emotion 1" - text: Beautiful sunset last night from the pontoon @TupperLakeNY example_title: "emoji 1" --- # cardiffnlp/twitter-roberta-base-2021-124m-topic-multi This model is a fine-tuned version of [cardiffnlp/twitter-roberta-base-2021-124m](https://huggingface.co/cardiffnlp/twitter-roberta-base-2021-124m) on the [`cardiffnlp/tweet_topic_multi`](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi) via [`tweetnlp`](https://github.com/cardiffnlp/tweetnlp). Training split is `train_all` and parameters have been tuned on the validation split `validation_2021`. Following metrics are achieved on the test split `test_2021` ([link](https://huggingface.co/cardiffnlp/twitter-roberta-base-2021-124m-topic-multi/raw/main/metric.json)). - F1 (micro): 0.7528230865746549 - F1 (macro): 0.5564228688431104 - Accuracy: 0.535437760571769 ### Usage Install tweetnlp via pip. ```shell pip install tweetnlp ``` Load the model in python. ```python import tweetnlp model = tweetnlp.Classifier("cardiffnlp/twitter-roberta-base-2021-124m-topic-multi", max_length=128) model.predict('Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}}') ``` ### Reference ``` @inproceedings{camacho-collados-etal-2022-tweetnlp, title={{T}weet{NLP}: {C}utting-{E}dge {N}atural {L}anguage {P}rocessing for {S}ocial {M}edia}, author={Camacho-Collados, Jose and Rezaee, Kiamehr and Riahi, Talayeh and Ushio, Asahi and Loureiro, Daniel and Antypas, Dimosthenis and Boisson, Joanne and Espinosa-Anke, Luis and Liu, Fangyu and Mart{'\i}nez-C{'a}mara, Eugenio and others}, author = "Ushio, Asahi and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
DivyanshuSheth/T5-Seq2Seq-Final
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-01T23:36:15Z
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('dskill/sd-class-butterflies-32') image = pipeline().images[0] image ```
Dizoid/Lll
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-base-mlm-finetuned-emotion results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-mlm-finetuned-emotion This model is a fine-tuned version of [google/bert_uncased_L-12_H-768_A-12](https://huggingface.co/google/bert_uncased_L-12_H-768_A-12) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.3374 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 2.4247 | 5.75 | 500 | 2.3526 | | 2.1825 | 11.49 | 1000 | 2.2778 | | 2.0578 | 17.24 | 1500 | 2.3802 | | 1.9059 | 22.99 | 2000 | 2.3358 | | 1.7966 | 28.74 | 2500 | 2.3374 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
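A minimal sketch for checking the masked-language-modelling loss on a sentence, comparable in spirit to the evaluation loss reported above (the repository id below is inferred from the model name and may differ from the actual Hub path):

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

# Hypothetical Hub id based on the model name; replace with the actual repository path.
model_id = "bert-base-mlm-finetuned-emotion"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForMaskedLM.from_pretrained(model_id)

inputs = tokenizer("i feel calm and happy", return_tensors="pt")
with torch.no_grad():
    # Naive estimate: score the unmasked sentence against itself (no random masking applied).
    loss = model(**inputs, labels=inputs["input_ids"]).loss
print(loss.item())
```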
Dmitriiserg/Pxd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuned-base_mini results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text metrics: - name: Accuracy type: accuracy value: 0.9076 - name: F1 type: f1 value: 0.9515621723631789 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-base_mini This model is a fine-tuned version of [google/bert_uncased_L-4_H-256_A-4](https://huggingface.co/google/bert_uncased_L-4_H-256_A-4) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.3938 - Accuracy: 0.9076 - F1: 0.9516 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.354 | 2.55 | 500 | 0.2300 | 0.9116 | 0.9538 | | 0.2086 | 5.1 | 1000 | 0.3182 | 0.8815 | 0.9370 | | 0.1401 | 7.65 | 1500 | 0.2160 | 0.9241 | 0.9605 | | 0.0902 | 10.2 | 2000 | 0.4684 | 0.8722 | 0.9317 | | 0.0654 | 12.76 | 2500 | 0.4885 | 0.8747 | 0.9332 | | 0.043 | 15.31 | 3000 | 0.3938 | 0.9076 | 0.9516 | ### Framework versions - Transformers 4.25.0 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
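A minimal inference sketch (the repository id below is inferred from the model name and may differ from the actual Hub path):

```python
from transformers import pipeline

# Hypothetical Hub id based on the model name; replace with the actual repository path.
classifier = pipeline("text-classification", model="finetuned-base_mini")

# The model was fine-tuned on IMDB movie reviews for sentiment classification.
print(classifier("A surprisingly touching film with terrific performances."))
```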
Dmitry12/sber
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- datasets: - cardiffnlp/tweet_topic_multi metrics: - f1 - accuracy model-index: - name: cardiffnlp/roberta-base-topic-multi results: - task: type: text-classification name: Text Classification dataset: name: cardiffnlp/tweet_topic_multi type: cardiffnlp/tweet_topic_multi split: test_2021 metrics: - name: Micro F1 (cardiffnlp/tweet_topic_multi) type: micro_f1_cardiffnlp/tweet_topic_multi value: 0.7546616383825687 - name: Macro F1 (cardiffnlp/tweet_topic_multi) type: micro_f1_cardiffnlp/tweet_topic_multi value: 0.5959450154471646 - name: Accuracy (cardiffnlp/tweet_topic_multi) type: accuracy_cardiffnlp/tweet_topic_multi value: 0.5318642048838594 pipeline_tag: text-classification widget: - text: Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}} example_title: "topic_classification 1" - text: Yes, including Medicare and social security saving👍 example_title: "sentiment 1" - text: All two of them taste like ass. example_title: "offensive 1" - text: If you wanna look like a badass, have drama on social media example_title: "irony 1" - text: Whoever just unfollowed me you a bitch example_title: "hate 1" - text: I love swimming for the same reason I love meditating...the feeling of weightlessness. example_title: "emotion 1" - text: Beautiful sunset last night from the pontoon @TupperLakeNY example_title: "emoji 1" --- # cardiffnlp/roberta-base-topic-multi This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the [`cardiffnlp/tweet_topic_multi`](https://huggingface.co/datasets/cardiffnlp/tweet_topic_multi) via [`tweetnlp`](https://github.com/cardiffnlp/tweetnlp). Training split is `train_all` and parameters have been tuned on the validation split `validation_2021`. Following metrics are achieved on the test split `test_2021` ([link](https://huggingface.co/cardiffnlp/roberta-base-topic-multi/raw/main/metric.json)). - F1 (micro): 0.7546616383825687 - F1 (macro): 0.5959450154471646 - Accuracy: 0.5318642048838594 ### Usage Install tweetnlp via pip. ```shell pip install tweetnlp ``` Load the model in python. ```python import tweetnlp model = tweetnlp.Classifier("cardiffnlp/roberta-base-topic-multi", max_length=128) model.predict('Get the all-analog Classic Vinyl Edition of "Takin Off" Album from {@herbiehancock@} via {@bluenoterecords@} link below {{URL}}') ``` ### Reference ``` @inproceedings{camacho-collados-etal-2022-tweetnlp, title={{T}weet{NLP}: {C}utting-{E}dge {N}atural {L}anguage {P}rocessing for {S}ocial {M}edia}, author={Camacho-Collados, Jose and Rezaee, Kiamehr and Riahi, Talayeh and Ushio, Asahi and Loureiro, Daniel and Antypas, Dimosthenis and Boisson, Joanne and Espinosa-Anke, Luis and Liu, Fangyu and Mart{'\i}nez-C{'a}mara, Eugenio and others}, author = "Ushio, Asahi and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
DongHyoungLee/kogpt2-base-v2-finetuned-kogpt2_nsmc_single_sentence_classification
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: - he tags: - language model --- ## AlephBertGimmel Modern Hebrew pretrained BERT model with a 128K token vocabulary. [Checkpoint](https://github.com/Dicta-Israel-Center-for-Text-Analysis/alephbertgimmel/tree/main/alephbertgimmel-base/ckpt_73780--Max512Seq) of the alephbertgimmel-base-512 from [alephbertgimmel](https://github.com/Dicta-Israel-Center-for-Text-Analysis/alephbertgimmel) ```python from transformers import AutoTokenizer, AutoModelForMaskedLM import torch from transformers import AutoModelForMaskedLM, AutoTokenizer model = AutoModelForMaskedLM.from_pretrained("imvladikon/alephbertgimmel-base-512") tokenizer = AutoTokenizer.from_pretrained("imvladikon/alephbertgimmel-base-512") text = "{} היא מטרופולין המהווה את מרכז הכלכלה" input = tokenizer.encode(text.format("[MASK]"), return_tensors="pt") mask_token_index = torch.where(input == tokenizer.mask_token_id)[1] token_logits = model(input).logits mask_token_logits = token_logits[0, mask_token_index, :] top_5_tokens = torch.topk(mask_token_logits, 5, dim=1).indices[0].tolist() for token in top_5_tokens: print(text.format(tokenizer.decode([token]))) # העיר היא מטרופולין המהווה את מרכז הכלכלה # ירושלים היא מטרופולין המהווה את מרכז הכלכלה # חיפה היא מטרופולין המהווה את מרכז הכלכלה # לונדון היא מטרופולין המהווה את מרכז הכלכלה # אילת היא מטרופולין המהווה את מרכז הכלכלה ``` ```python def ppl_naive(text, model, tokenizer): input = tokenizer.encode(text, return_tensors="pt") loss = model(input, labels=input)[0] return torch.exp(loss).item() text = """{} היא עיר הבירה של מדינת ישראל, והעיר הגדולה ביותר בישראל בגודל האוכלוסייה""" for word in ["חיפה", "ירושלים", "תל אביב"]: print(ppl_naive(text.format(word), model, tokenizer)) # 10.181422233581543 # 9.743313789367676 # 10.171016693115234 ``` When using AlephBertGimmel, please reference: ```bibtex @misc{guetta2022large, title={Large Pre-Trained Models with Extra-Large Vocabularies: A Contrastive Analysis of Hebrew BERT Models and a New One to Outperform Them All}, author={Eylon Guetta and Avi Shmidman and Shaltiel Shmidman and Cheyn Shmuel Shmidman and Joshua Guedalia and Moshe Koppel and Dan Bareket and Amit Seker and Reut Tsarfaty}, year={2022}, eprint={2211.15199}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
Donghyun/L2_BERT
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-02T00:34:44Z
--- license: cc-by-4.0 metrics: - bleu4 - meteor - rouge-l - bertscore - moverscore language: ru datasets: - lmqg/qg_ruquad pipeline_tag: text2text-generation tags: - question generation - answer extraction widget: - text: "generate question: Нелишним будет отметить, что, развивая это направление, Д. И. Менделеев, поначалу априорно выдвинув идею о температуре, при которой высота мениска будет нулевой, <hl> в мае 1860 года <hl> провёл серию опытов." example_title: "Question Generation Example 1" - text: "generate question: Однако, франкоязычный <hl> Квебек <hl> практически никогда не включается в состав Латинской Америки." example_title: "Question Generation Example 2" - text: "generate question: Классическим примером международного синдиката XX века была группа компаний <hl> Де Бирс <hl> , которая в 1980-е годы контролировала до 90 % мировой торговли алмазами." example_title: "Question Generation Example 3" - text: "extract answers: <hl> в английском языке в нарицательном смысле применяется термин rapid transit (скоростной городской транспорт), однако употребляется он только тогда, когда по смыслу невозможно ограничиться названием одной конкретной системы метрополитена. <hl> в остальных случаях используются индивидуальные названия: в лондоне — london underground, в нью-йорке — new york subway, в ливерпуле — merseyrail, в вашингтоне — washington metrorail, в сан-франциско — bart и т. п. в некоторых городах применяется название метро (англ. metro) для систем, по своему характеру близких к метро, или для всего городского транспорта (собственно метро и наземный пассажирский транспорт (в том числе автобусы и трамваи)) в совокупности." example_title: "Answer Extraction Example 1" - text: "extract answers: Вопреки ожиданиям, объединение денежных систем республик не привело к уменьшению инфляции. Напротив, закдензнаки стали невероятно быстро обесцениваться, особенно в 1924 году. Для обеспечения денежного рынка приходилось увеличивать эмиссию закдензнаков и выпускать в оборот купюры невероятно больших номиналов. <hl> Так, в период с 1 января по 20 марта 1924 года были введены в оборот купюры достоинством 25 000 000 рублей, затем — 250 000 000 рублей. <hl> И, наконец, в апреле 1924 года были выпущены купюры миллиардного достоинства (в просторечии лимард)." 
example_title: "Answer Extraction Example 2" model-index: - name: lmqg/mt5-base-ruquad-qg-ae results: - task: name: Text2text Generation type: text2text-generation dataset: name: lmqg/qg_ruquad type: default args: default metrics: - name: BLEU4 (Question Generation) type: bleu4_question_generation value: 20.06 - name: ROUGE-L (Question Generation) type: rouge_l_question_generation value: 35.35 - name: METEOR (Question Generation) type: meteor_question_generation value: 30.18 - name: BERTScore (Question Generation) type: bertscore_question_generation value: 87.9 - name: MoverScore (Question Generation) type: moverscore_question_generation value: 66.6 - name: QAAlignedF1Score-BERTScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_f1_score_bertscore_question_answer_generation_with_gold_answer value: 80.21 - name: QAAlignedRecall-BERTScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_recall_bertscore_question_answer_generation_with_gold_answer value: 84.49 - name: QAAlignedPrecision-BERTScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_precision_bertscore_question_answer_generation_with_gold_answer value: 76.48 - name: QAAlignedF1Score-MoverScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_f1_score_moverscore_question_answer_generation_with_gold_answer value: 57.17 - name: QAAlignedRecall-MoverScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_recall_moverscore_question_answer_generation_with_gold_answer value: 60.55 - name: QAAlignedPrecision-MoverScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_precision_moverscore_question_answer_generation_with_gold_answer value: 54.4 - name: BLEU4 (Answer Extraction) type: bleu4_answer_extraction value: 31.64 - name: ROUGE-L (Answer Extraction) type: rouge_l_answer_extraction value: 49.73 - name: METEOR (Answer Extraction) type: meteor_answer_extraction value: 38.79 - name: BERTScore (Answer Extraction) type: bertscore_answer_extraction value: 86.22 - name: MoverScore (Answer Extraction) type: moverscore_answer_extraction value: 74.64 - name: AnswerF1Score (Answer Extraction) type: answer_f1_score__answer_extraction value: 64.31 - name: AnswerExactMatch (Answer Extraction) type: answer_exact_match_answer_extraction value: 44.44 --- # Model Card of `lmqg/mt5-base-ruquad-qg-ae` This model is fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) for question generation and answer extraction jointly on the [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation). ### Overview - **Language model:** [google/mt5-base](https://huggingface.co/google/mt5-base) - **Language:** ru - **Training data:** [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="ru", model="lmqg/mt5-base-ruquad-qg-ae") # model prediction question_answer_pairs = model.generate_qa("Нелишним будет отметить, что, развивая это направление, Д. И. 
Менделеев, поначалу априорно выдвинув идею о температуре, при которой высота мениска будет нулевой, в мае 1860 года провёл серию опытов.") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "lmqg/mt5-base-ruquad-qg-ae") # answer extraction answer = pipe("generate question: Нелишним будет отметить, что, развивая это направление, Д. И. Менделеев, поначалу априорно выдвинув идею о температуре, при которой высота мениска будет нулевой, <hl> в мае 1860 года <hl> провёл серию опытов.") # question generation question = pipe("extract answers: <hl> в английском языке в нарицательном смысле применяется термин rapid transit (скоростной городской транспорт), однако употребляется он только тогда, когда по смыслу невозможно ограничиться названием одной конкретной системы метрополитена. <hl> в остальных случаях используются индивидуальные названия: в лондоне — london underground, в нью-йорке — new york subway, в ливерпуле — merseyrail, в вашингтоне — washington metrorail, в сан-франциско — bart и т. п. в некоторых городах применяется название метро (англ. metro) для систем, по своему характеру близких к метро, или для всего городского транспорта (собственно метро и наземный пассажирский транспорт (в том числе автобусы и трамваи)) в совокупности.") ``` ## Evaluation - ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/lmqg/mt5-base-ruquad-qg-ae/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_ruquad.default.json) | | Score | Type | Dataset | |:-----------|--------:|:--------|:-----------------------------------------------------------------| | BERTScore | 87.9 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | Bleu_1 | 36.66 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | Bleu_2 | 29.53 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | Bleu_3 | 24.23 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | Bleu_4 | 20.06 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | METEOR | 30.18 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | MoverScore | 66.6 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | ROUGE_L | 35.35 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | - ***Metric (Question & Answer Generation)***: [raw metric file](https://huggingface.co/lmqg/mt5-base-ruquad-qg-ae/raw/main/eval/metric.first.answer.paragraph.questions_answers.lmqg_qg_ruquad.default.json) | | Score | Type | Dataset | |:--------------------------------|--------:|:--------|:-----------------------------------------------------------------| | QAAlignedF1Score (BERTScore) | 80.21 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | QAAlignedF1Score (MoverScore) | 57.17 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | QAAlignedPrecision (BERTScore) | 76.48 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | QAAlignedPrecision (MoverScore) | 54.4 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | QAAlignedRecall (BERTScore) | 84.49 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | QAAlignedRecall (MoverScore) | 60.55 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | - ***Metric (Answer Extraction)***: [raw metric 
file](https://huggingface.co/lmqg/mt5-base-ruquad-qg-ae/raw/main/eval/metric.first.answer.paragraph_sentence.answer.lmqg_qg_ruquad.default.json) | | Score | Type | Dataset | |:-----------------|--------:|:--------|:-----------------------------------------------------------------| | AnswerExactMatch | 44.44 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | AnswerF1Score | 64.31 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | BERTScore | 86.22 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | Bleu_1 | 45.61 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | Bleu_2 | 40.76 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | Bleu_3 | 36.22 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | Bleu_4 | 31.64 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | METEOR | 38.79 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | MoverScore | 74.64 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | | ROUGE_L | 49.73 | default | [lmqg/qg_ruquad](https://huggingface.co/datasets/lmqg/qg_ruquad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_ruquad - dataset_name: default - input_types: ['paragraph_answer', 'paragraph_sentence'] - output_types: ['question', 'answer'] - prefix_types: ['qg', 'ae'] - model: google/mt5-base - max_length: 512 - max_length_output: 32 - epoch: 8 - batch: 32 - lr: 0.001 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 2 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/lmqg/mt5-base-ruquad-qg-ae/raw/main/trainer_config.json). ## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
Dongjae/mrc2reader
[ "pytorch", "xlm-roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
2022-12-02T00:43:44Z
--- library_name: stable-baselines3 tags: - FrozenLake-v1 - deep-reinforcement-learning - reinforcement-learning - stable-baselines3 model-index: - name: PPO results: - task: type: reinforcement-learning name: reinforcement-learning dataset: name: FrozenLake-v1 type: FrozenLake-v1 metrics: - type: mean_reward value: 1.00 +/- 0.00 name: mean_reward verified: false --- # **PPO** Agent playing **FrozenLake-v1** This is a trained model of a **PPO** agent playing **FrozenLake-v1** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3). ## Usage (with Stable-baselines3) TODO: Add your code ```python from stable_baselines3 import ... from huggingface_sb3 import load_from_hub ... ```
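A hedged sketch of what the TODO above might look like (the repository id and checkpoint filename are assumptions; check the repository's file list for the actual names):

```python
import gym
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Assumed repository id and filename; adjust both to the actual values.
checkpoint = load_from_hub(repo_id="<user>/<repo>", filename="ppo-FrozenLake-v1.zip")
model = PPO.load(checkpoint)

# Roll out one episode (classic gym step/reset API, as used by stable-baselines3 here).
env = gym.make("FrozenLake-v1")
obs = env.reset()
done = False
while not done:
    action, _ = model.predict(obs, deterministic=True)
    obs, reward, done, info = env.step(action)
print("episode reward:", reward)
```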
Doogie/Waynehills-KE-T5-doogie
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- annotations_creators: - expert-generated language: - en language_creators: - found license: - mit multilinguality: - monolingual paperswithcode_id: acronym-identification pretty_name: Acronym Identification Dataset size_categories: - 10K<n<100K source_datasets: - original task_categories: - token-classification task_ids: - token-classification-other-acronym-identification train-eval-index: - col_mapping: labels: tags tokens: tokens config: default splits: eval_split: test task: token-classification task_id: entity_extraction --- # Dataset Card for [Dataset Name] ## Table of Contents - [Table of Contents](#table-of-contents) - [Dataset Description](#dataset-description) - [Dataset Summary](#dataset-summary) - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards) - [Languages](#languages) - [Dataset Structure](#dataset-structure) - [Data Instances](#data-instances) - [Data Fields](#data-fields) - [Data Splits](#data-splits) - [Dataset Creation](#dataset-creation) - [Curation Rationale](#curation-rationale) - [Source Data](#source-data) - [Annotations](#annotations) - [Personal and Sensitive Information](#personal-and-sensitive-information) - [Considerations for Using the Data](#considerations-for-using-the-data) - [Social Impact of Dataset](#social-impact-of-dataset) - [Discussion of Biases](#discussion-of-biases) - [Other Known Limitations](#other-known-limitations) - [Additional Information](#additional-information) - [Dataset Curators](#dataset-curators) - [Licensing Information](#licensing-information) - [Citation Information](#citation-information) - [Contributions](#contributions) ## Dataset Description - **Homepage:** - **Repository:** - **Paper:** - **Leaderboard:** - **Point of Contact:** ### Dataset Summary [More Information Needed] ### Supported Tasks and Leaderboards [More Information Needed] ### Languages [More Information Needed] ## Dataset Structure ### Data Instances [More Information Needed] ### Data Fields [More Information Needed] ### Data Splits [More Information Needed] ## Dataset Creation ### Curation Rationale [More Information Needed] ### Source Data #### Initial Data Collection and Normalization [More Information Needed] #### Who are the source language producers? [More Information Needed] ### Annotations #### Annotation process [More Information Needed] #### Who are the annotators? [More Information Needed] ### Personal and Sensitive Information [More Information Needed] ## Considerations for Using the Data ### Social Impact of Dataset [More Information Needed] ### Discussion of Biases [More Information Needed] ### Other Known Limitations [More Information Needed] ## Additional Information ### Dataset Curators [More Information Needed] ### Licensing Information [More Information Needed] ### Citation Information [More Information Needed] ### Contributions Thanks to [@github-username](https://github.com/<github-username>) for adding this dataset.
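A minimal loading sketch (the dataset path below is inferred from the `paperswithcode_id` in the metadata and may differ):

```python
from datasets import load_dataset

# Assumed Hub path; adjust if the dataset lives under a different namespace.
ds = load_dataset("acronym_identification")
print(ds)              # splits and sizes
print(ds["train"][0])  # one token-classification example with the columns named in the col_mapping above
```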
Waynehillsdev/wav2vec2-base-timit-demo-colab
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
Access to model pscollins/nyu_dl_unbiased_teacher_v2 is restricted and you are not in the authorized list. Visit https://huggingface.co/pscollins/nyu_dl_unbiased_teacher_v2 to ask for access.
Doquey/DialoGPT-small-Michaelbot
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- language: zh license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - multilingual - English(En) - Chinese(Zh) - Spanish(Es) - French(Fr) - Russian(Ru) - Japanese(Ja) - Korean(Ko) - Arabic(Ar) - Italian(It) - diffusers widget: - text: "一张<鸣人>男孩的照片" example_title: 一张<鸣人>男孩的照片 --- # This is a DreamBooth model finetuned from the multilingual text-to-image model AltDiffusion. DreamBooth is a method for fine-tuning a pretrained text-to-image model: given just a few images of a subject as input, it learns to bind a unique identifier to that specific subject. AltDiffusion is a multilingual text-to-image model; it currently supports 9 languages and will support more in the future. We can use DreamBooth to fine-tune AltDiffusion, so that we get a multilingual DreamBooth model that supports 9 languages. Here we give an example model fine-tuned with a dozen pictures of Uzumaki Naruto downloaded from the web. The example inference code is shown below. You can try it out and maybe train your own DreamBooth model. Hope you have fun! ## Example code of inference ``` from diffusers import AltDiffusionPipeline, DPMSolverMultistepScheduler import torch pipe = AltDiffusionPipeline.from_pretrained("BAAI/DreamBooth-AltDiffusion") pipe = pipe.to("cuda") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) prompt = "一张<鸣人>男孩的照片" image = pipe(prompt, num_inference_steps=25).images[0] image.show() ```
DoyyingFace/bert-COVID-HATE-finetuned-test
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
Access to model felnata/images is restricted and you are not in the authorized list. Visit https://huggingface.co/felnata/images to ask for access.
albert-xlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
341
2022-12-02T02:40:11Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: t5-small-finetuned-rahul-summariza1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-rahul-summariza1 This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8150 - Rouge1: 27.4177 - Rouge2: 21.7327 - Rougel: 25.5996 - Rougelsum: 26.7326 - Gen Len: 19.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5.6e-05 - train_batch_size: 8 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.1387 | 1.0 | 16 | 0.8518 | 28.3651 | 21.9282 | 26.1118 | 27.2351 | 19.0 | | 0.9637 | 2.0 | 32 | 0.8150 | 27.4177 | 21.7327 | 25.5996 | 26.7326 | 19.0 | ### Framework versions - Transformers 4.23.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
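A minimal usage sketch for this checkpoint, assuming the fine-tuned weights are on the Hub and load with the standard `transformers` summarization pipeline (the repo path below is a placeholder for the full `<user>/t5-small-finetuned-rahul-summariza1` id):

```python
from transformers import pipeline

# Placeholder repo id; replace with the full <user>/t5-small-finetuned-rahul-summariza1 path on the Hub.
model_id = "t5-small-finetuned-rahul-summariza1"
summarizer = pipeline("summarization", model=model_id)

text = "Paste the document to be summarized here."
# Gen Len above is ~19 tokens, so short summaries are expected.
print(summarizer(text, max_length=32, min_length=5, do_sample=False)[0]["summary_text"])
```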
albert-xlarge-v2
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,973
2022-12-02T02:44:09Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - kejian/codeparrot-train-more-filter-3.3b-cleaned model-index: - name: mighty-rwr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mighty-rwr This model was trained from scratch on the kejian/codeparrot-train-more-filter-3.3b-cleaned dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.23.0 - Pytorch 1.13.0+cu116 - Datasets 2.0.0 - Tokenizers 0.12.1 # Full config {'dataset': {'datasets': ['kejian/codeparrot-train-more-filter-3.3b-cleaned'], 'is_split_by_sentences': True}, 'generation': {'batch_size': 128, 'metrics_configs': [{}, {'n': 1}, {}], 'scenario_configs': [{'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 640, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_hits_threshold': 0, 'num_samples': 2048}, {'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 272, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'functions', 'num_hits_threshold': 0, 'num_samples': 2048, 'prompts_path': 'resources/functions_csnet.jsonl', 'use_prompt_for_scoring': True}], 'scorer_config': {}}, 'kl_gpt3_callback': {'gpt3_kwargs': {'model_name': 'code-cushman-001'}, 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'model_kwargs': {'value_head_config': {'is_detached': False}}, 'path_or_name': 'codeparrot/codeparrot-small'}, 'objective': {'alpha': 1, 'beta': 10, 'name': 'AWR'}, 'tokenizer': {'path_or_name': 'codeparrot/codeparrot-small'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'mighty-rwr', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.001, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000.0, 'output_dir': 'training_output', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/kejian/uncategorized/runs/497nsm8z
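A generation sketch under two assumptions: the pushed checkpoint (hub_model_id `mighty-rwr` under the `kejian` namespace, per the config above) loads as a plain GPT-2 causal LM, and the value head used during AWR training can be ignored at sampling time; the sampling settings mirror the generation config above:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Tokenizer is the one named in the config above; the model repo id is assumed from hub_model_id.
tok = AutoTokenizer.from_pretrained("codeparrot/codeparrot-small")
model = AutoModelForCausalLM.from_pretrained("kejian/mighty-rwr")

inputs = tok("def fibonacci(n):", return_tensors="pt")
out = model.generate(**inputs, do_sample=True, temperature=0.7, top_k=0, top_p=0.9,
                     max_length=128, eos_token_id=0)
print(tok.decode(out[0]))
```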
albert-xxlarge-v1
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7,091
2022-12-02T02:44:10Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - kejian/codeparrot-train-more-filter-3.3b-cleaned model-index: - name: mighty-mle results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mighty-mle This model was trained from scratch on the kejian/codeparrot-train-more-filter-3.3b-cleaned dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0008 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 50354 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.23.0 - Pytorch 1.13.0+cu116 - Datasets 2.0.0 - Tokenizers 0.12.1 # Full config {'dataset': {'datasets': ['kejian/codeparrot-train-more-filter-3.3b-cleaned'], 'is_split_by_sentences': True}, 'generation': {'batch_size': 128, 'metrics_configs': [{}, {'n': 1}, {}], 'scenario_configs': [{'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 640, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_hits_threshold': 0, 'num_samples': 2048}, {'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 272, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'functions', 'num_hits_threshold': 0, 'num_samples': 2048, 'prompts_path': 'resources/functions_csnet.jsonl', 'use_prompt_for_scoring': True}], 'scorer_config': {}}, 'kl_gpt3_callback': {'gpt3_kwargs': {'model_name': 'code-cushman-001'}, 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'path_or_name': 'codeparrot/codeparrot-small'}, 'objective': {'name': 'MLE'}, 'tokenizer': {'path_or_name': 'codeparrot/codeparrot-small'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 64, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'mighty-mle', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.0008, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000.0, 'output_dir': 'training_output', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 25177, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/kejian/uncategorized/runs/vl44x14u
bert-base-german-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "exbert", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
175,983
2022-12-02T02:56:04Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-MT5-Intento1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-MT5-Intento1 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: nan - Rouge1: 3.9645 - Rouge2: 0.8023 - Rougel: 3.8615 - Rougelsum: 3.8591 - Gen Len: 13.7379 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 0.0 | 1.0 | 6034 | nan | 3.9645 | 0.8023 | 3.8615 | 3.8591 | 13.7379 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
bert-base-german-dbmdz-cased
[ "pytorch", "jax", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1,814
2022-12-02T02:59:56Z
--- license: creativeml-openrail-m --- akrammodel_10000 was trained on a blend of (v1-5-pruned[a9263745] and anythingv3[6569e224] weighted sum M0.5) for 10000 steps, with classification "artstyle" and instance m_akram. ## example <img alt="Showcase" src="https://huggingface.co/flamesbob/akaramModel/resolve/main/00035-1277575582-m_akram%2C((bes___.png"/> <img alt="Showcase" src="https://huggingface.co/flamesbob/akaramModel/resolve/main/00036-1277575585-m_akram%2C((bes___.png"/> <img alt="Showcase" src="https://huggingface.co/flamesbob/akaramModel/resolve/main/00037-1277575584-m_akram,((bes___.png"/> <img alt="Showcase" src="https://huggingface.co/flamesbob/akaramModel/resolve/main/00039-3151256283-m_akram,((bes___.png"/> ## License This embedding is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: You can't use the embedding to deliberately produce or share illegal or harmful outputs or content. The authors claim no rights on the outputs you generate; you are free to use them and are accountable for their use, which must not go against the provisions set in the license. You may re-distribute the weights and use the embedding commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M with all your users (please read the license entirely and carefully). Please read the full license here.
bert-base-multilingual-uncased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
328,585
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - f1 model-index: - name: distilbert-base-uncased-finetuned-PN results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-PN This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2139 - Accuracy: 0.9479 - F1: 0.9057 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 42 | 0.2918 | 0.9167 | 0.8571 | | No log | 2.0 | 84 | 0.2653 | 0.9062 | 0.8163 | | No log | 3.0 | 126 | 0.2139 | 0.9479 | 0.9057 | | No log | 4.0 | 168 | 0.2317 | 0.9167 | 0.8462 | | No log | 5.0 | 210 | 0.2492 | 0.9062 | 0.8235 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
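A minimal inference sketch, assuming the fine-tuned classifier is on the Hub and works with the standard `transformers` text-classification pipeline (the repo path below is a placeholder for the full `<user>/distilbert-base-uncased-finetuned-PN` id):

```python
from transformers import pipeline

# Placeholder repo id; replace with the full <user>/distilbert-base-uncased-finetuned-PN path on the Hub.
clf = pipeline("text-classification", model="distilbert-base-uncased-finetuned-PN")
# Returns a list of {'label': ..., 'score': ...}; label names depend on the fine-tune.
print(clf("An example sentence to classify."))
```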
bert-large-cased-whole-word-masking
[ "pytorch", "tf", "jax", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,316
2022-12-02T03:25:42Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: mt5-small-MT5-Intento2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-MT5-Intento2 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: nan - Rouge1: 3.9645 - Rouge2: 0.8023 - Rougel: 3.8615 - Rougelsum: 3.8591 - Gen Len: 13.7379 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.01 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 0.0 | 1.0 | 1509 | nan | 3.9645 | 0.8023 | 3.8615 | 3.8591 | 13.7379 | | 0.0 | 2.0 | 3018 | nan | 3.9645 | 0.8023 | 3.8615 | 3.8591 | 13.7379 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
bert-large-uncased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
480,510
2022-12-02T03:36:29Z
--- language: - en license: apache-2.0 tags: - generated_from_trainer datasets: - kejian/codeparrot-train-more-filter-3.3b-cleaned model-index: - name: mighty-awr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mighty-awr This model was trained from scratch on the kejian/codeparrot-train-more-filter-3.3b-cleaned dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 256 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.01 - training_steps: 12588 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.23.0 - Pytorch 1.13.0+cu116 - Datasets 2.0.0 - Tokenizers 0.12.1 # Full config {'dataset': {'datasets': ['kejian/codeparrot-train-more-filter-3.3b-cleaned'], 'is_split_by_sentences': True}, 'generation': {'batch_size': 128, 'metrics_configs': [{}, {'n': 1}, {}], 'scenario_configs': [{'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 640, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'unconditional', 'num_hits_threshold': 0, 'num_samples': 2048}, {'display_as_html': True, 'generate_kwargs': {'do_sample': True, 'eos_token_id': 0, 'max_length': 272, 'min_length': 10, 'temperature': 0.7, 'top_k': 0, 'top_p': 0.9}, 'name': 'functions', 'num_hits_threshold': 0, 'num_samples': 2048, 'prompts_path': 'resources/functions_csnet.jsonl', 'use_prompt_for_scoring': True}], 'scorer_config': {}}, 'kl_gpt3_callback': {'gpt3_kwargs': {'model_name': 'code-cushman-001'}, 'max_tokens': 64, 'num_samples': 4096}, 'model': {'from_scratch': True, 'gpt2_config_kwargs': {'reorder_and_upcast_attn': True, 'scale_attn_by': True}, 'model_kwargs': {'value_head_config': {'is_detached': False}}, 'path_or_name': 'codeparrot/codeparrot-small'}, 'objective': {'alpha': 0.05, 'beta': 1, 'name': 'AWR'}, 'tokenizer': {'path_or_name': 'codeparrot/codeparrot-small'}, 'training': {'dataloader_num_workers': 0, 'effective_batch_size': 256, 'evaluation_strategy': 'no', 'fp16': True, 'hub_model_id': 'mighty-awr', 'hub_strategy': 'all_checkpoints', 'learning_rate': 0.001, 'logging_first_step': True, 'logging_steps': 1, 'num_tokens': 3300000000.0, 'output_dir': 'training_output', 'per_device_train_batch_size': 16, 'push_to_hub': True, 'remove_unused_columns': False, 'save_steps': 6294, 'save_strategy': 'steps', 'seed': 42, 'warmup_ratio': 0.01, 'weight_decay': 0.1}} # Wandb URL: https://wandb.ai/kejian/uncategorized/runs/cd18q0rv
distilbert-base-cased-distilled-squad
[ "pytorch", "tf", "rust", "safetensors", "openvino", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "model-index", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
257,745
null
--- license: bigscience-openrail-m --- As a proof of concept for improving accuracy, I went from 2 epochs to 100 epochs on the 10k COVID-19 questions in the CDC corpus I collected. Details and code are available here: https://github.com/jasondeden/capstone The model was having issues loading, but those appear to have been back-end problems and are now fixed, or else I was doing something wrong that I somehow missed.
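A minimal extractive question-answering sketch with the `transformers` pipeline, using the distilled SQuAD checkpoint named above (the COVID-19 fine-tune from the capstone repo can be swapped in the same way):

```python
from transformers import pipeline

qa = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")
result = qa(
    question="What are common COVID-19 symptoms?",
    context="CDC guidance lists fever, cough, and shortness of breath among the common symptoms of COVID-19.",
)
# The pipeline returns the answer span plus a confidence score.
print(result["answer"], round(result["score"], 3))
```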
distilbert-base-multilingual-cased
[ "pytorch", "tf", "onnx", "safetensors", "distilbert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "mn", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "th", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,339,633
2022-12-02T04:03:44Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 105 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 20, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 2100, "warmup_steps": 210, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: MPNetModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
xlm-mlm-100-1280
[ "pytorch", "tf", "xlm", "fill-mask", "multilingual", "en", "es", "fr", "de", "zh", "ru", "pt", "it", "ar", "ja", "id", "tr", "nl", "pl", "fa", "vi", "sv", "ko", "he", "ro", "no", "hi", "uk", "cs", "fi", "hu", "th", "da", "ca", "el", "bg", "sr", "ms", "bn", "hr", "sl", "az", "sk", "eo", "ta", "sh", "lt", "et", "ml", "la", "bs", "sq", "arz", "af", "ka", "mr", "eu", "tl", "ang", "gl", "nn", "ur", "kk", "be", "hy", "te", "lv", "mk", "als", "is", "wuu", "my", "sco", "mn", "ceb", "ast", "cy", "kn", "br", "an", "gu", "bar", "uz", "lb", "ne", "si", "war", "jv", "ga", "oc", "ku", "sw", "nds", "ckb", "ia", "yi", "fy", "scn", "gan", "tt", "am", "arxiv:1901.07291", "arxiv:1911.02116", "arxiv:1910.09700", "transformers", "license:cc-by-nc-4.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "XLMWithLMHeadModel" ], "model_type": "xlm", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,106
2022-12-02T05:23:56Z
--- license: mit tags: - generated_from_trainer metrics: - f1 model-index: - name: xlm-roberta-base-finetuned-panx-de-fr results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-de-fr This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1792 - F1: 0.8619 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2899 | 1.0 | 1430 | 0.1941 | 0.8143 | | 0.1547 | 2.0 | 2860 | 0.1673 | 0.8478 | | 0.095 | 3.0 | 4290 | 0.1792 | 0.8619 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.12.1+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
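A minimal NER sketch, assuming the fine-tuned checkpoint is on the Hub and works with the standard `transformers` token-classification pipeline (the repo path below is a placeholder for the full `<user>/xlm-roberta-base-finetuned-panx-de-fr` id):

```python
from transformers import pipeline

# Placeholder repo id; replace with the full <user>/xlm-roberta-base-finetuned-panx-de-fr path on the Hub.
ner = pipeline("token-classification",
               model="xlm-roberta-base-finetuned-panx-de-fr",
               aggregation_strategy="simple")  # merge word pieces into entity spans
print(ner("Jeff Dean arbeitet bei Google in Kalifornien."))
```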
123www/test_model
[ "pytorch", "wav2vec2", "transformers" ]
null
{ "architectures": [ "Wav2Vec2ForSpeechClassification" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: mit tags: - generated_from_trainer model-index: - name: model_output_sorted_reversed_subreddit-wallstreetbets_1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # model_output_sorted_reversed_subreddit-wallstreetbets_1 This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.5417 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 1000 - num_epochs: 2 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 3.9986 | 0.12 | 500 | 3.8892 | | 3.8984 | 0.25 | 1000 | 3.8108 | | 3.8438 | 0.37 | 1500 | 3.7569 | | 3.8001 | 0.5 | 2000 | 3.7196 | | 3.7576 | 0.62 | 2500 | 3.6918 | | 3.7516 | 0.75 | 3000 | 3.6667 | | 3.7337 | 0.87 | 3500 | 3.6451 | | 3.6981 | 1.0 | 4000 | 3.6239 | | 3.5662 | 1.12 | 4500 | 3.6126 | | 3.5496 | 1.25 | 5000 | 3.5951 | | 3.5219 | 1.37 | 5500 | 3.5783 | | 3.5422 | 1.5 | 6000 | 3.5644 | | 3.5354 | 1.62 | 6500 | 3.5525 | | 3.4843 | 1.75 | 7000 | 3.5462 | | 3.52 | 1.87 | 7500 | 3.5424 | | 3.5146 | 1.99 | 8000 | 3.5417 | ### Framework versions - Transformers 4.24.0 - Pytorch 1.13.0+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
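The validation loss above is a mean per-token cross-entropy, so the corresponding perplexity is simply its exponential, exp(3.5417) ≈ 34.5; a one-line check:

```python
import math

val_loss = 3.5417  # final validation loss from the table above
print(math.exp(val_loss))  # ≈ 34.5 perplexity
```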
54Tor/test
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m --- ### ridley from [GT](https://wikiless.org/wiki/Glitch_Techs?lang=en) on [WD](https://huggingface.co/hakurei/waifu-diffusion) via Dreambooth #### model by no3 This is your waifu-diffusion v1.3 model fine-tuned on the ridley concept taught to waifu-diffusion v1.3 with Dreambooth. It can be used by modifying the `instance_prompt`: **sks_ridley** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts). ### note If you want to use it in a UI like [AUTOMATIC1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) or any UI that uses .ckpt files, just download the ckpt file here for your convenience. **just click on "ridley-wd-1.3-beta1.ckpt"** [ridley-wd-1.3-beta1.ckpt](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/ridley-wd-1.3-beta1.ckpt) If you have issues or questions, feel free to visit the Community Tab and start a discussion about it. Here are the images used for training this concept: ![image 1](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/11.png) ![image 2](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/13.png) ![image 3](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/15.png) ![image 4](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/16.png) ![image 5](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/20.png) ![image 6](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/10.png) ![image 7](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/9.png) ![image 8](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/7.png) ![image 9](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/21.png) ![image 10](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/5.png) ![image 11](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/19.png) ![image 12](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/21.png) ![image 13](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/23.png) ![image 14](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/18.png) ![image 15](https://huggingface.co/no3/ridley-wd-1.3-beta1/resolve/main/concept_images/3.png)
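A minimal `diffusers` sketch, assuming the repo also carries diffusers-format weights alongside the .ckpt (as produced by the training notebook above); the instance prompt is `sks_ridley`:

```python
from diffusers import StableDiffusionPipeline
import torch

# Assumes diffusers-format weights exist in the repo; otherwise use the .ckpt with a web UI as described above.
pipe = StableDiffusionPipeline.from_pretrained("no3/ridley-wd-1.3-beta1", torch_dtype=torch.float16).to("cuda")
image = pipe("a portrait of sks_ridley", num_inference_steps=30).images[0]
image.save("ridley.png")
```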
AI-Nordics/bert-large-swedish-cased
[ "pytorch", "megatron-bert", "fill-mask", "sv", "transformers", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "MegatronBertForMaskedLM" ], "model_type": "megatron-bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
2022-12-02T11:03:22Z
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy - precision - recall - f1 model-index: - name: distilbert-base-uncased-finetuned-cola-v6 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola-v6 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4210 - Accuracy: 0.9310 - Precision: 0.9310 - Recall: 0.9310 - F1: 0.9310 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 30 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:| | No log | 6.25 | 50 | 1.2900 | 0.5862 | 0.5862 | 0.5862 | 0.5862 | | No log | 12.5 | 100 | 0.6947 | 0.8621 | 0.8621 | 0.8621 | 0.8621 | | No log | 18.75 | 150 | 0.4672 | 0.9310 | 0.9310 | 0.9310 | 0.9310 | | No log | 25.0 | 200 | 0.4265 | 0.9310 | 0.9310 | 0.9310 | 0.9310 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
AKMyscich/VetTrain-v1.2
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-02T11:58:25Z
--- license: cc-by-4.0 metrics: - bleu4 - meteor - rouge-l - bertscore - moverscore language: ko datasets: - lmqg/qg_koquad pipeline_tag: text2text-generation tags: - question generation - answer extraction widget: - text: "generate question: 1990년 영화 《 <hl> 남부군 <hl> 》에서 단역으로 영화배우 첫 데뷔에 이어 같은 해 KBS 드라마 《지구인》에서 단역으로 출연하였고 이듬해 MBC 《여명의 눈동자》를 통해 단역으로 출연하였다." example_title: "Question Generation Example 1" - text: "generate question: 백신이 없기때문에 예방책은 <hl> 살충제 <hl> 를 사용하면서 서식 장소(찻찬 받침, 배수로, 고인 물의 열린 저장소, 버려진 타이어 등)의 수를 줄임으로써 매개체를 통제할 수 있다." example_title: "Question Generation Example 2" - text: "generate question: <hl> 원테이크 촬영 <hl> 이기 때문에 한 사람이 실수를 하면 처음부터 다시 찍어야 하는 상황이 발생한다." example_title: "Question Generation Example 3" - text: "extract answers: 또한 스피어스는 많은 새로운 여성 아티스트들에게 영향을 끼쳤는데, 대표적으로 데미 로바토, 케이티 페리, 크리스티니아 드바지, 레이디 가가, 리틀 부츠, 셀레나 고메즈 & 더씬, 픽시 로트 이 있다. 2007년 비욘세 놀스는 Total Request Live와의 인터뷰에서 '나는 브리트니를 사랑하고 팬이에요. 특히 새 앨범 Blackout을 좋아해요'라고 말했다. 린제이 로한은 '언제나 브리트니 스피어스에게 영감을 받는다. 학창시절 그녀처럼 타블로이드에 오르기를 꿈꿔왔다'고 말하며 롤 모델로 꼽았다. 스피어스는 현대 음악가들에게 음악적 영감으로 언급되기도 했다. <hl> 마일리 사이러스는 자신의 히트곡 Party in the U.S.A. 가 브리트니에게 영감과 영향을 받은 곡이라고 밝혔다. <hl> 베리 매닐로우의 앨범 15 Minutes 역시 브리트니에게 영감을 얻었다고 언급되었다." example_title: "Answer Extraction Example 1" - text: "extract answers: 지난 22일 아프리카TV는 BJ 철구가 서비스 정지 처분을 받았음을 밝혔다. 서비스 정지 처분을 사유는 철구가 10대 청소년에게 유해한 장면을 방송으로 내보냈기 때문이었다. 문제가 된 장면은 BJ 철구가 미성년자는 시청할 수 없게 하는 19세 시청 가능 설정을 하지 않은 채 흡연하는 모습을 여과 없이 드러낸 장면이다. 아프리카TV는 청소년 보호 정책의 '청소년들이 해로운 환경으로부터 보호받을 수 있도록 조치한다'라고 조항을 근거로 철구에게 서비스 정지 처분을 내렸다. 흡연 이외에 음주 방송 등도 19세 시청 가능 설정을 해야만 방송할 수 있다. <hl> 게다가 철구의 방송 정지 처분은 이번에 처음이 아니라 16번 째기 때문에 더욱더 논란이 되고 있다. <hl>" example_title: "Answer Extraction Example 2" model-index: - name: lmqg/mt5-base-koquad-qg-ae results: - task: name: Text2text Generation type: text2text-generation dataset: name: lmqg/qg_koquad type: default args: default metrics: - name: BLEU4 (Question Generation) type: bleu4_question_generation value: 12.22 - name: ROUGE-L (Question Generation) type: rouge_l_question_generation value: 28.55 - name: METEOR (Question Generation) type: meteor_question_generation value: 29.86 - name: BERTScore (Question Generation) type: bertscore_question_generation value: 84.19 - name: MoverScore (Question Generation) type: moverscore_question_generation value: 83.24 - name: QAAlignedF1Score-BERTScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_f1_score_bertscore_question_answer_generation_with_gold_answer value: 80.28 - name: QAAlignedRecall-BERTScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_recall_bertscore_question_answer_generation_with_gold_answer value: 83.91 - name: QAAlignedPrecision-BERTScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_precision_bertscore_question_answer_generation_with_gold_answer value: 77.03 - name: QAAlignedF1Score-MoverScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_f1_score_moverscore_question_answer_generation_with_gold_answer value: 81.97 - name: QAAlignedRecall-MoverScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_recall_moverscore_question_answer_generation_with_gold_answer value: 86.43 - name: QAAlignedPrecision-MoverScore (Question & Answer Generation (with Gold Answer)) type: qa_aligned_precision_moverscore_question_answer_generation_with_gold_answer value: 78.1 - name: BLEU4 (Answer Extraction) type: bleu4_answer_extraction value: 34.98 - name: ROUGE-L (Answer Extraction) type: rouge_l_answer_extraction value: 83.83 - name: METEOR 
(Answer Extraction) type: meteor_answer_extraction value: 61.26 - name: BERTScore (Answer Extraction) type: bertscore_answer_extraction value: 96.14 - name: MoverScore (Answer Extraction) type: moverscore_answer_extraction value: 95.2 - name: AnswerF1Score (Answer Extraction) type: answer_f1_score__answer_extraction value: 88.43 - name: AnswerExactMatch (Answer Extraction) type: answer_exact_match_answer_extraction value: 83.02 --- # Model Card of `lmqg/mt5-base-koquad-qg-ae` This model is fine-tuned version of [google/mt5-base](https://huggingface.co/google/mt5-base) for question generation and answer extraction jointly on the [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) (dataset_name: default) via [`lmqg`](https://github.com/asahi417/lm-question-generation). ### Overview - **Language model:** [google/mt5-base](https://huggingface.co/google/mt5-base) - **Language:** ko - **Training data:** [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) (default) - **Online Demo:** [https://autoqg.net/](https://autoqg.net/) - **Repository:** [https://github.com/asahi417/lm-question-generation](https://github.com/asahi417/lm-question-generation) - **Paper:** [https://arxiv.org/abs/2210.03992](https://arxiv.org/abs/2210.03992) ### Usage - With [`lmqg`](https://github.com/asahi417/lm-question-generation#lmqg-language-model-for-question-generation-) ```python from lmqg import TransformersQG # initialize model model = TransformersQG(language="ko", model="lmqg/mt5-base-koquad-qg-ae") # model prediction question_answer_pairs = model.generate_qa("1990년 영화 《 남부군 》에서 단역으로 영화배우 첫 데뷔에 이어 같은 해 KBS 드라마 《지구인》에서 단역으로 출연하였고 이듬해 MBC 《여명의 눈동자》를 통해 단역으로 출연하였다.") ``` - With `transformers` ```python from transformers import pipeline pipe = pipeline("text2text-generation", "lmqg/mt5-base-koquad-qg-ae") # answer extraction answer = pipe("generate question: 1990년 영화 《 <hl> 남부군 <hl> 》에서 단역으로 영화배우 첫 데뷔에 이어 같은 해 KBS 드라마 《지구인》에서 단역으로 출연하였고 이듬해 MBC 《여명의 눈동자》를 통해 단역으로 출연하였다.") # question generation question = pipe("extract answers: 또한 스피어스는 많은 새로운 여성 아티스트들에게 영향을 끼쳤는데, 대표적으로 데미 로바토, 케이티 페리, 크리스티니아 드바지, 레이디 가가, 리틀 부츠, 셀레나 고메즈 & 더씬, 픽시 로트 이 있다. 2007년 비욘세 놀스는 Total Request Live와의 인터뷰에서 '나는 브리트니를 사랑하고 팬이에요. 특히 새 앨범 Blackout을 좋아해요'라고 말했다. 린제이 로한은 '언제나 브리트니 스피어스에게 영감을 받는다. 학창시절 그녀처럼 타블로이드에 오르기를 꿈꿔왔다'고 말하며 롤 모델로 꼽았다. 스피어스는 현대 음악가들에게 음악적 영감으로 언급되기도 했다. <hl> 마일리 사이러스는 자신의 히트곡 Party in the U.S.A. 가 브리트니에게 영감과 영향을 받은 곡이라고 밝혔다. 
<hl> 베리 매닐로우의 앨범 15 Minutes 역시 브리트니에게 영감을 얻었다고 언급되었다.") ``` ## Evaluation - ***Metric (Question Generation)***: [raw metric file](https://huggingface.co/lmqg/mt5-base-koquad-qg-ae/raw/main/eval/metric.first.sentence.paragraph_answer.question.lmqg_qg_koquad.default.json) | | Score | Type | Dataset | |:-----------|--------:|:--------|:-----------------------------------------------------------------| | BERTScore | 84.19 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | Bleu_1 | 27.97 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | Bleu_2 | 20.84 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | Bleu_3 | 15.88 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | Bleu_4 | 12.22 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | METEOR | 29.86 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | MoverScore | 83.24 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | ROUGE_L | 28.55 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | - ***Metric (Question & Answer Generation)***: [raw metric file](https://huggingface.co/lmqg/mt5-base-koquad-qg-ae/raw/main/eval/metric.first.answer.paragraph.questions_answers.lmqg_qg_koquad.default.json) | | Score | Type | Dataset | |:--------------------------------|--------:|:--------|:-----------------------------------------------------------------| | QAAlignedF1Score (BERTScore) | 80.28 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | QAAlignedF1Score (MoverScore) | 81.97 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | QAAlignedPrecision (BERTScore) | 77.03 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | QAAlignedPrecision (MoverScore) | 78.1 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | QAAlignedRecall (BERTScore) | 83.91 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | QAAlignedRecall (MoverScore) | 86.43 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | - ***Metric (Answer Extraction)***: [raw metric file](https://huggingface.co/lmqg/mt5-base-koquad-qg-ae/raw/main/eval/metric.first.answer.paragraph_sentence.answer.lmqg_qg_koquad.default.json) | | Score | Type | Dataset | |:-----------------|--------:|:--------|:-----------------------------------------------------------------| | AnswerExactMatch | 83.02 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | AnswerF1Score | 88.43 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | BERTScore | 96.14 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | Bleu_1 | 74.93 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | Bleu_2 | 65.39 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | Bleu_3 | 51.39 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | Bleu_4 | 34.98 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | METEOR | 61.26 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | MoverScore | 95.2 | default | [lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | | ROUGE_L | 83.83 | default | 
[lmqg/qg_koquad](https://huggingface.co/datasets/lmqg/qg_koquad) | ## Training hyperparameters The following hyperparameters were used during fine-tuning: - dataset_path: lmqg/qg_koquad - dataset_name: default - input_types: ['paragraph_answer', 'paragraph_sentence'] - output_types: ['question', 'answer'] - prefix_types: ['qg', 'ae'] - model: google/mt5-base - max_length: 512 - max_length_output: 32 - epoch: 14 - batch: 32 - lr: 0.0001 - fp16: False - random_seed: 1 - gradient_accumulation_steps: 2 - label_smoothing: 0.15 The full configuration can be found at [fine-tuning config file](https://huggingface.co/lmqg/mt5-base-koquad-qg-ae/raw/main/trainer_config.json). ## Citation ``` @inproceedings{ushio-etal-2022-generative, title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration", author = "Ushio, Asahi and Alva-Manchego, Fernando and Camacho-Collados, Jose", booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing", month = dec, year = "2022", address = "Abu Dhabi, U.A.E.", publisher = "Association for Computational Linguistics", } ```
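The two task prefixes shown in the usage section above can also be chained for end-to-end question & answer generation with plain `transformers`. The snippet below is a minimal sketch, not part of the official `lmqg` API: it assumes the answer-extraction step returns a single answer string that appears verbatim in the paragraph, which may not always hold.

```python
from transformers import pipeline

pipe = pipeline("text2text-generation", "lmqg/mt5-base-koquad-qg-ae")

paragraph = "1990년 영화 《 남부군 》에서 단역으로 영화배우 첫 데뷔에 이어 같은 해 KBS 드라마 《지구인》에서 단역으로 출연하였고 이듬해 MBC 《여명의 눈동자》를 통해 단역으로 출연하였다."

# step 1: answer extraction -- highlight the sentence to extract an answer from
answer = pipe("extract answers: <hl> " + paragraph + " <hl>")[0]["generated_text"]

# step 2: question generation -- highlight the extracted answer span inside the paragraph
# (if the answer is not found verbatim, the paragraph is passed through unhighlighted)
highlighted = paragraph.replace(answer, "<hl> " + answer + " <hl>", 1)
question = pipe("generate question: " + highlighted)[0]["generated_text"]

print(question, "->", answer)
```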
AKulk/wav2vec2-base-timit-epochs15
[ "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "transformers", "generated_from_trainer", "license:apache-2.0" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 1060 with parameters: ``` {'batch_size': 16, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": 1060, "warmup_steps": 106, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
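The card computes sentence embeddings but stops short of the semantic-search use it mentions. Below is a minimal sketch of scoring similarity with the built-in `util.cos_sim` helper; the `{MODEL_NAME}` placeholder is kept from the card and must be replaced with the actual repository id, and the query/corpus sentences are illustrative.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('{MODEL_NAME}')  # replace the placeholder with the real model id

query = "How do I reset my password?"
corpus = ["Steps to change your account password", "Weather forecast for tomorrow"]

# cosine similarity between the query and each corpus sentence
scores = util.cos_sim(model.encode(query), model.encode(corpus))
print(scores)
```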
AUBMC-AIM/OCTaGAN
[ "license:cc-by-nc-4.0", "has_space" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-02T13:25:10Z
--- language: en inference: false tags: - text-generation - opt duplicated_from: autoevaluate/zero-shot-classification --- Hello. I am a model, to be evaluated.
AbdelrahmanZayed/my-awesome-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('dfomin/sd-class-butterflies-32') image = pipeline().images[0] image ```
AdapterHub/bert-base-uncased-pf-anli_r3
[ "bert", "en", "dataset:anli", "arxiv:2104.08247", "adapter-transformers", "text-classification" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-12-02T15:51:46Z
--- license: creativeml-openrail-m tags: - text-to-image --- ### Kurzgesagt-style-v2-768 Dreambooth model trained on the v2-768 base model. You can run your new concept via the `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), or locally as sketched below. Don't forget to use the concept prompt! Sample pictures of: Kurzgesagt style (use that in your prompt) ![Kurzgesagt style 0](https://huggingface.co/Fireman4740/kurzgesagt-style-v2-768/resolve/main/xy_grid-0012-2599613694.png)
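A minimal local inference sketch, assuming the checkpoint is the one hosted at `Fireman4740/kurzgesagt-style-v2-768` (inferred from the sample image URL above) and that a CUDA GPU with fp16 support is available; the prompt is illustrative.

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Fireman4740/kurzgesagt-style-v2-768", torch_dtype=torch.float16
).to("cuda")

# include the concept token "Kurzgesagt style" in the prompt; the base model is v2-768,
# so generate at 768x768
image = pipe("a colourful exploding planet, Kurzgesagt style", height=768, width=768).images[0]
image.save("kurzgesagt_sample.png")
```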
AdapterHub/bert-base-uncased-pf-cosmos_qa
[ "bert", "en", "dataset:cosmos_qa", "arxiv:2104.08247", "adapter-transformers", "adapterhub:comsense/cosmosqa" ]
null
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-12-02T16:21:03Z
--- tags: - spacy - token-classification language: - multilingual model-index: - name: xx_LeetSpeakNER_mstsb_mpnet results: - task: name: NER type: token-classification metrics: - name: NER Precision type: precision value: 0.912373549 - name: NER Recall type: recall value: 0.9160452962 - name: NER F Score type: f_score value: 0.9142057358 --- | Feature | Description | | --- | --- | | **Name** | `xx_LeetSpeakNER_mstsb_mpnet` | | **Version** | `0.0.0` | | **spaCy** | `>=3.4.3,<3.5.0` | | **Default Pipeline** | `transformer`, `ner` | | **Components** | `transformer`, `ner` | | **Vectors** | 0 keys, 0 unique vectors (0 dimensions) | | **Sources** | n/a | | **License** | n/a | | **Author** | [n/a]() | ### Label Scheme <details> <summary>View label scheme (4 labels for 1 components)</summary> | Component | Labels | | --- | --- | | **`ner`** | `INV_CAMO`, `LEETSPEAK`, `MIX`, `PUNCT_CAMO` | </details> ### Accuracy | Type | Score | | --- | --- | | `ENTS_F` | 91.42 | | `ENTS_P` | 91.24 | | `ENTS_R` | 91.60 | | `TRANSFORMER_LOSS` | 396910.59 | | `NER_LOSS` | 373097.06 |
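The card lists the pipeline components and entity labels but no loading example. A minimal usage sketch, assuming the packaged pipeline has been installed so spaCy can resolve it by name; the install step and the sample text are illustrative, not from the card.

```python
# hypothetical install step: pip install <path-or-url-to-the-xx_LeetSpeakNER_mstsb_mpnet-wheel>
import spacy

nlp = spacy.load("xx_LeetSpeakNER_mstsb_mpnet")

doc = nlp("g3t fr33 c0ins h-e-r-e")
for ent in doc.ents:
    # possible labels: INV_CAMO, LEETSPEAK, MIX, PUNCT_CAMO
    print(ent.text, ent.label_)
```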
AdapterHub/roberta-base-pf-wnut_17
[ "roberta", "en", "dataset:wnut_17", "arxiv:2104.08247", "adapter-transformers", "token-classification" ]
token-classification
{ "architectures": null, "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
2022-12-02T20:52:50Z
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: fin_sentiment results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fin_sentiment This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 125 | 0.5599 | 0.7827 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
Adrianaforididk/Jinx
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: fin_sentiment results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fin_sentiment This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5405 - Accuracy: 0.7758 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 125 | 0.5405 | 0.7758 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
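The auto-generated card covers training but not inference. A minimal sketch, assuming the fine-tuned checkpoint is available locally; the path and the example sentence are illustrative assumptions.

```python
from transformers import pipeline

# point this at the output directory of the Trainer run, or at the Hub repo it was pushed to
classifier = pipeline("text-classification", model="./fin_sentiment")

print(classifier("Quarterly revenue beat expectations despite rising input costs."))
```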
Ahmadatiya97/Alannah
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for maxvit_base_tf_512.in1k An official MaxViT image classification model. Trained in tensorflow on ImageNet-1k by paper authors. Ported from official Tensorflow implementation (https://github.com/google-research/maxvit) to PyTorch by Ross Wightman. ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py) MaxxViT covers a number of related model architectures that share a common structure including: - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages. - MaxViT - Uniform blocks across all stages, each containing a MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid). - CoAtNeXt - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT-V2 - A MaxxViT variation that removes the window block attention leaving only ConvNeXt blocks and grid attention w/ more width to compensate. Aside from the major variants listed above, there are more subtle changes from model to model. Any model name with the string `rw` are `timm` specific configs w/ modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models so there are variations. All models with the string `tf` are models exactly matching Tensorflow based models by the original paper authors with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released. 
## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 119.9 - GMACs: 138.0 - Activations (M): 704.0 - Image size: 512 x 512 - **Papers:** - MaxViT: Multi-Axis Vision Transformer: https://arxiv.org/abs/2204.01697 - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('maxvit_base_tf_512.in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_base_tf_512.in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 256, 256]) # torch.Size([1, 96, 128, 128]) # torch.Size([1, 192, 64, 64]) # torch.Size([1, 384, 32, 32]) # torch.Size([1, 768, 16, 16]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_base_tf_512.in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 768, 16, 16) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison ### By Top-1 |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99|
|[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| 
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| 
|[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) 
|86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } ``` ```bibtex @article{dai2021coatnet, title={CoAtNet: Marrying Convolution and Attention for All Data Sizes}, author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing}, journal={arXiv preprint arXiv:2106.04803}, year={2021} } ```
Ahmadvakili/A
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for maxvit_base_tf_512.in21k_ft_in1k An official MaxViT image classification model. Pretrained in tensorflow on ImageNet-21k (21843 Google specific instance of ImageNet-22k) and fine-tuned on ImageNet-1k by paper authors. Ported from official Tensorflow implementation (https://github.com/google-research/maxvit) to PyTorch by Ross Wightman. ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py) MaxxViT covers a number of related model architectures that share a common structure including: - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages. - MaxViT - Uniform blocks across all stages, each containing a MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid). - CoAtNeXt - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT-V2 - A MaxxViT variation that removes the window block attention leaving only ConvNeXt blocks and grid attention w/ more width to compensate. Aside from the major variants listed above, there are more subtle changes from model to model. Any model name with the string `rw` are `timm` specific configs w/ modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models so there are variations. All models with the string `tf` are models exactly matching Tensorflow based models by the original paper authors with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released. 
## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 119.9 - GMACs: 138.0 - Activations (M): 704.0 - Image size: 512 x 512 - **Papers:** - MaxViT: Multi-Axis Vision Transformer: https://arxiv.org/abs/2204.01697 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('maxvit_base_tf_512.in21k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_base_tf_512.in21k_ft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 256, 256]) # torch.Size([1, 96, 128, 128]) # torch.Size([1, 192, 64, 64]) # torch.Size([1, 384, 32, 32]) # torch.Size([1, 768, 16, 16]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_base_tf_512.in21k_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 768, 16, 16) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison ### By Top-1 |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53|
50.87| 119.88|138.02| 703.99| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| 
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| 
|[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) 
|86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } ``` ```bibtex @article{dai2021coatnet, title={CoAtNet: Marrying Convolution and Attention for All Data Sizes}, author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing}, journal={arXiv preprint arXiv:2106.04803}, year={2021} } ```
Ahmed59/Demo-Team-5-SIAD
[ "tf", "roberta", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "RobertaForSequenceClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
14
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for maxvit_large_tf_224.in1k An official MaxViT image classification model. Trained in tensorflow on ImageNet-1k by paper authors. Ported from official Tensorflow implementation (https://github.com/google-research/maxvit) to PyTorch by Ross Wightman. ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py) MaxxViT covers a number of related model architectures that share a common structure including: - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages. - MaxViT - Uniform blocks across all stages, each containing a MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid). - CoAtNeXt - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT-V2 - A MaxxViT variation that removes the window block attention leaving only ConvNeXt blocks and grid attention w/ more width to compensate. Aside from the major variants listed above, there are more subtle changes from model to model. Any model name with the string `rw` are `timm` specific configs w/ modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models so there are variations. All models with the string `tf` are models exactly matching Tensorflow based models by the original paper authors with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released. 
## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 211.8 - GMACs: 43.7 - Activations (M): 127.3 - Image size: 224 x 224 - **Papers:** - MaxViT: Multi-Axis Vision Transformer: https://arxiv.org/abs/2204.01697 - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('maxvit_large_tf_224.in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_large_tf_224.in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 128, 112, 112]) # torch.Size([1, 128, 56, 56]) # torch.Size([1, 256, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 1024, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_large_tf_224.in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1024, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison ### By Top-1 |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99|
|[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| 
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| 
|[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) 
|86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } ``` ```bibtex @article{dai2021coatnet, title={CoAtNet: Marrying Convolution and Attention for All Data Sizes}, author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing}, journal={arXiv preprint arXiv:2106.04803}, year={2021} } ```
AhmedBou/TuniBert
[ "pytorch", "bert", "text-classification", "ar", "transformers", "sentiment analysis", "classification", "arabic dialect", "tunisian dialect", "license:apache-2.0" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
44
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for maxvit_large_tf_384.in1k An official MaxViT image classification model. Trained in TensorFlow on ImageNet-1k by paper authors. Ported from the official TensorFlow implementation (https://github.com/google-research/maxvit) to PyTorch by Ross Wightman. ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py) MaxxViT covers a number of related model architectures that share a common structure, including: - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages. - MaxViT - Uniform blocks across all stages, each containing an MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid). - CoAtNeXt - A timm-specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT - A timm-specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT-V2 - A MaxxViT variation that removes the window block attention, leaving only ConvNeXt blocks and grid attention, w/ more width to compensate. Aside from the major variants listed above, there are more subtle changes from model to model. Any model name with the string `rw` is a `timm`-specific config w/ modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models, so there are variations. All models with the string `tf` exactly match the TensorFlow-based models released by the original paper authors, with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released. 
## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 212.0 - GMACs: 132.6 - Activations (M): 445.8 - Image size: 384 x 384 - **Papers:** - MaxViT: Multi-Axis Vision Transformer: https://arxiv.org/abs/2204.01697 - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('maxvit_large_tf_384.in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_large_tf_384.in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 128, 192, 192]) # torch.Size([1, 128, 96, 96]) # torch.Size([1, 256, 48, 48]) # torch.Size([1, 512, 24, 24]) # torch.Size([1, 1024, 12, 12]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_large_tf_384.in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1024, 12, 12) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison ### By Top-1 |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| 
|[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| 
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| 
|[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) 
|86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } ``` ```bibtex @article{dai2021coatnet, title={CoAtNet: Marrying Convolution and Attention for All Data Sizes}, author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing}, journal={arXiv preprint arXiv:2106.04803}, year={2021} } ```
AhmedHassan19/model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for maxvit_large_tf_384.in21k_ft_in1k An official MaxViT image classification model. Pretrained in TensorFlow on ImageNet-21k (the 21843-class, Google-specific instance of ImageNet-22k) and fine-tuned on ImageNet-1k by paper authors. Ported from the official TensorFlow implementation (https://github.com/google-research/maxvit) to PyTorch by Ross Wightman. ### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py) MaxxViT covers a number of related model architectures that share a common structure, including: - CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages. - MaxViT - Uniform blocks across all stages, each containing an MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid). - CoAtNeXt - A timm-specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT - A timm-specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm). - MaxxViT-V2 - A MaxxViT variation that removes the window block attention, leaving only ConvNeXt blocks and grid attention, w/ more width to compensate. Aside from the major variants listed above, there are more subtle changes from model to model. Any model name with the string `rw` is a `timm`-specific config w/ modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models, so there are variations. All models with the string `tf` exactly match the TensorFlow-based models released by the original paper authors, with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released. 
## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 212.0 - GMACs: 132.6 - Activations (M): 445.8 - Image size: 384 x 384 - **Papers:** - MaxViT: Multi-Axis Vision Transformer: https://arxiv.org/abs/2204.01697 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('maxvit_large_tf_384.in21k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_large_tf_384.in21k_ft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 128, 192, 192]) # torch.Size([1, 128, 96, 96]) # torch.Size([1, 256, 48, 48]) # torch.Size([1, 512, 24, 24]) # torch.Size([1, 1024, 12, 12]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'maxvit_large_tf_384.in21k_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1024, 12, 12) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison ### By Top-1 |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 
50.87| 119.88|138.02| 703.99| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| 
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| 
|[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) 
|86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76| |[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22| ## Citation ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{tu2022maxvit, title={MaxViT: Multi-Axis Vision Transformer}, author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao}, journal={ECCV}, year={2022}, } ``` ```bibtex @article{dai2021coatnet, title={CoAtNet: Marrying Convolution and Attention for All Data Sizes}, author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing}, journal={arXiv preprint arXiv:2106.04803}, year={2021} } ```
Akashpb13/xlsr_kurmanji_kurdish
[ "pytorch", "safetensors", "wav2vec2", "automatic-speech-recognition", "kmr", "ku", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
Prompts Per Member

- Jennie - Jenniepink
- Jisoo - Jisoopink
- Lisa - Lisapink
- Rose - Rosepink
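The per-member keywords above go directly into an ordinary text-to-image prompt. The sketch below is only an illustration: the repo id is a placeholder, and if the keywords belong to textual-inversion embeddings rather than a fine-tuned checkpoint, the embedding file would need to be loaded first (e.g. with `load_textual_inversion`).

```python
import torch
from diffusers import StableDiffusionPipeline

# Placeholder repo id -- replace with the actual checkpoint this card belongs to.
pipe = StableDiffusionPipeline.from_pretrained(
    "your-username/blackpink-sd", torch_dtype=torch.float16
).to("cuda")

# Use one of the member keywords (here "Jenniepink") inside the prompt text.
image = pipe("portrait photo of Jenniepink, studio lighting, detailed").images[0]
image.save("jennie.png")
```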
Akashpb13/xlsr_maltese_wav2vec2
[ "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "mt", "dataset:common_voice", "transformers", "audio", "speech", "xlsr-fine-tuning-week", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: creativeml-openrail-m tags: - text-to-image - v2.0 - Embedding --- Textual Inversion Embedding by ConflictX For SD 2.x trained on 768x768 images from midjourney and other sources. Install by downloading the step embedding, and put it in the \embeddings folder Another themed one, this one is more focused on toxic environments and dystopian+dieselpunk themes. Use keyword: ChemPunk ![00574-3737579188-an alchemy table filled with bottles filled with glowing liquid, night time, dark, dim green lighting, chempunk style, render,.png](https://s3.amazonaws.com/moonup/production/uploads/1670023755848-6303c53d7373aacccd859bbd.png) ![00581-2027386424-megastructure, glowing green lights, night time, dark, dim green lighting, chempunk style, render, night time,.png](https://s3.amazonaws.com/moonup/production/uploads/1670023807124-6303c53d7373aacccd859bbd.png) ![00580-973433337-market stall, night time, dark, dim green lighting, chempunk style, render, night time,.png](https://s3.amazonaws.com/moonup/production/uploads/1670023813075-6303c53d7373aacccd859bbd.png) ![00566-739937999-a man with dirty facepaint walking the sewers, green lighting, chempunk style, render.png](https://s3.amazonaws.com/moonup/production/uploads/1670023835802-6303c53d7373aacccd859bbd.png) ![00582-3013747785-a filthy beautiful woman wearing a blue dress, glowing green lights, night time, dark, dim green lighting, chempunk style, rend.png](https://s3.amazonaws.com/moonup/production/uploads/1670023873411-6303c53d7373aacccd859bbd.png) ![00561-2721268141-a poisonous monster crawling in dark sewers looking for something to eat, green lighting, chempunk style, alien isolation,.png](https://s3.amazonaws.com/moonup/production/uploads/1670023846795-6303c53d7373aacccd859bbd.png) ![00583-518571012-a filthy creature with glowing eyes, glowing green lights, night time, dark, dim green lighting, chempunk style, render, night.png](https://s3.amazonaws.com/moonup/production/uploads/1670024090212-6303c53d7373aacccd859bbd.png)
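Besides the webui install described above, the embedding can also be used from diffusers. This is a minimal sketch under stated assumptions: the local filename `chempunk.pt` is hypothetical (use whichever step file you downloaded), and an SD 2.x base checkpoint is assumed since the embedding was trained for SD 2.x at 768x768.

```python
import torch
from diffusers import StableDiffusionPipeline

# SD 2.x base model (the embedding targets SD 2.x, 768x768 images).
pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
).to("cuda")

# Load the downloaded embedding file; "chempunk.pt" is a hypothetical local
# filename and "ChemPunk" is the trigger token named in the card above.
pipe.load_textual_inversion("chempunk.pt", token="ChemPunk")

image = pipe(
    "an abandoned refinery at night, glowing toxic vats, ChemPunk style",
    height=768,
    width=768,
).images[0]
image.save("chempunk.png")
```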
Akira-Yana/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('jpequegn/sd-class-butterflies-32') image = pipeline().images[0] image ```
AkshaySg/gramCorrection
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
4
null
--- license: apache-2.0 tags: - generated_from_trainer metrics: - rouge model-index: - name: t5-small-finetuned-giga-test-full results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-giga-test-full This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. It achieves the following results on the evaluation set: - Loss: nan - Rouge1: 0.0 - Rouge2: 0.0 - Rougel: 0.0 - Rougelsum: 0.0 - Gen Len: 0.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 0.0 | 1.0 | 11791 | nan | 0.0 | 0.0 | 0.0 | 0.0 | 0.0 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
AkshaySg/langid
[ "multilingual", "dataset:VoxLingua107", "speechbrain", "audio-classification", "embeddings", "Language", "Identification", "pytorch", "ECAPA-TDNN", "TDNN", "VoxLingua107", "license:apache-2.0" ]
audio-classification
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
2022-12-03T00:11:03Z
--- language: - vi license: apache-2.0 tags: - whisper-event - generated_from_trainer datasets: - mozilla-foundation/common_voice_11_0 metrics: - wer model-index: - name: Whisper Small Vietnamese results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: mozilla-foundation/common_voice_11_0 vi type: mozilla-foundation/common_voice_11_0 config: vi split: None metrics: - name: Wer type: wer value: 34.21715788320368 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Small Vietnamese This model is a fine-tuned version of [openai/whisper-small](https://huggingface.co/openai/whisper-small) on the mozilla-foundation/common_voice_11_0 vi dataset. It achieves the following results on the evaluation set: - Loss: 0.9921 - Wer: 34.2172 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 64 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - training_steps: 5000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:-------:| | 0.0002 | 124.0 | 1000 | 0.7998 | 21.7706 | | 0.0001 | 249.0 | 2000 | 0.8833 | 28.9690 | | 0.0 | 374.0 | 3000 | 0.9382 | 30.8206 | | 0.0 | 499.0 | 4000 | 0.9754 | 34.4363 | | 0.0 | 624.0 | 5000 | 0.9921 | 34.2172 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0 - Datasets 2.7.1 - Tokenizers 0.13.2
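A minimal transcription sketch for a Whisper fine-tune like this one, assuming the checkpoint is published on the Hub; the repo id below is a placeholder (`openai/whisper-small` is the base model the card names) and the audio path is illustrative.

```python
from transformers import pipeline

# Placeholder repo id -- substitute the fine-tuned checkpoint;
# "openai/whisper-small" is the base model this card says it was tuned from.
asr = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",
    chunk_length_s=30,  # split long recordings into 30 s windows
)

# Any audio file ffmpeg can decode works here; the path is illustrative.
result = asr("recording_vi.wav")
print(result["text"])
```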
Ale/Alen
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuned-mlm_mini results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text metrics: - name: Accuracy type: accuracy value: 0.86176 - name: F1 type: f1 value: 0.925747679614988 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-mlm_mini This model is a fine-tuned version of [muhtasham/bert-mini-mlm-finetuned-emotion](https://huggingface.co/muhtasham/bert-mini-mlm-finetuned-emotion) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.6338 - Accuracy: 0.8618 - F1: 0.9257 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.3398 | 2.55 | 500 | 0.2366 | 0.9032 | 0.9491 | | 0.2028 | 5.1 | 1000 | 0.2750 | 0.8956 | 0.9449 | | 0.1382 | 7.65 | 1500 | 0.2798 | 0.9034 | 0.9492 | | 0.0899 | 10.2 | 2000 | 0.4193 | 0.882 | 0.9373 | | 0.0624 | 12.76 | 2500 | 0.5203 | 0.864 | 0.9270 | | 0.0417 | 15.31 | 3000 | 0.6338 | 0.8618 | 0.9257 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
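Since the card reports IMDB sentiment accuracy and F1, a usage sketch with the text-classification pipeline could look like the following; the fine-tuned repo id is not stated in the card, so the path below is a placeholder.

```python
from transformers import pipeline

# Placeholder repo id for the fine-tuned classifier; the base checkpoint named
# in the card is "muhtasham/bert-mini-mlm-finetuned-emotion".
clf = pipeline("text-classification", model="<user>/finetuned-mlm_mini")

print(clf("A surprisingly touching film with a terrific lead performance."))
# Label names depend on how the fine-tune mapped the IMDB classes (e.g. LABEL_0 / LABEL_1).
```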
Aleksandar/bert-srb-base-cased-oscar
[ "pytorch", "bert", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### Kevin-xd Dreambooth model trained by Allenbv with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept: Kevekek ![0](https://huggingface.co/Allenbv/kevin-xd/resolve/main/sample_images/descarga_(3).png) ![1](https://huggingface.co/Allenbv/kevin-xd/resolve/main/sample_images/00093-2988603660-close_portrait_of_futuristic_(kevekek),_kintsugi,_modern_fine_art,_fractal,_intricate,_elegant,_highly_detailed,_digital_photogr.png) ![2](https://huggingface.co/Allenbv/kevin-xd/resolve/main/sample_images/descarga_(5).png) ![3](https://huggingface.co/Allenbv/kevin-xd/resolve/main/sample_images/00030-1415197780-close_portrait_of_(kevekek),_digital_painting,_artstation,_concept_art,_donato_giancola,_Joseph_Christian_Leyendecker,_WLOP,_Bor.png) ![4](https://huggingface.co/Allenbv/kevin-xd/resolve/main/sample_images/00066-2795391050-A_potrait_of_(kevekek)_as_a_mermaid_,_realistic_shaded,_fine_details._Night_setting._Very_anime_style._Realistic_shaded_lighting.png) ![5](https://huggingface.co/Allenbv/kevin-xd/resolve/main/sample_images/00067-2795391051-A_potrait_of_(kevekek)_as_a_mermaid_,_realistic_shaded,_fine_details._Night_setting._Very_anime_style._Realistic_shaded_lighting.png) ![6](https://huggingface.co/Allenbv/kevin-xd/resolve/main/sample_images/00090-3209711875-an_ultradetailed_beautiful_painting_of_(kevekek)_with_colorful_band_aids,_rave_concert_poster,_retro,_conrad_roset,_greg_rutkows.png) ![7](https://huggingface.co/Allenbv/kevin-xd/resolve/main/sample_images/00077-2029398807-beatiful_(kevekek)_wearing_cat_ears_with_big_sack,_anime_key_visual,_intricate,_stunning,_highly_detailed,_digital_painting,_art.png)
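For a DreamBooth concept like this, inference with `diffusers` is a short script once the weights are on the Hub; the repo id below is inferred from the sample-image URLs above and should be treated as an assumption.

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id inferred from the sample-image URLs above (Allenbv/kevin-xd) -- an assumption.
pipe = StableDiffusionPipeline.from_pretrained(
    "Allenbv/kevin-xd", torch_dtype=torch.float16
).to("cuda")

# "kevekek" is the instance token that appears in the sample prompts.
image = pipe("close portrait of kevekek, digital painting, highly detailed").images[0]
image.save("kevekek.png")
```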
Aleksandar/bert-srb-ner-setimes-lr
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: t5-small-finetuned-giga-test-default-masking results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-small-finetuned-giga-test-default-masking This model is a fine-tuned version of [t5-small](https://huggingface.co/t5-small) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | No log | 1.0 | 63 | 3.0764 | 33.0762 | 13.1207 | 30.2976 | 30.5562 | 18.775 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
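The ROUGE columns reported for checkpoints like this can be computed with the `evaluate` library; a minimal sketch with stand-in strings (not values from this run):

```python
import evaluate

rouge = evaluate.load("rouge")

# Stand-in strings -- replace with model summaries and reference summaries.
predictions = ["the committee approved the budget"]
references = ["committee approves new budget after debate"]

scores = rouge.compute(predictions=predictions, references=references)
print(scores)  # keys include rouge1, rouge2, rougeL, rougeLsum
```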
Aleksandar/electra-srb-ner-setimes-lr
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - generated_from_trainer model-index: - name: bart-finetuned-iirc-prem-final results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-finetuned-iirc-prem-final This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 80 ### Training results ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
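A generation sketch for this kind of BART fine-tune; the hub id and the question/context input format are assumptions (the card only names the `facebook/bart-base` base and the training hyperparameters).

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Placeholder hub id for the fine-tuned checkpoint described in the card.
model_id = "<user>/bart-finetuned-iirc-prem-final"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# IIRC-style reading-comprehension input; the exact formatting is an assumption.
text = "question: When did the bridge open? context: The bridge opened to traffic in 1932 after eight years of construction."
inputs = tokenizer(text, return_tensors="pt", truncation=True)
output_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```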
Aleksandar/electra-srb-ner-setimes
[ "pytorch", "electra", "token-classification", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "ElectraForTokenClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-12-03T02:29:17Z
--- license: apache-2.0 tags: - generated_from_trainer datasets: - imdb metrics: - accuracy - f1 model-index: - name: finetuned-mlm_medium results: - task: name: Text Classification type: text-classification dataset: name: imdb type: imdb config: plain_text split: train args: plain_text metrics: - name: Accuracy type: accuracy value: 0.95416 - name: F1 type: f1 value: 0.976542350677529 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-mlm_medium This model is a fine-tuned version of [muhtasham/bert-medium-mlm-finetuned-emotion](https://huggingface.co/muhtasham/bert-medium-mlm-finetuned-emotion) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2805 - Accuracy: 0.9542 - F1: 0.9765 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - num_epochs: 200 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.2318 | 2.55 | 500 | 0.1428 | 0.9512 | 0.9750 | | 0.0777 | 5.1 | 1000 | 0.1976 | 0.9513 | 0.9750 | | 0.0362 | 7.65 | 1500 | 0.2704 | 0.9388 | 0.9684 | | 0.0234 | 10.2 | 2000 | 0.2245 | 0.9578 | 0.9784 | | 0.0181 | 12.76 | 2500 | 0.3703 | 0.9310 | 0.9643 | | 0.0158 | 15.31 | 3000 | 0.6137 | 0.9001 | 0.9474 | | 0.013 | 17.86 | 3500 | 0.2805 | 0.9542 | 0.9765 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.12.1+cu113 - Datasets 2.7.1 - Tokenizers 0.13.2
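The Accuracy and F1 columns in the results table can be reproduced with the `evaluate` library; the toy predictions below are stand-ins, not values from this run.

```python
import evaluate

accuracy = evaluate.load("accuracy")
f1 = evaluate.load("f1")

# Stand-in label ids -- replace with real model predictions and gold labels.
predictions = [1, 0, 1, 1]
references = [1, 0, 0, 1]

print(accuracy.compute(predictions=predictions, references=references))  # {'accuracy': 0.75}
print(f1.compute(predictions=predictions, references=references))        # {'f1': 0.8}
```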
Aleksandar1932/gpt2-rock-124439808
[ "pytorch", "gpt2", "text-generation", "transformers" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11
null
Access to model hemu2097/whisper-small-en is restricted and you are not in the authorized list. Visit https://huggingface.co/hemu2097/whisper-small-en to ask for access.
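Gated repositories like the one in this message only load after access has been granted and a user token is supplied; the snippet below is a generic sketch of that flow, not documentation for this particular repo, and the auto classes chosen are an assumption based on the model name.

```python
from huggingface_hub import login
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq

# Authenticate once access has been granted on the model page
# (equivalently, run `huggingface-cli login` in a shell).
login()

model_id = "hemu2097/whisper-small-en"  # the gated repo named in the message above
processor = AutoProcessor.from_pretrained(model_id, use_auth_token=True)
model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, use_auth_token=True)
```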
adorkin/xlm-roberta-en-ru-emoji
[ "pytorch", "safetensors", "xlm-roberta", "text-classification", "en", "ru", "dataset:tweet_eval", "transformers" ]
text-classification
{ "architectures": [ "XLMRobertaForSequenceClassification" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- license: creativeml-openrail-m tags: - text-to-image widget: - text: rgbylr --- ### rgbylr Dreambooth model trained by rq0 with [Hugging Face Dreambooth Training Space](https://huggingface.co/spaces/multimodalart/dreambooth-training) with the v2-768 base model You run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb). Don't forget to use the concept prompts! Sample pictures of: rgbylr (use that on your prompt) ![rgbylr 0](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%281%29.jpg)![rgbylr 1](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%282%29.jpg)![rgbylr 2](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%283%29.jpg)![rgbylr 3](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%284%29.jpg)![rgbylr 4](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%285%29.jpg)![rgbylr 5](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%286%29.jpg)![rgbylr 6](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%287%29.jpg)![rgbylr 7](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%288%29.jpg)![rgbylr 8](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%289%29.jpg)![rgbylr 9](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%2810%29.jpg)![rgbylr 10](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%2811%29.jpg)![rgbylr 11](https://huggingface.co/rq0/rgbylr/resolve/main/concept_images/rgbylr_%2812%29.jpg)
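Because this concept was trained on the v2-768 base, sampling at 768x768 with the `rgbylr` trigger word is the natural default; the repo id below is taken from the concept-image URLs and is an assumption.

```python
import torch
from diffusers import StableDiffusionPipeline

# Repo id taken from the concept-image URLs above (rq0/rgbylr) -- an assumption.
pipe = StableDiffusionPipeline.from_pretrained(
    "rq0/rgbylr", torch_dtype=torch.float16
).to("cuda")

# v2-768 checkpoints are trained at 768x768, so request that resolution
# and include the "rgbylr" trigger word from the widget prompt.
image = pipe("rgbylr, studio portrait, soft light", height=768, width=768).images[0]
image.save("rgbylr.png")
```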
AlekseyKulnevich/Pegasus-QuestionGeneration
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
17
null
--- license: creativeml-openrail-m tags: - text-to-image --- ### azzy on Stable Diffusion via Dreambooth #### model by jefsnacker This is the Stable Diffusion model fine-tuned on the azzy concept, taught to Stable Diffusion with Dreambooth. It can be used by modifying the `instance_prompt`: **azzy cat** You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb). And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts) Here are the images used for training this concept: ![image 0](https://huggingface.co/jefsnacker/azzy/resolve/main/concept_images/IMG_6069.jpg) ![image 1](https://huggingface.co/jefsnacker/azzy/resolve/main/concept_images/7210C234-9D53-4818-BFFB-0A71755C9225.jpg) ![image 2](https://huggingface.co/jefsnacker/azzy/resolve/main/concept_images/IMG_6141.jpg) ![image 3](https://huggingface.co/jefsnacker/azzy/resolve/main/concept_images/IMG_1318.jpg) ![image 4](https://huggingface.co/jefsnacker/azzy/resolve/main/concept_images/IMG_3976.jpg)
AlexKay/xlm-roberta-large-qa-multilingual-finedtuned-ru
[ "pytorch", "xlm-roberta", "question-answering", "en", "ru", "multilingual", "arxiv:1912.09723", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10,012
null
--- license: creativeml-openrail-m tags: - stable-diffusion - text-to-image --- # gGWoman This is my new Stable Diffusion custom model that brings you a generic woman, generated with non-licensed images. The magic word is: gGWoman If you enjoy my work, please consider supporting me: [![Buy me a coffee](https://badgen.net/badge/icon/buymeacoffee?icon=buymeacoffee&label)](https://www.buymeacoffee.com/elrivx) Examples: <img src=https://imgur.com/CQR59kd.png width=30% height=30%> <img src=https://imgur.com/WVh9kE1.png width=30% height=30%> <img src=https://imgur.com/y0twso7.png width=30% height=30%> <img src=https://imgur.com/FVxkzzj.png width=30% height=30%> ## License This model is open access and available to all, with a CreativeML OpenRAIL-M license further specifying rights and usage. The CreativeML OpenRAIL License specifies: 1. You can't use the model to deliberately produce nor share illegal or harmful outputs or content 2. The authors claims no rights on the outputs you generate, you are free to use them and are accountable for their use which must not go against the provisions set in the license 3. You may re-distribute the weights and use the model commercially and/or as a service. If you do, please be aware you have to include the same use restrictions as the ones in the license and share a copy of the CreativeML OpenRAIL-M to all your users (please read the license entirely and carefully) [Please read the full license here](https://huggingface.co/spaces/CompVis/stable-diffusion-license)
Alexander-Learn/bert-finetuned-squad-accelerate
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: creativeml-openrail-m tags: - text-to-image - stable-diffusion --- ### SMNBSLY Dreambooth model trained by Taboodada with [TheLastBen's fast-DreamBooth](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast-DreamBooth.ipynb) notebook Test the concept via A1111 Colab [fast-Colab-A1111](https://colab.research.google.com/github/TheLastBen/fast-stable-diffusion/blob/main/fast_stable_diffusion_AUTOMATIC1111.ipynb) Or you can run your new concept via `diffusers` [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb) Sample pictures of this concept: ![0](https://huggingface.co/Taboodada/smnbsly/resolve/main/sample_images/SMNBSLY_(3).jpeg) ![1](https://huggingface.co/Taboodada/smnbsly/resolve/main/sample_images/SMNBSLY_(7).jpeg) ![2](https://huggingface.co/Taboodada/smnbsly/resolve/main/sample_images/SMNBSLY_(6).jpeg)
AlgoveraAI/dcgan
[ "pytorch", "transformers" ]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- license: lgpl-lr ---import java.io.FileNotFoundException; import java.io.PrintWriter; public class GenerateDummyCode { public static void main(String[] args) { String className = "Eben"; String newLine = "\n"; String tab = "\t"; String classStart = "public class " + className + " {"; String closeBracket = "}"; String mainStart = "public static void main(String[] args) {"; String dummyContent = ""; String temp = null; int size = 10000; for (int i = 1; i <= size; i++) { temp = String.format("System.out.println(\" %6d .ci satir\");", i); dummyContent += tab + tab + temp + newLine; } String outputCode = classStart + newLine + newLine + tab + mainStart + newLine + newLine + dummyContent + newLine + tab + closeBracket + newLine + closeBracket + newLine; // System.out.println(result); // To print the output to the console PrintWriter out = null; try { out = new PrintWriter(className + ".java"); } catch (FileNotFoundException e) { System.out.println("File not found!"); } out.println(outputCode); out.close(); } } fun removeObject(algorithmTable: int, algorithm: int): int { for (list in methodString()) { val targetUnsafe: String = "**********" for (tree in unsafeNumberPort()) { codeTableString(142) codeThing43(ObjectList(), 67) } } thing43(285, 621) target(76) portListUser("NaN", "FIRE") for (click in unsafe()) { objectObjectAlgorithm(RowPerson()) val propertyListObject: User = personUnsafe(410, 86) } objectObject() println("HACK: the docs told me to do this") val thing43: int = 664 println("schnitzel") val stringString: int = 140 propertyTable(322) tree() val tableClickAlgorithm: String = "some error" user() for (target in unsafeTableElement()) { unsafe(75, 442, 171) } for (thing in arrayString()) { click(273, PersonString()) val port: String = "test" } clickObjectObject(110) return thing43 }