Dataset column schema:

| Column | Type | Notes |
| :-- | :-- | :-- |
| modelId | string | lengths 4 to 81 |
| tags | sequence | |
| pipeline_tag | string | 17 classes |
| config | dict | |
| downloads | int64 | 0 to 59.7M |
| first_commit | timestamp[ns, tz=UTC] | |
| card | string | lengths 51 to 438k |
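The schema above can be consumed programmatically. Below is a minimal sketch, assuming this dump is published as a Hugging Face dataset; the repo id `username/model-metadata` is a placeholder (not the actual source of this dump), and only standard `datasets` calls are used.

```python
# Minimal sketch: load a dataset with the columns above and rank rows by downloads.
# "username/model-metadata" is a hypothetical repo id, used here only for illustration.
from datasets import load_dataset

ds = load_dataset("username/model-metadata", split="train")

# Keep rows that declare a pipeline tag and have at least one download.
filtered = ds.filter(
    lambda row: row["pipeline_tag"] is not None and row["downloads"] > 0
)

# Print the five most-downloaded entries.
top = sorted(filtered, key=lambda row: row["downloads"], reverse=True)[:5]
for row in top:
    print(row["modelId"], row["pipeline_tag"], row["downloads"])
```

The rows that follow are reproduced verbatim; note that the `card` cell stores the full model-card markdown as a single string, so any tables or code fences inside it appear flattened onto one line.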
Dizoid/Lll
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | name | Adam | | learning_rate | 0.001 | | decay | 0.0 | | beta_1 | 0.9 | | beta_2 | 0.999 | | epsilon | 1e-07 | | amsgrad | False | | training_precision | float32 | ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
Dkwkk/Da
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training Metrics Model history needed ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
Dkwkk/W
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
Dmitriiserg/Pxd
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 0.001, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ## Training Metrics Model history needed ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
Dmitry12/sber
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- library_name: keras --- ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'Adam', 'learning_rate': 0.001, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False} - training_precision: float32 ## Training Metrics Model history needed ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
Doogie/Waynehills-KE-T5-doogie
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: apache-2.0 tags: - image-classification - keras library_name: keras --- Keras Dog vs Cat based on the [official Keras documentation](https://keras.io/examples/vision/image_classification_from_scratch/)
Waynehillsdev/Waynehills_summary_tensorflow
[ "tf", "t5", "text2text-generation", "transformers", "generated_from_keras_callback", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- license: apache-2.0 tags: - image-classification - keras library_name: keras --- # Model Card for nateraw/keras-mobilevit-xxs-flowers
Doquey/DialoGPT-small-Luisbot1
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
# Flowers GAN <a href="https://colab.research.google.com/github/nateraw/huggingface-hub-examples/blob/main/pytorch_lightweight_gan.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> Give the [Github Repo](https://github.com/nateraw/huggingface-hub-examples) a ⭐️ ### Generated Images <video width="320" height="240" controls> <source src="https://huggingface.co/nateraw/lightweight-gan-flowers-64/resolve/main/generated.mp4" type="video/mp4"> </video> ### EMA <video width="320" height="240" controls> <source src="https://huggingface.co/nateraw/lightweight-gan-flowers-64/resolve/main/ema.mp4" type="video/mp4"> </video>
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-4
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
44
2021-11-15T19:59:19Z
--- tags: - image-classification - timm - generated_from_trainer library_tag: timm datasets: - cats_vs_dogs --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my-cool-timm-model-2 This model is a fine-tuned version of [resnet18](https://huggingface.co/resnet18) on the cats_vs_dogs dataset. It achieves the following results on the evaluation set: - Loss: 0.2510 - Acc1: 95.2150 - Acc5: 100.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Acc1 | Acc5 | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-----:| | No log | 0.07 | 5 | 0.3436 | 92.0820 | 100.0 | | 0.4914 | 0.14 | 10 | 0.2510 | 95.2150 | 100.0 | ### Framework versions - Transformers 4.12.3 - Pytorch 1.10.0+cu111 - Datasets 1.15.1 - Tokenizers 0.10.3
DoyyingFace/bert-asian-hate-tweets-asian-unclean-freeze-8
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- tags: - image-classification - timm - generated_from_trainer datasets: - cats_vs_dogs model-index: - name: my-cool-timm-model-3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my-cool-timm-model-3 This model is a fine-tuned version of [resnet18](https://huggingface.co/resnet18) on the cats_vs_dogs dataset. It achieves the following results on the evaluation set: - Loss: 0.2455 - Acc1: 94.4175 - Acc5: 100.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Acc1 | Acc5 | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-----:| | 0.5152 | 0.14 | 10 | 0.2455 | 94.4175 | 100.0 | ### Framework versions - Transformers 4.12.3 - Pytorch 1.10.0+cu111 - Datasets 1.15.1 - Tokenizers 0.10.3
DoyyingFace/bert-asian-hate-tweets-asian-unclean-slanted
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
29
null
--- tags: - image-classification - timm library_tag: timm --- # Model card for my-cool-timm-model
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-100
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: pasta-pizza-ravioli results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9375 --- # pasta-pizza-ravioli Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### pasta ![pasta](images/pasta.jpg) #### pizza ![pizza](images/pizza.jpg) #### ravioli ![ravioli](images/ravioli.jpg)
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-25
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
--- license: apache-2.0 tags: - image-classification - huggingpics - generated_from_trainer model-index: - name: pasta-shapes results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pasta-shapes This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3761 - Acc: 0.9403 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Acc | |:-------------:|:-----:|:----:|:---------------:|:------:| | 1.0328 | 1.0 | 24 | 0.9442 | 0.7463 | | 0.8742 | 2.0 | 48 | 0.7099 | 0.9403 | | 0.6451 | 3.0 | 72 | 0.5050 | 0.9403 | | 0.508 | 4.0 | 96 | 0.3761 | 0.9403 | ### Framework versions - Transformers 4.12.3 - Pytorch 1.9.0+cu111 - Tokenizers 0.10.3
DoyyingFace/bert-asian-hate-tweets-asian-unclean-warmup-75
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
37
2021-08-23T21:37:57Z
--- license: apache-2.0 tags: - huggingpics - image-classification - generated_from_trainer metrics: - accuracy model_index: - name: planes-trains-automobiles results: - task: name: Image Classification type: image-classification metric: name: Accuracy type: accuracy value: 0.9850746268656716 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # planes-trains-automobiles This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the huggingpics dataset. It achieves the following results on the evaluation set: - Loss: 0.0534 - Accuracy: 0.9851 ## Model description Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### automobiles ![automobiles](images/automobiles.jpg) #### planes ![planes](images/planes.jpg) #### trains ![trains](images/trains.jpg) ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 1337 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.0283 | 1.0 | 48 | 0.0434 | 0.9851 | | 0.0224 | 2.0 | 96 | 0.0548 | 0.9851 | | 0.0203 | 3.0 | 144 | 0.0445 | 0.9851 | | 0.0195 | 4.0 | 192 | 0.0534 | 0.9851 | ### Framework versions - Transformers 4.9.2 - Pytorch 1.9.0+cu102 - Datasets 1.11.0 - Tokenizers 0.10.3
DoyyingFace/bert-asian-hate-tweets-asian-unclean-with-clean-valid
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
2021-10-10T22:20:39Z
--- tags: - image-classification - keras library_name: keras --- # Quickdraw Model
DoyyingFace/bert-asian-hate-tweets-asonam-clean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
27
2021-09-04T20:45:59Z
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers-09-04-2021 results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.8657407164573669 --- # rare-puppers-09-04-2021 Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
DoyyingFace/bert-asian-hate-tweets-asonam-unclean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
2021-12-10T21:18:34Z
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers-123 results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9701492786407471 --- # rare-puppers-123 Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
DoyyingFace/bert-asian-hate-tweets-concat-clean-with-unclean-valid
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers-demo results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9101123809814453 --- # rare-puppers-demo Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### husky ![husky](images/husky.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
DoyyingFace/bert-asian-hate-tweets-concat-clean
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
25
null
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers-new-auth results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.89552241563797 --- # rare-puppers-new-auth Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
albert-base-v1
[ "pytorch", "tf", "safetensors", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
38,156
2021-06-29T20:17:28Z
--- tags: - image-classification - pytorch - huggingpics metrics: - accuracy model-index: - name: rare-puppers results: - task: name: Image Classification type: image-classification metrics: - name: Accuracy type: accuracy value: 0.9583333134651184 --- # rare-puppers Autogenerated by HuggingPics🤗🖼️ Create your own image classifier for **anything** by running [the demo on Google Colab](https://colab.research.google.com/github/nateraw/huggingpics/blob/main/HuggingPics.ipynb). Report any issues with the demo at the [github repo](https://github.com/nateraw/huggingpics). ## Example Images #### corgi ![corgi](images/corgi.jpg) #### samoyed ![samoyed](images/samoyed.jpg) #### shiba inu ![shiba inu](images/shiba_inu.jpg)
albert-xlarge-v2
[ "pytorch", "tf", "albert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1909.11942", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "AlbertForMaskedLM" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,973
2021-11-23T04:45:30Z
--- tags: - image-classification - timm library_tag: timm --- # Model card for resnet18-random-classifier-123
bert-base-cased-finetuned-mrpc
[ "pytorch", "tf", "jax", "bert", "fill-mask", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
11,644
2021-09-22T18:01:31Z
--- tags: - image-classification - timm - generated_from_trainer datasets: - beans model-index: - name: model results: - task: name: Image Classification type: image-classification dataset: name: beans type: beans args: default library_tag: timm --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # model This model is a fine-tuned version of [resnet18](https://huggingface.co/resnet18) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 1.0219 - Acc1: 56.3910 - Acc5: 100.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 20 ### Training results ### Framework versions - Transformers 4.10.2 - Pytorch 1.7.1 - Datasets 1.12.1 - Tokenizers 0.10.3
bert-base-cased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,621,271
2021-12-03T06:29:58Z
--- tags: - image-classification - timm library_tag: timm --- # Model card for resnet50-oxford-iiit-pet ![boxer](boxer.jpg)
bert-base-chinese
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "zh", "arxiv:1810.04805", "transformers", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,377,486
2021-04-13T09:38:42Z
--- tags: - image-classification - pytorch datasets: - imagenet --- # Resnet50 Model from Torchvision ## Using the model ``` pip install modelz ``` ```python from modelz import ResnetModel model = ResnetModel.from_pretrained('nateraw/resnet50') ex_input = torch.rand(4, 3, 224, 224) out = model(ex_input) ```
bert-base-german-dbmdz-uncased
[ "pytorch", "jax", "safetensors", "bert", "fill-mask", "de", "transformers", "license:mit", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
68,305
2021-09-28T01:56:21Z
--- tags: - image-classification library_name: generic --- # Test
bert-base-multilingual-uncased
[ "pytorch", "tf", "jax", "safetensors", "bert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
328,585
null
--- tags: - generated_from_trainer datasets: - image_folder model_index: - name: test_model_a results: - task: name: Image Classification type: image-classification dataset: name: image_folder type: image_folder args: default --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # test_model_a This model is a fine-tuned version of [lysandre/tiny-vit-random](https://huggingface.co/lysandre/tiny-vit-random) on the image_folder dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 40 ### Framework versions - Transformers 4.8.2 - Pytorch 1.9.0+cu102 - Datasets 1.9.1.dev0 - Tokenizers 0.10.3
bert-base-uncased
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "fill-mask", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "exbert", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
59,663,489
2021-09-28T04:36:26Z
--- tags: - text-classification library_name: generic --- # Test
bert-large-cased-whole-word-masking-finetuned-squad
[ "pytorch", "tf", "jax", "rust", "safetensors", "bert", "question-answering", "en", "dataset:bookcorpus", "dataset:wikipedia", "arxiv:1810.04805", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "BertForQuestionAnswering" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,214
2021-08-31T21:59:55Z
--- tags: - image-classification - timm library_tag: timm --- # Model card for `timm-resnet50-beans` **TODO** **For now, try dragging and dropping this image into the inference widget. It should classify as angular_leaf_spot.** ![leaf_example](angular_leaf_spot_train.304.jpg)
distilbert-base-german-cased
[ "pytorch", "safetensors", "distilbert", "fill-mask", "de", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
43,667
2021-09-04T01:12:45Z
--- tags: - image-classification - timm - generated_from_trainer datasets: - beans metrics: - accuracy model_index: - name: timm-resnet18-beans-test-2 results: - task: name: Image Classification type: image-classification dataset: name: beans type: beans args: default metric: name: Accuracy type: accuracy value: 0.5789473684210527 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # timm-resnet18-beans-test-2 This model is a fine-tuned version of [resnet18](https://huggingface.co/resnet18) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 1.3225 - Accuracy: 0.5789 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2601 | 0.02 | 5 | 2.8349 | 0.5113 | | 1.8184 | 0.04 | 10 | 1.3225 | 0.5789 | ### Framework versions - Transformers 4.9.1 - Pytorch 1.9.0 - Datasets 1.11.1.dev0 - Tokenizers 0.10.3
distilbert-base-multilingual-cased
[ "pytorch", "tf", "onnx", "safetensors", "distilbert", "fill-mask", "multilingual", "af", "sq", "ar", "an", "hy", "ast", "az", "ba", "eu", "bar", "be", "bn", "inc", "bs", "br", "bg", "my", "ca", "ceb", "ce", "zh", "cv", "hr", "cs", "da", "nl", "en", "et", "fi", "fr", "gl", "ka", "de", "el", "gu", "ht", "he", "hi", "hu", "is", "io", "id", "ga", "it", "ja", "jv", "kn", "kk", "ky", "ko", "la", "lv", "lt", "roa", "nds", "lm", "mk", "mg", "ms", "ml", "mr", "mn", "min", "ne", "new", "nb", "nn", "oc", "fa", "pms", "pl", "pt", "pa", "ro", "ru", "sco", "sr", "scn", "sk", "sl", "aze", "es", "su", "sw", "sv", "tl", "tg", "th", "ta", "tt", "te", "tr", "uk", "ud", "uz", "vi", "vo", "war", "cy", "fry", "pnb", "yo", "dataset:wikipedia", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "DistilBertForMaskedLM" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8,339,633
2021-09-04T00:50:49Z
--- tags: - image-classification - timm - generated_from_trainer datasets: - beans metrics: - accuracy model_index: - name: timm-resnet18-beans-test results: - task: name: Image Classification type: image-classification dataset: name: beans type: beans args: default metric: name: Accuracy type: accuracy value: 0.3609022556390977 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # timm-resnet18-beans-test This model is a fine-tuned version of [resnet18](https://huggingface.co/resnet18) on the beans dataset. It achieves the following results on the evaluation set: - Loss: 1.2126 - Accuracy: 0.3609 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10 ### Training results ### Framework versions - Transformers 4.9.1 - Pytorch 1.9.0 - Datasets 1.11.1.dev0 - Tokenizers 0.10.3
distilbert-base-uncased-distilled-squad
[ "pytorch", "tf", "tflite", "coreml", "safetensors", "distilbert", "question-answering", "en", "dataset:squad", "arxiv:1910.01108", "arxiv:1910.09700", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "DistilBertForQuestionAnswering" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
100,097
2021-09-27T01:15:47Z
--- tags: - image-classification - timm library_tag: timm --- # Model card for timm-resnet18-imagenette-160px-5-epochs
distilbert-base-uncased-finetuned-sst-2-english
[ "pytorch", "tf", "rust", "safetensors", "distilbert", "text-classification", "en", "dataset:sst2", "dataset:glue", "arxiv:1910.01108", "doi:10.57967/hf/0181", "transformers", "license:apache-2.0", "model-index", "has_space" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3,060,704
2021-10-08T03:14:22Z
--- tags: - timm - image-classification library_name: timm ---
t5-3b
[ "pytorch", "tf", "t5", "text2text-generation", "en", "fr", "ro", "de", "multilingual", "dataset:c4", "arxiv:1805.12471", "arxiv:1708.00055", "arxiv:1704.05426", "arxiv:1606.05250", "arxiv:1808.09121", "arxiv:1810.12885", "arxiv:1905.10044", "arxiv:1910.09700", "transformers", "summarization", "translation", "license:apache-2.0", "autotrain_compatible", "has_space" ]
translation
{ "architectures": [ "T5WithLMHeadModel" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
103,474
2021-10-29T04:04:00Z
--- tags: - image-classification - timm library_tag: timm --- # Model card for cait_m48_448
t5-base
[ "pytorch", "tf", "jax", "rust", "safetensors", "t5", "text2text-generation", "en", "fr", "ro", "de", "dataset:c4", "arxiv:1805.12471", "arxiv:1708.00055", "arxiv:1704.05426", "arxiv:1606.05250", "arxiv:1808.09121", "arxiv:1810.12885", "arxiv:1905.10044", "arxiv:1910.09700", "transformers", "summarization", "translation", "license:apache-2.0", "autotrain_compatible", "has_space" ]
translation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6,339,864
2021-10-29T04:22:19Z
--- tags: - image-classification - timm library_tag: timm --- # Model card for cait_s24_224
xlm-mlm-xnli15-1024
[ "pytorch", "tf", "xlm", "fill-mask", "multilingual", "en", "fr", "es", "de", "el", "bg", "ru", "tr", "ar", "vi", "th", "zh", "hi", "sw", "ur", "arxiv:1901.07291", "arxiv:1910.09700", "transformers", "license:cc-by-nc-4.0", "autotrain_compatible", "has_space" ]
fill-mask
{ "architectures": [ "XLMWithLMHeadModel" ], "model_type": "xlm", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2,050
2021-10-29T04:40:27Z
--- tags: - image-classification - timm library_tag: timm --- # Model card for convit_base
AAli/distilbert-base-uncased-finetuned-cola
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2021-12-09T13:52:39Z
--- license: mit tags: - generated_from_trainer model-index: - name: xlm-roberta-base-squad2-distilled-finetuned-chaii-small results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-squad2-distilled-finetuned-chaii-small This model is a fine-tuned version of [deepset/xlm-roberta-base-squad2-distilled](https://huggingface.co/deepset/xlm-roberta-base-squad2-distilled) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.12.5 - Pytorch 1.9.1 - Datasets 1.16.1 - Tokenizers 0.10.3
ATGdev/DialoGPT-small-harrypotter
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
16
2020-10-30T14:15:17Z
--- language: - te tags: - MaskedLM - Telugu - RoBERTa - Question-Answering - Token Classification - Text Classification --- # Indic-Transformers Telugu RoBERTa ## Model description This is a RoBERTa language model pre-trained on ~2 GB of monolingual training corpus. The pre-training data was majorly taken from [OSCAR](https://oscar-corpus.com/). This model can be fine-tuned on various downstream tasks like text-classification, POS-tagging, question-answering, etc. Embeddings from this model can also be used for feature-based training. ## Intended uses & limitations #### How to use ``` from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained('neuralspace-reverie/indic-transformers-te-roberta') model = AutoModel.from_pretrained('neuralspace-reverie/indic-transformers-te-roberta') text = "మీరు ఎలా ఉన్నారు" input_ids = tokenizer(text, return_tensors='pt')['input_ids'] out = model(input_ids)[0] print(out.shape) # out = [1, 14, 768] ``` #### Limitations and bias The original language model has been trained using `PyTorch` and hence the use of `pytorch_model.bin` weights file is recommended. The h5 file for `Tensorflow` has been generated manually by commands suggested [here](https://huggingface.co/transformers/model_sharing.html).
AVSilva/bertimbau-large-fine-tuned-md
[ "pytorch", "bert", "fill-mask", "transformers", "generated_from_trainer", "license:mit", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2020-10-07T13:24:00Z
--- language: is datasets: - Icelandic portion of the OSCAR corpus from INRIA - oscar --- # IsRoBERTa a RoBERTa-like masked language model Probably the first icelandic transformer language model! ## Overview **Language:** Icelandic **Downstream-task:** masked-lm **Training data:** OSCAR corpus **Code:** See [here](https://github.com/neurocode-io/icelandic-language-model) **Infrastructure**: 1x Nvidia K80 ## Hyperparameters ``` per_device_train_batch_size = 48 n_epochs = 1 vocab_size = 52.000 max_position_embeddings = 514 num_attention_heads = 12 num_hidden_layers = 6 type_vocab_size = 1 learning_rate=0.00005 ``` ## Usage ### In Transformers ```python from transformers import ( pipeline, AutoTokenizer, AutoModelWithLMHead ) model_name = "neurocode/IsRoBERTa" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelWithLMHead.from_pretrained(model_name) >>> fill_mask = pipeline( ... "fill-mask", ... model=model, ... tokenizer=tokenizer ... ) >>> result = fill_mask("Hann fór út að <mask>.") >>> result [ {'sequence': '<s>Hann fór út að nýju.</s>', 'score': 0.03395755589008331, 'token': 2219, 'token_str': 'Ġnýju'}, {'sequence': '<s>Hann fór út að undanförnu.</s>', 'score': 0.029087543487548828, 'token': 7590, 'token_str': 'Ġundanförnu'}, {'sequence': '<s>Hann fór út að lokum.</s>', 'score': 0.024420788511633873, 'token': 4384, 'token_str': 'Ġlokum'}, {'sequence': '<s>Hann fór út að þessu.</s>', 'score': 0.021231256425380707, 'token': 921, 'token_str': 'Ġþessu'}, {'sequence': '<s>Hann fór út að honum.</s>', 'score': 0.0205782949924469, 'token': 1136, 'token_str': 'Ġhonum'} ] ``` ## Authors Bobby Donchev: `contact [at] donchev.is` Elena Cramer: `elena.cramer [at] neurocode.io` ## About us We bring AI software for our customers live Our focus: AI software development Get in touch: [LinkedIn](https://de.linkedin.com/company/neurocodeio) | [Website](https://neurocode.io)
Abdullaziz/model1
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2021-12-15T16:18:53Z
```python from transformers import EncoderDecoderModel from importlib.machinery import SourceFileLoader from transformers.file_utils import cached_path, hf_bucket_url import torch import os ## Load model & tokenizer cache_dir='./cache' model_name='nguyenvulebinh/spelling-oov' def download_tokenizer_files(): resources = ['envibert_tokenizer.py', 'dict.txt', 'sentencepiece.bpe.model'] for item in resources: if not os.path.exists(os.path.join(cache_dir, item)): tmp_file = hf_bucket_url(model_name, filename=item) tmp_file = cached_path(tmp_file,cache_dir=cache_dir) os.rename(tmp_file, os.path.join(cache_dir, item)) download_tokenizer_files() spell_tokenizer = SourceFileLoader("envibert.tokenizer",os.path.join(cache_dir,'envibert_tokenizer.py')).load_module().RobertaTokenizer(cache_dir) spell_model = EncoderDecoderModel.from_pretrained(model_name) def oov_spelling(word, num_candidate=1): result = [] inputs = spell_tokenizer([word.lower()]) input_ids = inputs['input_ids'] attention_mask = inputs['attention_mask'] inputs = { "input_ids": torch.tensor(input_ids), "attention_mask": torch.tensor(attention_mask) } outputs = spell_model.generate(**inputs, num_return_sequences=num_candidate) for output in outputs.cpu().detach().numpy().tolist(): result.append(spell_tokenizer.sp_model.DecodePieces(spell_tokenizer.decode(output, skip_special_tokens=True).split())) return result oov_spelling('spacespeaker') # output: ['x pây x pếch cơ'] ```
Abozoroov/Me
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-01-17T05:47:32Z
--- license: mit tags: - generated_from_trainer datasets: - emotion metrics: - f1 model-index: - name: minilm-finetuned-emotion_nm results: - task: name: Text Classification type: text-classification dataset: name: emotion type: emotion args: default metrics: - name: F1 type: f1 value: 0.9322805793931607 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # minilm-finetuned-emotion_nm This model is a fine-tuned version of [microsoft/MiniLM-L12-H384-uncased](https://huggingface.co/microsoft/MiniLM-L12-H384-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1918 - F1: 0.9323 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 1.3627 | 1.0 | 250 | 1.0048 | 0.5936 | | 0.8406 | 2.0 | 500 | 0.6477 | 0.8608 | | 0.5344 | 3.0 | 750 | 0.4025 | 0.9099 | | 0.3619 | 4.0 | 1000 | 0.3142 | 0.9188 | | 0.274 | 5.0 | 1250 | 0.2489 | 0.9277 | | 0.2225 | 6.0 | 1500 | 0.2320 | 0.9303 | | 0.191 | 7.0 | 1750 | 0.2083 | 0.9298 | | 0.1731 | 8.0 | 2000 | 0.1969 | 0.9334 | | 0.1606 | 9.0 | 2250 | 0.1928 | 0.9362 | | 0.1462 | 10.0 | 2500 | 0.1918 | 0.9323 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
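For completeness, a short sketch of running inference with the resulting emotion classifier. The card does not give the hub id of the fine-tuned checkpoint, so the output name from the card is used as a placeholder path:

```python
from transformers import pipeline

# "minilm-finetuned-emotion_nm" is the output name from the card; replace it
# with the actual hub id or a local path to the fine-tuned checkpoint.
classifier = pipeline("text-classification", model="minilm-finetuned-emotion_nm")

print(classifier("I can't wait to see you again!"))
print(classifier("Everything about today went wrong."))
```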
AdapterHub/bert-base-uncased-pf-sst2
[ "bert", "en", "arxiv:2104.08247", "adapter-transformers", "text-classification", "adapterhub:sentiment/sst-2" ]
text-classification
{ "architectures": null, "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2020-12-11T10:58:04Z
--- language: en tags: - tapas - sequence-classification license: apache-2.0 --- # TAPAS base model This model has 2 versions which can be used. The latest version, which is the default one, corresponds to the `tapas_inter_masklm_base_reset` checkpoint of the [original Github repository](https://github.com/google-research/tapas). This model was pre-trained on MLM and an additional step which the authors call intermediate pre-training. It uses relative position embeddings by default (i.e. resetting the position index at every cell of the table). The other (non-default) version which can be used is the one with absolute position embeddings: - `revision="v1"`, which corresponds to `tapas_inter_masklm_base` Disclaimer: The team releasing TAPAS did not write a model card for this model so this model card has been written by the Hugging Face team and contributors. ## Model description TAPAS is a BERT-like transformers model pretrained on a large corpus of English data from Wikipedia in a self-supervised fashion. This means it was pretrained on the raw tables and associated texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data), with an automatic process to generate inputs and labels from those texts. More precisely, it was pretrained with two objectives: - Masked language modeling (MLM): taking a (flattened) table and associated context, the model randomly masks 15% of the words in the input, then runs the entire (partially masked) sequence through the model. The model then has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of a table and associated text. - Intermediate pre-training: to encourage numerical reasoning on tables, the authors additionally pre-trained the model by creating a balanced dataset of millions of syntactically created training examples. Here, the model must predict (classify) whether a sentence is supported or refuted by the contents of a table. The training examples are created based on synthetic as well as counterfactual statements. This way, the model learns an inner representation of the English language used in tables and associated texts, which can then be used to extract features useful for downstream tasks such as answering questions about a table, or determining whether a sentence is entailed or refuted by the contents of a table. Fine-tuning is done by adding one or more classification heads on top of the pre-trained model, and then jointly training these randomly initialized classification heads with the base model on a downstream task. ## Intended uses & limitations You can use the raw model for getting hidden representations of table-question pairs, but it's mostly intended to be fine-tuned on a downstream task such as question answering or sequence classification. See the [model hub](https://huggingface.co/models?filter=tapas) to look for fine-tuned versions on a task that interests you. ## Training procedure ### Preprocessing The texts are lowercased and tokenized using WordPiece and a vocabulary size of 30,000. The inputs of the model are then of the form: ``` [CLS] Sentence [SEP] Flattened table [SEP] ``` ### Pre-training The model was pre-trained on 32 Cloud TPU v3 cores for 1,000,000 steps with maximum sequence length 512 and batch size of 512.
In this setup, pre-training on MLM only takes around 3 days. Additionally, the model has been further pre-trained on a second task (table entailment). See the original TAPAS [paper](https://www.aclweb.org/anthology/2020.acl-main.398/) and the [follow-up paper](https://www.aclweb.org/anthology/2020.findings-emnlp.27/) for more details. The optimizer used is Adam with a learning rate of 5e-5, and a warmup ratio of 0.01. ### BibTeX entry and citation info ```bibtex @misc{herzig2020tapas, title={TAPAS: Weakly Supervised Table Parsing via Pre-training}, author={Jonathan Herzig and Paweł Krzysztof Nowak and Thomas Müller and Francesco Piccinno and Julian Martin Eisenschlos}, year={2020}, eprint={2004.02349}, archivePrefix={arXiv}, primaryClass={cs.IR} } ``` ```bibtex @misc{eisenschlos2020understanding, title={Understanding tables with intermediate pre-training}, author={Julian Martin Eisenschlos and Syrine Krichene and Thomas Müller}, year={2020}, eprint={2010.00571}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
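Since the card points to feature extraction on table-question pairs but shows no code, here is a minimal sketch of obtaining hidden representations with the Transformers TAPAS classes. The checkpoint id `google/tapas-base` is an assumption about where the default checkpoint described above is hosted:

```python
import pandas as pd
from transformers import TapasTokenizer, TapasModel

# Assumption: the default (intermediate pre-training, reset embeddings)
# checkpoint described above is hosted as "google/tapas-base".
tokenizer = TapasTokenizer.from_pretrained("google/tapas-base")
model = TapasModel.from_pretrained("google/tapas-base")

# TAPAS expects the table as a pandas DataFrame of strings.
table = pd.DataFrame(
    {"City": ["Paris", "Berlin"], "Population": ["2100000", "3700000"]}
)
queries = ["Which city has the larger population?"]

inputs = tokenizer(table=table, queries=queries, return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch size, sequence length, hidden size)
```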
AiPorter/DialoGPT-small-Back_to_the_future
[ "pytorch", "gpt2", "text-generation", "transformers", "conversational" ]
conversational
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": 1000 }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - conversational --- # Digimon DialoGPT Model
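The card gives no usage example; a standard DialoGPT-style single-turn exchange might look like the sketch below. The repository id is taken from this record's modelId, and since the card title does not obviously match it, treat the id (and the prompt) as assumptions:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed repo id from this record; adjust if the card belongs elsewhere.
model_name = "AiPorter/DialoGPT-small-Back_to_the_future"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# One turn of dialogue: the prompt ends with the EOS token, and the reply is
# decoded from the newly generated tokens only.
prompt = "Where are we going?" + tokenizer.eos_token
input_ids = tokenizer.encode(prompt, return_tensors="pt")
reply_ids = model.generate(
    input_ids,
    max_length=100,
    pad_token_id=tokenizer.eos_token_id,
)
reply = tokenizer.decode(
    reply_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True
)
print(reply)
```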
Ajay191191/autonlp-Test-530014983
[ "pytorch", "bert", "text-classification", "en", "dataset:Ajay191191/autonlp-data-Test", "transformers", "autonlp", "co2_eq_emissions" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
34
null
--- language: - ja license: cc-by-sa-4.0 datasets: - wikipedia widget: - text: "早稲田 大学 で 自然 言語 処理 を" --- # nlp-waseda/gpt2-small-japanese-wikipedia This model is Japanese GPT-2 pretrained on Japanese Wikipedia. ## Intended uses & limitations You can use the raw model for text generation or fine-tune it to a downstream task. Note that the texts should be segmented into words using Juman++ in advance. ### How to use You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='nlp-waseda/gpt2-small-japanese-wikipedia') >>> set_seed(42) >>> generator("早稲田 大学 で 自然 言語 処理 を", max_length=30, do_sample=True, pad_token_id=2, num_return_sequences=5) [{'generated_text': '早稲田 大学 で 自然 言語 処理 を 学び 、 1969 年 に は 同 大学院 を 修了 。 東京 芝浦 電気 株式 会社 に 就職 後 、 情報 処理'}, {'generated_text': '早稲田 大学 で 自然 言語 処理 を 学び 、 帰国 後 は 立教 大学 理学部 助手 を 務めた 。 1978 年 に 神奈川 県立 湘南 高等 学校 校長 に 就任'}, {'generated_text': '早稲田 大学 で 自然 言語 処理 を 研究 。 1972 年 に 早稲田 大学 文学部 ドイツ 文学 専攻 を 卒業 し 、 同 年 から 1979 年 まで 上智 大学'}, {'generated_text': '早稲田 大学 で 自然 言語 処理 を 専攻 する 。 1979 年 東京 農工 大学 農学 部 卒業 。 1980 年 同 大学院 農学 研究 科 修士 課程 修了 。'}, {'generated_text': '早稲田 大学 で 自然 言語 処理 を 専攻 し ながら 、 日本 で 活動 する 自然 言語 研究 家 。 大学 時代 は 東京 大学 理学部 の 助手 を 務め'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import ReformerTokenizer, GPT2Model tokenizer = ReformerTokenizer.from_pretrained('nlp-waseda/gpt2-small-japanese-wikipedia') model = GPT2Model.from_pretrained('nlp-waseda/gpt2-small-japanese-wikipedia') text = "早稲田 大学 で 自然 言語 処理 を" encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` ## Training data The GPT-2 model was pretrained on Japanese Wikipedia, dumped on 2021-12-20. ## Training procedure ### Preprocessing The texts are normalized using zenhan, segmented into words using Juman++, and tokenized using SentencePiece. Juman++ 2.0.0-rc3 was used for pretraining. The model was trained on 8 NVIDIA A100 GPUs.
Ajteks/Chatbot
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2021-04-19T14:40:44Z
--- language: - en tags: - mental-health license: apache-2.0 datasets: - PubMed --- # Psych-Search Psych-Search is a work in progress to bring cutting-edge NLP to mental health practitioners. The model detailed here serves as a foundation for traditional classification models as well as NLU models for a Psych-Search application. The goal of the Psych-Search Application is to use a combination of traditional text classification models to expand the scope of the MESH taxonomy with the inclusion of relevant categories for mental health practitioners designing suicide prevention programs for adolescent communities within the United States, as well as the automatic extraction and standardization of entities such as risk factors and protective factors. Our first expansion efforts to the MESH taxonomy include categories: - Prevention Strategies - Protective Factors We are actively looking for partners on this work and would love to hear from you! Please ping us at [email protected]. ## Model description This model is an extension of [allenai/scibert_scivocab_uncased](https://huggingface.co/allenai/scibert_scivocab_uncased). Continued pretraining was done using SciBERT as the base model using abstract text only from Psychology and Psychiatry PubMed research. Training was done on approximately 3.5 million papers for 10 epochs and evaluated on a task similar to BioASQ Task A. ## Intended uses & limitations #### How to use ```python from transformers import AutoTokenizer, AutoModel mname = "nlp4good/psych-search" tokenizer = AutoTokenizer.from_pretrained(mname) model = AutoModel.from_pretrained(mname) ``` ### Limitations and bias This model was trained on all PubMed abstracts categorized under [Psychology and Psychiatry](https://meshb.nlm.nih.gov/treeView). As of March 1, this corresponds to approximately 3.2 million papers that contain abstract text. Of these 3.2 million papers, relevant sparse mental health categories were back translated to increase the representation of certain mental health categories. There are several limitations with this dataset, including large discrepancies in the number of papers associated with [Sexual and Gender Minorities](https://meshb.nlm.nih.gov/record/ui?ui=D000072339). The training data consisted of the following breakdown across gender groups: Female | Male | Sexual and Gender Minorities -------|---------|---------- 1,896,301 | 1,945,279 | 4,529 Similar discrepancies are present within [Ethnic Groups](https://meshb.nlm.nih.gov/record/ui?ui=D005006) as defined within the MESH taxonomy: | African Americans | Arabs | Asian Americans | Hispanic Americans | Indians, Central American | Indians, North American | Indians, South American | Indigenous Peoples | Mexican Americans | |-------------------|-------|-----------------|--------------------|---------------------------|-------------------------|-------------------------|--------------------|-------------------| | 31,027 | 2,437 | 5,612 | 18,893 | 124 | 5,657 | 633 | 174 | 3,234 | These discrepancies can have a significant impact on information retrieval systems, downstream machine learning models, and other forms of NLP that leverage these pretrained models. ## Training data This model was trained on all PubMed abstracts categorized under [Psychology and Psychiatry](https://meshb.nlm.nih.gov/treeView). As of March 1, this corresponds to approximately 3.2 million papers that contain abstract text.
Of these 3.2 million papers, relevant sparse categories were back translated from English to French and from French to English to increase the representation of sparser mental health categories. This included backtranslating the following papers with the following categories: - Depressive Disorder - Risk Factors - Mental Disorders - Child, Preschool - Mental Health In aggregate, this process added 557,980 additional papers to our training data. ## Training procedure Continued pretraining was done on Psychology and Psychiatry PubMed papers for 10 epochs. Default parameters were used with the exception of gradient accumulation steps, which was set to 4, with a per device train batch size of 32. 2 x Nvidia 3090s were used in the development of this model. ## Evaluation results To evaluate the effectiveness of psych-search within the mental health domain, an evaluation task was constructed by finetuning psych-search for a task similar to [BioASQ Task A](http://bioasq.org/). Here we perform large scale biomedical indexing using the MESH taxonomy associated with each paper underneath Psychology and Psychiatry. The evaluation metric is the micro F1 score across all second level descriptors within Psychology and Psychiatry. This corresponds to 38 different MESH categories used during evaluation. bert-base-uncased | SciBERT Scivocab Uncased | Psych-Search -------|---------|---------- 0.7348 | 0.7394 | 0.7415 ## Next Steps If you are interested in continuing to build on this work or have other ideas on how we can build on others' work, please let us know! We can be reached at [email protected]. Our goal is to bring state-of-the-art NLP capabilities to underserved areas of research, with mental health being our top priority.
Akash7897/distilbert-base-uncased-finetuned-sst2
[ "pytorch", "tensorboard", "distilbert", "text-classification", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0", "model-index" ]
text-classification
{ "architectures": [ "DistilBertForSequenceClassification" ], "model_type": "distilbert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
31
null
--- language: en pipeline_tag: fill-mask license: cc-by-sa-4.0 thumbnail: https://i.ibb.co/p3kQ7Rw/Screenshot-2020-10-06-at-12-16-36-PM.png tags: - legal widget: - text: "The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of police." --- # LEGAL-BERT: The Muppets straight out of Law School <img align="left" src="https://i.ibb.co/p3kQ7Rw/Screenshot-2020-10-06-at-12-16-36-PM.png" width="100"/> LEGAL-BERT is a family of BERT models for the legal domain, intended to assist legal NLP research, computational law, and legal technology applications. To pre-train the different variations of LEGAL-BERT, we collected 12 GB of diverse English legal text from several fields (e.g., legislation, court cases, contracts) scraped from publicly available resources. Sub-domain variants (CONTRACTS-, EURLEX-, ECHR-) and/or general LEGAL-BERT perform better than using BERT out of the box for domain-specific tasks. A light-weight model (33% the size of BERT-BASE) pre-trained from scratch on legal data with competitive performance is also available. <br/><br/> --- I. Chalkidis, M. Fergadiotis, P. Malakasiotis, N. Aletras and I. Androutsopoulos. "LEGAL-BERT: The Muppets straight out of Law School". In Findings of Empirical Methods in Natural Language Processing (EMNLP 2020) (Short Papers), to be held online, 2020. (https://aclanthology.org/2020.findings-emnlp.261) --- ## Pre-training corpora The pre-training corpora of LEGAL-BERT include: * 116,062 documents of EU legislation, publicly available from EURLEX (http://eur-lex.europa.eu), the repository of EU Law running under the EU Publication Office. * 61,826 documents of UK legislation, publicly available from the UK legislation portal (http://www.legislation.gov.uk). * 19,867 cases from the European Court of Justice (ECJ), also available from EURLEX. * 12,554 cases from HUDOC, the repository of the European Court of Human Rights (ECHR) (http://hudoc.echr.coe.int/eng). * 164,141 cases from various courts across the USA, hosted in the Case Law Access Project portal (https://case.law). * 76,366 US contracts from EDGAR, the database of US Securities and Exchange Commission (SECOM) (https://www.sec.gov/edgar.shtml). ## Pre-training details * We trained BERT using the official code provided in Google BERT's GitHub repository (https://github.com/google-research/bert). * We released a model similar to the English BERT-BASE model (12-layer, 768-hidden, 12-heads, 110M parameters). * We chose to follow the same training set-up: 1 million training steps with batches of 256 sequences of length 512 with an initial learning rate 1e-4. * We were able to use a single Google Cloud TPU v3-8 provided for free from [TensorFlow Research Cloud (TFRC)](https://www.tensorflow.org/tfrc), while also utilizing [GCP research credits](https://edu.google.com/programs/credits/research). Huge thanks to both Google programs for supporting us! * Part of LEGAL-BERT is a light-weight model pre-trained from scratch on legal data, which achieves comparable performance to larger models, while being much more efficient (approximately 4 times faster) with a smaller environmental footprint. 
## Models list | Model name | Model Path | Training corpora | | ------------------- | ------------------------------------ | ------------------- | | CONTRACTS-BERT-BASE | `nlpaueb/bert-base-uncased-contracts` | US contracts | | EURLEX-BERT-BASE | `nlpaueb/bert-base-uncased-eurlex` | EU legislation | | ECHR-BERT-BASE | `nlpaueb/bert-base-uncased-echr` | ECHR cases | | LEGAL-BERT-BASE * | `nlpaueb/legal-bert-base-uncased` | All | | LEGAL-BERT-SMALL | `nlpaueb/legal-bert-small-uncased` | All | \* LEGAL-BERT-BASE is the model referred to as LEGAL-BERT-SC in Chalkidis et al. (2020); a model trained from scratch in the legal corpora mentioned below using a newly created vocabulary by a sentence-piece tokenizer trained on the very same corpora. \*\* As many of you expressed interest in the LEGAL-BERT-FP models (those relying on the original BERT-BASE checkpoint), they have been released in Archive.org (https://archive.org/details/legal_bert_fp), as these models are secondary and possibly only interesting for those who aim to dig deeper in the open questions of Chalkidis et al. (2020). ## Load Pretrained Model ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("nlpaueb/legal-bert-base-uncased") model = AutoModel.from_pretrained("nlpaueb/legal-bert-base-uncased") ``` ## Use LEGAL-BERT variants as Language Models | Corpus | Model | Masked token | Predictions | | --------------------------------- | ---------------------------------- | ------------ | ------------ | | | **BERT-BASE-UNCASED** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('new', '0.09'), ('current', '0.04'), ('proposed', '0.03'), ('marketing', '0.03'), ('joint', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.32'), ('rape', '0.22'), ('abuse', '0.14'), ('death', '0.04'), ('violence', '0.03') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('farm', '0.25'), ('livestock', '0.08'), ('draft', '0.06'), ('domestic', '0.05'), ('wild', '0.05') | | **CONTRACTS-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('letter', '0.38'), ('dealer', '0.04'), ('employment', '0.03'), ('award', '0.03'), ('contribution', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('death', '0.39'), ('imprisonment', '0.07'), ('contempt', '0.05'), ('being', '0.03'), ('crime', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | (('domestic', '0.18'), ('laboratory', '0.07'), ('household', '0.06'), ('personal', '0.06'), ('the', '0.04') | | **EURLEX-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . 
| employment | ('supply', '0.11'), ('cooperation', '0.08'), ('service', '0.07'), ('licence', '0.07'), ('distribution', '0.05') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.66'), ('death', '0.07'), ('imprisonment', '0.07'), ('murder', '0.04'), ('rape', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('live', '0.43'), ('pet', '0.28'), ('certain', '0.05'), ('fur', '0.03'), ('the', '0.02') | | **ECHR-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('second', '0.24'), ('latter', '0.10'), ('draft', '0.05'), ('bilateral', '0.05'), ('arbitration', '0.04') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.99'), ('death', '0.01'), ('inhuman', '0.00'), ('beating', '0.00'), ('rape', '0.00') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('pet', '0.17'), ('all', '0.12'), ('slaughtered', '0.10'), ('domestic', '0.07'), ('individual', '0.05') | | **LEGAL-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('settlement', '0.26'), ('letter', '0.23'), ('dealer', '0.04'), ('master', '0.02'), ('supplemental', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '1.00'), ('detention', '0.00'), ('arrest', '0.00'), ('rape', '0.00'), ('death', '0.00') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('live', '0.67'), ('beef', '0.17'), ('farm', '0.03'), ('pet', '0.02'), ('dairy', '0.01') | | **LEGAL-BERT-SMALL** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('license', '0.09'), ('transition', '0.08'), ('settlement', '0.04'), ('consent', '0.03'), ('letter', '0.03') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.59'), ('pain', '0.05'), ('ptsd', '0.05'), ('death', '0.02'), ('tuberculosis', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('all', '0.08'), ('live', '0.07'), ('certain', '0.07'), ('the', '0.07'), ('farm', '0.05') ## Evaluation on downstream tasks Consider the experiments in the article "LEGAL-BERT: The Muppets straight out of Law School". 
Chalkidis et al., 2020, (https://aclanthology.org/2020.findings-emnlp.261) ## Author - Publication ``` @inproceedings{chalkidis-etal-2020-legal, title = "{LEGAL}-{BERT}: The Muppets straight out of Law School", author = "Chalkidis, Ilias and Fergadiotis, Manos and Malakasiotis, Prodromos and Aletras, Nikolaos and Androutsopoulos, Ion", booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", doi = "10.18653/v1/2020.findings-emnlp.261", pages = "2898--2904" } ``` ## About Us [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr) develops algorithms, models, and systems that allow computers to process and generate natural language texts. The group's current research interests include: * question answering systems for databases, ontologies, document collections, and the Web, especially biomedical question answering, * natural language generation from databases and ontologies, especially Semantic Web ontologies, text classification, including filtering spam and abusive content, * information extraction and opinion mining, including legal text analytics and sentiment analysis, * natural language processing tools for Greek, for example parsers and named-entity recognizers, machine learning in natural language processing, especially deep learning. The group is part of the Information Processing Laboratory of the Department of Informatics of the Athens University of Economics and Business. [Ilias Chalkidis](https://iliaschalkidis.github.io) on behalf of [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr) | Github: [@ilias.chalkidis](https://github.com/iliaschalkidis) | Twitter: [@KiddoThe2B](https://twitter.com/KiddoThe2B) |
Akash7897/fill_mask_model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en pipeline_tag: fill-mask license: cc-by-sa-4.0 thumbnail: https://i.ibb.co/p3kQ7Rw/Screenshot-2020-10-06-at-12-16-36-PM.png tags: - legal widget: - text: "The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of police." --- # LEGAL-BERT: The Muppets straight out of Law School <img align="left" src="https://i.ibb.co/p3kQ7Rw/Screenshot-2020-10-06-at-12-16-36-PM.png" width="100"/> LEGAL-BERT is a family of BERT models for the legal domain, intended to assist legal NLP research, computational law, and legal technology applications. To pre-train the different variations of LEGAL-BERT, we collected 12 GB of diverse English legal text from several fields (e.g., legislation, court cases, contracts) scraped from publicly available resources. Sub-domain variants (CONTRACTS-, EURLEX-, ECHR-) and/or general LEGAL-BERT perform better than using BERT out of the box for domain-specific tasks.<br> This is the light-weight version of BERT-BASE (33% the size of BERT-BASE) pre-trained from scratch on legal data, which achieves comparable performance to larger models, while being much more efficient (approximately 4 times faster) with a smaller environmental footprint. <br/><br/> --- I. Chalkidis, M. Fergadiotis, P. Malakasiotis, N. Aletras and I. Androutsopoulos. "LEGAL-BERT: The Muppets straight out of Law School". In Findings of Empirical Methods in Natural Language Processing (EMNLP 2020) (Short Papers), to be held online, 2020. (https://aclanthology.org/2020.findings-emnlp.261) --- ## Pre-training corpora The pre-training corpora of LEGAL-BERT include: * 116,062 documents of EU legislation, publicly available from EURLEX (http://eur-lex.europa.eu), the repository of EU Law running under the EU Publication Office. * 61,826 documents of UK legislation, publicly available from the UK legislation portal (http://www.legislation.gov.uk). * 19,867 cases from the European Court of Justice (ECJ), also available from EURLEX. * 12,554 cases from HUDOC, the repository of the European Court of Human Rights (ECHR) (http://hudoc.echr.coe.int/eng). * 164,141 cases from various courts across the USA, hosted in the Case Law Access Project portal (https://case.law). * 76,366 US contracts from EDGAR, the database of US Securities and Exchange Commission (SECOM) (https://www.sec.gov/edgar.shtml). ## Pre-training details * We trained BERT using the official code provided in Google BERT's GitHub repository (https://github.com/google-research/bert). * We released a model similar to the English BERT-BASE model (12-layer, 768-hidden, 12-heads, 110M parameters). * We chose to follow the same training set-up: 1 million training steps with batches of 256 sequences of length 512 with an initial learning rate 1e-4. * We were able to use a single Google Cloud TPU v3-8 provided for free from [TensorFlow Research Cloud (TFRC)](https://www.tensorflow.org/tfrc), while also utilizing [GCP research credits](https://edu.google.com/programs/credits/research). Huge thanks to both Google programs for supporting us! 
## Models list | Model name | Model Path | Training corpora | | ------------------- | ------------------------------------ | ------------------- | | CONTRACTS-BERT-BASE | `nlpaueb/bert-base-uncased-contracts` | US contracts | | EURLEX-BERT-BASE | `nlpaueb/bert-base-uncased-eurlex` | EU legislation | | ECHR-BERT-BASE | `nlpaueb/bert-base-uncased-echr` | ECHR cases | | LEGAL-BERT-BASE * | `nlpaueb/legal-bert-base-uncased` | All | | LEGAL-BERT-SMALL | `nlpaueb/legal-bert-small-uncased` | All | \* LEGAL-BERT-BASE is the model referred to as LEGAL-BERT-SC in Chalkidis et al. (2020); a model trained from scratch in the legal corpora mentioned below using a newly created vocabulary by a sentence-piece tokenizer trained on the very same corpora. \*\* As many of you expressed interest in the LEGAL-BERT-FP models (those relying on the original BERT-BASE checkpoint), they have been released in Archive.org (https://archive.org/details/legal_bert_fp), as these models are secondary and possibly only interesting for those who aim to dig deeper in the open questions of Chalkidis et al. (2020). ## Load Pretrained Model ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("nlpaueb/legal-bert-small-uncased") model = AutoModel.from_pretrained("nlpaueb/legal-bert-small-uncased") ``` ## Use LEGAL-BERT variants as Language Models | Corpus | Model | Masked token | Predictions | | --------------------------------- | ---------------------------------- | ------------ | ------------ | | | **BERT-BASE-UNCASED** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('new', '0.09'), ('current', '0.04'), ('proposed', '0.03'), ('marketing', '0.03'), ('joint', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.32'), ('rape', '0.22'), ('abuse', '0.14'), ('death', '0.04'), ('violence', '0.03') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('farm', '0.25'), ('livestock', '0.08'), ('draft', '0.06'), ('domestic', '0.05'), ('wild', '0.05') | | **CONTRACTS-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('letter', '0.38'), ('dealer', '0.04'), ('employment', '0.03'), ('award', '0.03'), ('contribution', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('death', '0.39'), ('imprisonment', '0.07'), ('contempt', '0.05'), ('being', '0.03'), ('crime', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | (('domestic', '0.18'), ('laboratory', '0.07'), ('household', '0.06'), ('personal', '0.06'), ('the', '0.04') | | **EURLEX-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . 
| employment | ('supply', '0.11'), ('cooperation', '0.08'), ('service', '0.07'), ('licence', '0.07'), ('distribution', '0.05') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.66'), ('death', '0.07'), ('imprisonment', '0.07'), ('murder', '0.04'), ('rape', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('live', '0.43'), ('pet', '0.28'), ('certain', '0.05'), ('fur', '0.03'), ('the', '0.02') | | **ECHR-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('second', '0.24'), ('latter', '0.10'), ('draft', '0.05'), ('bilateral', '0.05'), ('arbitration', '0.04') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.99'), ('death', '0.01'), ('inhuman', '0.00'), ('beating', '0.00'), ('rape', '0.00') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('pet', '0.17'), ('all', '0.12'), ('slaughtered', '0.10'), ('domestic', '0.07'), ('individual', '0.05') | | **LEGAL-BERT-BASE** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('settlement', '0.26'), ('letter', '0.23'), ('dealer', '0.04'), ('master', '0.02'), ('supplemental', '0.02') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '1.00'), ('detention', '0.00'), ('arrest', '0.00'), ('rape', '0.00'), ('death', '0.00') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('live', '0.67'), ('beef', '0.17'), ('farm', '0.03'), ('pet', '0.02'), ('dairy', '0.01') | | **LEGAL-BERT-SMALL** | | (Contracts) | This [MASK] Agreement is between General Motors and John Murray . | employment | ('license', '0.09'), ('transition', '0.08'), ('settlement', '0.04'), ('consent', '0.03'), ('letter', '0.03') | (ECHR) | The applicant submitted that her husband was subjected to treatment amounting to [MASK] whilst in the custody of Adana Security Directorate | torture | ('torture', '0.59'), ('pain', '0.05'), ('ptsd', '0.05'), ('death', '0.02'), ('tuberculosis', '0.02') | (EURLEX) | Establishing a system for the identification and registration of [MASK] animals and regarding the labelling of beef and beef products . | bovine | ('all', '0.08'), ('live', '0.07'), ('certain', '0.07'), ('the', '0.07'), ('farm', '0.05') ## Evaluation on downstream tasks Consider the experiments in the article "LEGAL-BERT: The Muppets straight out of Law School". 
Chalkidis et al., 2020, (https://aclanthology.org/2020.findings-emnlp.261) ## Author - Publication ``` @inproceedings{chalkidis-etal-2020-legal, title = "{LEGAL}-{BERT}: The Muppets straight out of Law School", author = "Chalkidis, Ilias and Fergadiotis, Manos and Malakasiotis, Prodromos and Aletras, Nikolaos and Androutsopoulos, Ion", booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", doi = "10.18653/v1/2020.findings-emnlp.261", pages = "2898--2904" } ``` ## About Us [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr) develops algorithms, models, and systems that allow computers to process and generate natural language texts. The group's current research interests include: * question answering systems for databases, ontologies, document collections, and the Web, especially biomedical question answering, * natural language generation from databases and ontologies, especially Semantic Web ontologies, text classification, including filtering spam and abusive content, * information extraction and opinion mining, including legal text analytics and sentiment analysis, * natural language processing tools for Greek, for example parsers and named-entity recognizers, machine learning in natural language processing, especially deep learning. The group is part of the Information Processing Laboratory of the Department of Informatics of the Athens University of Economics and Business. [Ilias Chalkidis](https://iliaschalkidis.github.io) on behalf of [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr) | Github: [@ilias.chalkidis](https://github.com/iliaschalkidis) | Twitter: [@KiddoThe2B](https://twitter.com/KiddoThe2B) |
Akash7897/gpt2-wikitext2
[ "pytorch", "tensorboard", "gpt2", "text-generation", "transformers", "generated_from_trainer", "license:mit" ]
text-generation
{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": true, "max_length": 50 }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
--- language: en pipeline_tag: fill-mask license: cc-by-sa-4.0 thumbnail: https://i.ibb.co/0yz81K9/sec-bert-logo.png tags: - finance - financial widget: - text: "Total net sales [MASK] 2% or $5.4 billion during 2019 compared to 2018." - text: "Total net sales decreased 2% or $5.4 [MASK] during 2019 compared to 2018." - text: "During 2019, the Company [MASK] $67.1 billion of its common stock and paid dividend equivalents of $14.1 billion." - text: "During 2019, the Company repurchased $67.1 billion of its common [MASK] and paid dividend equivalents of $14.1 billion." - text: "During 2019, the Company repurchased $67.1 billion of its common stock and paid [MASK] equivalents of $14.1 billion." - text: "During 2019, the Company repurchased $67.1 billion of its common stock and paid dividend [MASK] of $14.1 billion." --- # SEC-BERT <img align="center" src="https://i.ibb.co/0yz81K9/sec-bert-logo.png" alt="SEC-BERT" width="400"/> <div style="text-align: justify"> SEC-BERT is a family of BERT models for the financial domain, intended to assist financial NLP research and FinTech applications. SEC-BERT consists of the following models: * **SEC-BERT-BASE** (this model): Same architecture as BERT-BASE trained on financial documents. * [**SEC-BERT-NUM**](https://huggingface.co/nlpaueb/sec-bert-num): Same as SEC-BERT-BASE but we replace every number token with a [NUM] pseudo-token handling all numeric expressions in a uniform manner, disallowing their fragmentation * [**SEC-BERT-SHAPE**](https://huggingface.co/nlpaueb/sec-bert-shape): Same as SEC-BERT-BASE but we replace numbers with pseudo-tokens that represent the number’s shape, so numeric expressions (of known shapes) are no longer fragmented, e.g., '53.2' becomes '[XX.X]' and '40,200.5' becomes '[XX,XXX.X]'. </div> ## Pre-training corpus The model was pre-trained on 260,773 10-K filings from 1993-2019, publicly available at <a href="https://www.sec.gov/">U.S. Securities and Exchange Commission (SEC)</a> ## Pre-training details <div style="text-align: justify"> * We created a new vocabulary of 30k subwords by training a [BertWordPieceTokenizer](https://github.com/huggingface/tokenizers) from scratch on the pre-training corpus. * We trained BERT using the official code provided in [Google BERT's GitHub repository](https://github.com/google-research/bert). * We then used [Hugging Face](https://huggingface.co)'s [Transformers](https://github.com/huggingface/transformers) conversion script to convert the TF checkpoint in the desired format in order to be able to load the model in two lines of code for both PyTorch and TF2 users. * We release a model similar to the English BERT-BASE model (12-layer, 768-hidden, 12-heads, 110M parameters). * We chose to follow the same training set-up: 1 million training steps with batches of 256 sequences of length 512 with an initial learning rate 1e-4. * We were able to use a single Google Cloud TPU v3-8 provided for free from [TensorFlow Research Cloud (TRC)](https://sites.research.google/trc), while also utilizing [GCP research credits](https://edu.google.com/programs/credits/research). Huge thanks to both Google programs for supporting us! 
</div> ## Load Pretrained Model ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("nlpaueb/sec-bert-base") model = AutoModel.from_pretrained("nlpaueb/sec-bert-base") ``` ## Using SEC-BERT variants as Language Models | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales [MASK] 2% or $5.4 billion during 2019 compared to 2018. | decreased | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | increased (0.221), were (0.131), are (0.103), rose (0.075), of (0.058) | **SEC-BERT-BASE** | increased (0.678), decreased (0.282), declined (0.017), grew (0.016), rose (0.004) | **SEC-BERT-NUM** | increased (0.753), decreased (0.211), grew (0.019), declined (0.010), rose (0.006) | **SEC-BERT-SHAPE** | increased (0.747), decreased (0.214), grew (0.021), declined (0.013), rose (0.002) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2% or $5.4 [MASK] during 2019 compared to 2018. | billion | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | billion (0.841), million (0.097), trillion (0.028), ##m (0.015), ##bn (0.006) | **SEC-BERT-BASE** | million (0.972), billion (0.028), millions (0.000), ##million (0.000), m (0.000) | **SEC-BERT-NUM** | million (0.974), billion (0.012), , (0.010), thousand (0.003), m (0.000) | **SEC-BERT-SHAPE** | million (0.978), billion (0.021), % (0.000), , (0.000), millions (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased [MASK]% or $5.4 billion during 2019 compared to 2018. | 2 | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | 20 (0.031), 10 (0.030), 6 (0.029), 4 (0.027), 30 (0.027) | **SEC-BERT-BASE** | 13 (0.045), 12 (0.040), 11 (0.040), 14 (0.035), 10 (0.035) | **SEC-BERT-NUM** | [NUM] (1.000), one (0.000), five (0.000), three (0.000), seven (0.000) | **SEC-BERT-SHAPE** | [XX] (0.316), [XX.X] (0.253), [X.X] (0.237), [X] (0.188), [X.XX] (0.002) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2[MASK] or $5.4 billion during 2019 compared to 2018. | % | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | % (0.795), percent (0.174), ##fold (0.009), billion (0.004), times (0.004) | **SEC-BERT-BASE** | % (0.924), percent (0.076), points (0.000), , (0.000), times (0.000) | **SEC-BERT-NUM** | % (0.882), percent (0.118), million (0.000), units (0.000), bps (0.000) | **SEC-BERT-SHAPE** | % (0.961), percent (0.039), bps (0.000), , (0.000), bcf (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2% or $[MASK] billion during 2019 compared to 2018. 
| 5.4 | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | 1 (0.074), 4 (0.045), 3 (0.044), 2 (0.037), 5 (0.034) | **SEC-BERT-BASE** | 1 (0.218), 2 (0.136), 3 (0.078), 4 (0.066), 5 (0.048) | **SEC-BERT-NUM** | [NUM] (1.000), l (0.000), 1 (0.000), - (0.000), 30 (0.000) | **SEC-BERT-SHAPE** | [X.X] (0.787), [X.XX] (0.095), [XX.X] (0.049), [X.XXX] (0.046), [X] (0.013) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2% or $5.4 billion during [MASK] compared to 2018. | 2019 | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | 2017 (0.485), 2018 (0.169), 2016 (0.164), 2015 (0.070), 2014 (0.022) | **SEC-BERT-BASE** | 2019 (0.990), 2017 (0.007), 2018 (0.003), 2020 (0.000), 2015 (0.000) | **SEC-BERT-NUM** | [NUM] (1.000), as (0.000), fiscal (0.000), year (0.000), when (0.000) | **SEC-BERT-SHAPE** | [XXXX] (1.000), as (0.000), year (0.000), periods (0.000), , (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2% or $5.4 billion during 2019 compared to [MASK]. | 2018 | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | 2017 (0.100), 2016 (0.097), above (0.054), inflation (0.050), previously (0.037) | **SEC-BERT-BASE** | 2018 (0.999), 2019 (0.000), 2017 (0.000), 2016 (0.000), 2014 (0.000) | **SEC-BERT-NUM** | [NUM] (1.000), year (0.000), last (0.000), sales (0.000), fiscal (0.000) | **SEC-BERT-SHAPE** | [XXXX] (1.000), year (0.000), sales (0.000), prior (0.000), years (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | During 2019, the Company [MASK] $67.1 billion of its common stock and paid dividend equivalents of $14.1 billion. | repurchased | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | held (0.229), sold (0.192), acquired (0.172), owned (0.052), traded (0.033) | **SEC-BERT-BASE** | repurchased (0.913), issued (0.036), purchased (0.029), redeemed (0.010), sold (0.003) | **SEC-BERT-NUM** | repurchased (0.917), purchased (0.054), reacquired (0.013), issued (0.005), acquired (0.003) | **SEC-BERT-SHAPE** | repurchased (0.902), purchased (0.068), issued (0.010), reacquired (0.008), redeemed (0.006) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | During 2019, the Company repurchased $67.1 billion of its common [MASK] and paid dividend equivalents of $14.1 billion. | stock | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | stock (0.835), assets (0.039), equity (0.025), debt (0.021), bonds (0.017) | **SEC-BERT-BASE** | stock (0.857), shares (0.135), equity (0.004), units (0.002), securities (0.000) | **SEC-BERT-NUM** | stock (0.842), shares (0.157), equity (0.000), securities (0.000), units (0.000) | **SEC-BERT-SHAPE** | stock (0.888), shares (0.109), equity (0.001), securities (0.001), stocks (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | During 2019, the Company repurchased $67.1 billion of its common stock and paid [MASK] equivalents of $14.1 billion. 
| dividend | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | cash (0.276), net (0.128), annual (0.083), the (0.040), debt (0.027) | **SEC-BERT-BASE** | dividend (0.890), cash (0.018), dividends (0.016), share (0.013), tax (0.010) | **SEC-BERT-NUM** | dividend (0.735), cash (0.115), share (0.087), tax (0.025), stock (0.013) | **SEC-BERT-SHAPE** | dividend (0.655), cash (0.248), dividends (0.042), share (0.019), out (0.003) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | During 2019, the Company repurchased $67.1 billion of its common stock and paid dividend [MASK] of $14.1 billion. | equivalents | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | revenue (0.085), earnings (0.078), rates (0.065), amounts (0.064), proceeds (0.062) | **SEC-BERT-BASE** | payments (0.790), distributions (0.087), equivalents (0.068), cash (0.013), amounts (0.004) | **SEC-BERT-NUM** | payments (0.845), equivalents (0.097), distributions (0.024), increases (0.005), dividends (0.004) | **SEC-BERT-SHAPE** | payments (0.784), equivalents (0.093), distributions (0.043), dividends (0.015), requirements (0.009) ## Publication <div style="text-align: justify"> If you use this model cite the following article:<br> [**FiNER: Financial Numeric Entity Recognition for XBRL Tagging**](https://arxiv.org/abs/2203.06482)<br> Lefteris Loukas, Manos Fergadiotis, Ilias Chalkidis, Eirini Spyropoulou, Prodromos Malakasiotis, Ion Androutsopoulos and George Paliouras<br> In the Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (ACL 2022) (Long Papers), Dublin, Republic of Ireland, May 22 - 27, 2022 </div> ``` @inproceedings{loukas-etal-2022-finer, title = {FiNER: Financial Numeric Entity Recognition for XBRL Tagging}, author = {Loukas, Lefteris and Fergadiotis, Manos and Chalkidis, Ilias and Spyropoulou, Eirini and Malakasiotis, Prodromos and Androutsopoulos, Ion and Paliouras George}, booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (ACL 2022)}, publisher = {Association for Computational Linguistics}, location = {Dublin, Republic of Ireland}, year = {2022}, url = {https://arxiv.org/abs/2203.06482} } ``` ## About Us <div style="text-align: justify"> [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr) develops algorithms, models, and systems that allow computers to process and generate natural language texts. The group's current research interests include: * question answering systems for databases, ontologies, document collections, and the Web, especially biomedical question answering, * natural language generation from databases and ontologies, especially Semantic Web ontologies, text classification, including filtering spam and abusive content, * information extraction and opinion mining, including legal text analytics and sentiment analysis, * natural language processing tools for Greek, for example parsers and named-entity recognizers, machine learning in natural language processing, especially deep learning. The group is part of the Information Processing Laboratory of the Department of Informatics of the Athens University of Economics and Business. </div> [Manos Fergadiotis](https://manosfer.github.io) on behalf of [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr)
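Masked-token predictions like those in the tables earlier in this card can be reproduced with the `fill-mask` pipeline; a minimal sketch for SEC-BERT-BASE is shown below (exact probabilities may differ slightly across library versions).

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="nlpaueb/sec-bert-base")

preds = fill_mask("Total net sales [MASK] 2% or $5.4 billion during 2019 compared to 2018.")
for p in preds:
    # each prediction carries the predicted token and its probability
    print(p["token_str"], round(p["score"], 3))
```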
Akash7897/my-newtokenizer
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- language: en pipeline_tag: fill-mask license: cc-by-sa-4.0 thumbnail: https://i.ibb.co/0yz81K9/sec-bert-logo.png tags: - finance - financial widget: - text: "Total net sales decreased [MASK]% or $[NUM] billion during [NUM] compared to [NUM]." - text: "Total net sales decreased [NUM]% or $[MASK] billion during [NUM] compared to [NUM]." - text: "Total net sales decreased [NUM]% or $[NUM] billion during [MASK] compared to [NUM]." - text: "During [MASK], the Company repurchased $[NUM] billion of its common stock and paid dividend equivalents of $[NUM] billion." - text: "During 2019, the Company repurchased $[MASK] billion of its common stock and paid dividend equivalents of $[NUM] billion." --- # SEC-BERT <img align="center" src="https://i.ibb.co/0yz81K9/sec-bert-logo.png" alt="sec-bert-logo" width="400"/> <div style="text-align: justify"> SEC-BERT is a family of BERT models for the financial domain, intended to assist financial NLP research and FinTech applications. SEC-BERT consists of the following models: * [**SEC-BERT-BASE**](https://huggingface.co/nlpaueb/sec-bert-base): Same architecture as BERT-BASE trained on financial documents. * **SEC-BERT-NUM** (this model): Same as SEC-BERT-BASE but we replace every number token with a [NUM] pseudo-token handling all numeric expressions in a uniform manner, disallowing their fragmentation). * [**SEC-BERT-SHAPE**](https://huggingface.co/nlpaueb/sec-bert-shape): Same as SEC-BERT-BASE but we replace numbers with pseudo-tokens that represent the number’s shape, so numeric expressions (of known shapes) are no longer fragmented, e.g., '53.2' becomes '[XX.X]' and '40,200.5' becomes '[XX,XXX.X]'. </div> ## Pre-training corpus The model was pre-trained on 260,773 10-K filings from 1993-2019, publicly available at <a href="https://www.sec.gov/">U.S. Securities and Exchange Commission (SEC)</a> ## Pre-training details <div style="text-align: justify"> * We created a new vocabulary of 30k subwords by training a [BertWordPieceTokenizer](https://github.com/huggingface/tokenizers) from scratch on the pre-training corpus. * We trained BERT using the official code provided in [Google BERT's GitHub repository](https://github.com/google-research/bert)</a>. * We then used [Hugging Face](https://huggingface.co)'s [Transformers](https://github.com/huggingface/transformers) conversion script to convert the TF checkpoint in the desired format in order to be able to load the model in two lines of code for both PyTorch and TF2 users. * We release a model similar to the English BERT-BASE model (12-layer, 768-hidden, 12-heads, 110M parameters). * We chose to follow the same training set-up: 1 million training steps with batches of 256 sequences of length 512 with an initial learning rate 1e-4. * We were able to use a single Google Cloud TPU v3-8 provided for free from [TensorFlow Research Cloud (TRC)](https://sites.research.google/trc), while also utilizing [GCP research credits](https://edu.google.com/programs/credits/research). Huge thanks to both Google programs for supporting us! </div> ## Load Pretrained Model ```python from transformers import AutoTokenizer, AutoModel tokenizer = AutoTokenizer.from_pretrained("nlpaueb/sec-bert-num") model = AutoModel.from_pretrained("nlpaueb/sec-bert-num") ``` ## Pre-process Text <div style="text-align: justify"> To use SEC-BERT-NUM, you have to pre-process texts replacing every numerical token with [NUM] pseudo-token. Below there is an example of how you can pre-process a simple sentence. 
This approach is quite simple; feel free to modify it as you see fit. </div> ```python import re import spacy from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("nlpaueb/sec-bert-num") spacy_tokenizer = spacy.load("en_core_web_sm") sentence = "Total net sales decreased 2% or $5.4 billion during 2019 compared to 2018." def sec_bert_num_preprocess(text): tokens = [t.text for t in spacy_tokenizer(text)] processed_text = [] for token in tokens: if re.fullmatch(r"(\d+[\d,.]*)|([,.]\d+)", token): processed_text.append('[NUM]') else: processed_text.append(token) return ' '.join(processed_text) tokenized_sentence = tokenizer.tokenize(sec_bert_num_preprocess(sentence)) print(tokenized_sentence) """ ['total', 'net', 'sales', 'decreased', '[NUM]', '%', 'or', '$', '[NUM]', 'billion', 'during', '[NUM]', 'compared', 'to', '[NUM]', '.'] """ ``` ## Using SEC-BERT variants as Language Models | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales [MASK] 2% or $5.4 billion during 2019 compared to 2018. | decreased | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | increased (0.221), were (0.131), are (0.103), rose (0.075), of (0.058) | **SEC-BERT-BASE** | increased (0.678), decreased (0.282), declined (0.017), grew (0.016), rose (0.004) | **SEC-BERT-NUM** | increased (0.753), decreased (0.211), grew (0.019), declined (0.010), rose (0.006) | **SEC-BERT-SHAPE** | increased (0.747), decreased (0.214), grew (0.021), declined (0.013), rose (0.002) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2% or $5.4 [MASK] during 2019 compared to 2018. | billion | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | billion (0.841), million (0.097), trillion (0.028), ##m (0.015), ##bn (0.006) | **SEC-BERT-BASE** | million (0.972), billion (0.028), millions (0.000), ##million (0.000), m (0.000) | **SEC-BERT-NUM** | million (0.974), billion (0.012), , (0.010), thousand (0.003), m (0.000) | **SEC-BERT-SHAPE** | million (0.978), billion (0.021), % (0.000), , (0.000), millions (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased [MASK]% or $5.4 billion during 2019 compared to 2018. | 2 | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | 20 (0.031), 10 (0.030), 6 (0.029), 4 (0.027), 30 (0.027) | **SEC-BERT-BASE** | 13 (0.045), 12 (0.040), 11 (0.040), 14 (0.035), 10 (0.035) | **SEC-BERT-NUM** | [NUM] (1.000), one (0.000), five (0.000), three (0.000), seven (0.000) | **SEC-BERT-SHAPE** | [XX] (0.316), [XX.X] (0.253), [X.X] (0.237), [X] (0.188), [X.XX] (0.002) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2[MASK] or $5.4 billion during 2019 compared to 2018. 
| % | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | % (0.795), percent (0.174), ##fold (0.009), billion (0.004), times (0.004) | **SEC-BERT-BASE** | % (0.924), percent (0.076), points (0.000), , (0.000), times (0.000) | **SEC-BERT-NUM** | % (0.882), percent (0.118), million (0.000), units (0.000), bps (0.000) | **SEC-BERT-SHAPE** | % (0.961), percent (0.039), bps (0.000), , (0.000), bcf (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2% or $[MASK] billion during 2019 compared to 2018. | 5.4 | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | 1 (0.074), 4 (0.045), 3 (0.044), 2 (0.037), 5 (0.034) | **SEC-BERT-BASE** | 1 (0.218), 2 (0.136), 3 (0.078), 4 (0.066), 5 (0.048) | **SEC-BERT-NUM** | [NUM] (1.000), l (0.000), 1 (0.000), - (0.000), 30 (0.000) | **SEC-BERT-SHAPE** | [X.X] (0.787), [X.XX] (0.095), [XX.X] (0.049), [X.XXX] (0.046), [X] (0.013) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2% or $5.4 billion during [MASK] compared to 2018. | 2019 | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | 2017 (0.485), 2018 (0.169), 2016 (0.164), 2015 (0.070), 2014 (0.022) | **SEC-BERT-BASE** | 2019 (0.990), 2017 (0.007), 2018 (0.003), 2020 (0.000), 2015 (0.000) | **SEC-BERT-NUM** | [NUM] (1.000), as (0.000), fiscal (0.000), year (0.000), when (0.000) | **SEC-BERT-SHAPE** | [XXXX] (1.000), as (0.000), year (0.000), periods (0.000), , (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | Total net sales decreased 2% or $5.4 billion during 2019 compared to [MASK]. | 2018 | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | 2017 (0.100), 2016 (0.097), above (0.054), inflation (0.050), previously (0.037) | **SEC-BERT-BASE** | 2018 (0.999), 2019 (0.000), 2017 (0.000), 2016 (0.000), 2014 (0.000) | **SEC-BERT-NUM** | [NUM] (1.000), year (0.000), last (0.000), sales (0.000), fiscal (0.000) | **SEC-BERT-SHAPE** | [XXXX] (1.000), year (0.000), sales (0.000), prior (0.000), years (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | During 2019, the Company [MASK] $67.1 billion of its common stock and paid dividend equivalents of $14.1 billion. | repurchased | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | held (0.229), sold (0.192), acquired (0.172), owned (0.052), traded (0.033) | **SEC-BERT-BASE** | repurchased (0.913), issued (0.036), purchased (0.029), redeemed (0.010), sold (0.003) | **SEC-BERT-NUM** | repurchased (0.917), purchased (0.054), reacquired (0.013), issued (0.005), acquired (0.003) | **SEC-BERT-SHAPE** | repurchased (0.902), purchased (0.068), issued (0.010), reacquired (0.008), redeemed (0.006) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | During 2019, the Company repurchased $67.1 billion of its common [MASK] and paid dividend equivalents of $14.1 billion. 
| stock | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | stock (0.835), assets (0.039), equity (0.025), debt (0.021), bonds (0.017) | **SEC-BERT-BASE** | stock (0.857), shares (0.135), equity (0.004), units (0.002), securities (0.000) | **SEC-BERT-NUM** | stock (0.842), shares (0.157), equity (0.000), securities (0.000), units (0.000) | **SEC-BERT-SHAPE** | stock (0.888), shares (0.109), equity (0.001), securities (0.001), stocks (0.000) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | During 2019, the Company repurchased $67.1 billion of its common stock and paid [MASK] equivalents of $14.1 billion. | dividend | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | cash (0.276), net (0.128), annual (0.083), the (0.040), debt (0.027) | **SEC-BERT-BASE** | dividend (0.890), cash (0.018), dividends (0.016), share (0.013), tax (0.010) | **SEC-BERT-NUM** | dividend (0.735), cash (0.115), share (0.087), tax (0.025), stock (0.013) | **SEC-BERT-SHAPE** | dividend (0.655), cash (0.248), dividends (0.042), share (0.019), out (0.003) | Sample | Masked Token | | --------------------------------------------------- | ------------ | | During 2019, the Company repurchased $67.1 billion of its common stock and paid dividend [MASK] of $14.1 billion. | equivalents | Model | Predictions (Probability) | | --------------------------------------------------- | ------------ | | **BERT-BASE-UNCASED** | revenue (0.085), earnings (0.078), rates (0.065), amounts (0.064), proceeds (0.062) | **SEC-BERT-BASE** | payments (0.790), distributions (0.087), equivalents (0.068), cash (0.013), amounts (0.004) | **SEC-BERT-NUM** | payments (0.845), equivalents (0.097), distributions (0.024), increases (0.005), dividends (0.004) | **SEC-BERT-SHAPE** | payments (0.784), equivalents (0.093), distributions (0.043), dividends (0.015), requirements (0.009) ## Publication <div style="text-align: justify"> If you use this model cite the following article:<br> [**FiNER: Financial Numeric Entity Recognition for XBRL Tagging**](https://arxiv.org/abs/2203.06482)<br> Lefteris Loukas, Manos Fergadiotis, Ilias Chalkidis, Eirini Spyropoulou, Prodromos Malakasiotis, Ion Androutsopoulos and George Paliouras<br> In the Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (ACL 2022) (Long Papers), Dublin, Republic of Ireland, May 22 - 27, 2022 </div> ``` @inproceedings{loukas-etal-2022-finer, title = {FiNER: Financial Numeric Entity Recognition for XBRL Tagging}, author = {Loukas, Lefteris and Fergadiotis, Manos and Chalkidis, Ilias and Spyropoulou, Eirini and Malakasiotis, Prodromos and Androutsopoulos, Ion and Paliouras George}, booktitle = {Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (ACL 2022)}, publisher = {Association for Computational Linguistics}, location = {Dublin, Republic of Ireland}, year = {2022}, url = {https://arxiv.org/abs/2203.06482} } ``` ## About Us <div style="text-align: justify"> [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr) develops algorithms, models, and systems that allow computers to process and generate natural language texts. 
The group's current research interests include: * question answering systems for databases, ontologies, document collections, and the Web, especially biomedical question answering, * natural language generation from databases and ontologies, especially Semantic Web ontologies, text classification, including filtering spam and abusive content, * information extraction and opinion mining, including legal text analytics and sentiment analysis, * natural language processing tools for Greek, for example parsers and named-entity recognizers, machine learning in natural language processing, especially deep learning. The group is part of the Information Processing Laboratory of the Department of Informatics of the Athens University of Economics and Business. </div> [Manos Fergadiotis](https://manosfer.github.io) on behalf of [AUEB's Natural Language Processing Group](http://nlp.cs.aueb.gr)
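To query SEC-BERT-NUM as a masked language model, the numeric preprocessing shown earlier in this card has to be applied first; a minimal sketch follows, assuming the `sec_bert_num_preprocess` helper defined above is in scope. The sentence is preprocessed before the [MASK] token is inserted so that the mask itself is not split by the spaCy tokenizer.

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="nlpaueb/sec-bert-num")

sentence = "Total net sales decreased 2% or $5.4 billion during 2019 compared to 2018."
processed = sec_bert_num_preprocess(sentence)      # numbers become [NUM]
masked = processed.replace("decreased", "[MASK]", 1)
print(fill_mask(masked)[0])                        # top prediction for the masked verb
```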
Akashpb13/Galician_xlsr
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "gl", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- tags: - generated_from_keras_callback - dpr license: apache-2.0 model-index: - name: dpr-ctx_encoder_bert_uncased_L-12_H-128_A-2 results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2 This model(google/bert_uncased_L-2_H-128_A-2) was trained from scratch on training data: data.retriever.nq-adv-hn-train(facebookresearch/DPR). It achieves the following results on the evaluation set: ## Evaluation data evaluation dataset: facebook-dpr-dev-dataset from official DPR github |model_name|data_name|num of queries|num of passages|R@10|R@20|R@50|R@100|R@100| |---|---|---|---|---|---|---|---|---| |nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2(our)|nq-dev dataset|6445|199795|60.53%|68.28%|76.07%|80.98%|91.45%| |nlpconnect/dpr-ctx_encoder_bert_uncased_L-12_H-128_A-2(our)|nq-dev dataset|6445|199795|65.43%|71.99%|79.03%|83.24%|92.11%| |*facebook/dpr-ctx_encoder-single-nq-base(hf/fb)|nq-dev dataset|6445|199795|40.94%|49.27%|59.05%|66.00%|82.00%| evaluation dataset: UKPLab/beir test data but we have used first 2lac passage only. |model_name|data_name|num of queries|num of passages|R@10|R@20|R@50|R@100|R@100| |---|---|---|---|---|---|---|---|---| |nlpconnect/dpr-ctx_encoder_bert_uncased_L-2_H-128_A-2(our)|nq-test dataset|3452|200001|49.68%|59.06%|69.40%|75.75%|89.28%| |nlpconnect/dpr-ctx_encoder_bert_uncased_L-12_H-128_A-2(our)|nq-test dataset|3452|200001|51.62%|61.09%|70.10%|76.07%|88.70%| |*facebook/dpr-ctx_encoder-single-nq-base(hf/fb)|nq-test dataset|3452|200001|32.93%|43.74%|56.95%|66.30%|83.92%| Note: * means we have evaluated on same eval dataset. ### Usage (HuggingFace Transformers) ```python passage_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-12_H-128_A-2") query_encoder = TFAutoModel.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-12_H-128_A-2") p_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-ctx_encoder_bert_uncased_L-12_H-128_A-2") q_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/dpr-question_encoder_bert_uncased_L-12_H-128_A-2") def get_title_text_combined(passage_dicts): res = [] for p in passage_dicts: res.append(tuple((p['title'], p['text']))) return res processed_passages = get_title_text_combined(passage_dicts) def extracted_passage_embeddings(processed_passages, model_config): passage_inputs = tokenizer.batch_encode_plus( processed_passages, add_special_tokens=True, truncation=True, padding="max_length", max_length=model_config.passage_max_seq_len, return_token_type_ids=True ) passage_embeddings = passage_encoder.predict([np.array(passage_inputs['input_ids']), np.array(passage_inputs['attention_mask']), np.array(passage_inputs['token_type_ids'])], batch_size=512, verbose=1) return passage_embeddings passage_embeddings = extracted_passage_embeddings(processed_passages, model_config) def extracted_query_embeddings(queries, model_config): query_inputs = tokenizer.batch_encode_plus( queries, add_special_tokens=True, truncation=True, padding="max_length", max_length=model_config.query_max_seq_len, return_token_type_ids=True ) query_embeddings = query_encoder.predict([np.array(query_inputs['input_ids']), np.array(query_inputs['attention_mask']), np.array(query_inputs['token_type_ids'])], batch_size=512, verbose=1) return query_embeddings query_embeddings = extracted_query_embeddings(queries, model_config) ``` ### Training 
hyperparameters The following hyperparameters were used during training: - optimizer: None - training_precision: float32 ### Framework versions - Transformers 4.15.0 - TensorFlow 2.7.0 - Tokenizers 0.10.3
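The snippet above only computes passage and query embeddings; a minimal retrieval sketch on top of those embeddings is shown below. It assumes both are 2-D float arrays of pooled embeddings (random stand-ins are used here) and scores relevance by inner product, as in the original DPR setup.

```python
import numpy as np

# Stand-ins for the outputs of extracted_query_embeddings / extracted_passage_embeddings
query_embeddings = np.random.rand(5, 128).astype("float32")       # (num_queries, dim)
passage_embeddings = np.random.rand(1000, 128).astype("float32")  # (num_passages, dim)

scores = query_embeddings @ passage_embeddings.T        # relevance = inner product
top_k = 10
top_passages = np.argsort(-scores, axis=1)[:, :top_k]   # best passage indices per query, highest first
```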
Akashpb13/xlsr_hungarian_new
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "hu", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "generated_from_trainer", "hf-asr-leaderboard", "model_for_talk", "mozilla-foundation/common_voice_8_0", "robust-speech-event", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
--- license: apache-2.0 tags: - qa datasets: - squad_v2 - natural_questions model-index: - name: nlpconnect/roberta-base-squad2-nq results: - task: type: question-answering name: Question Answering dataset: name: squad_v2 type: squad_v2 config: squad_v2 split: validation metrics: - type: exact_match value: 80.3185 name: Exact Match verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiOTlmNTE0N2U3MTA1MDY1ZGZjYTYxZGIwMWUwN2EzYWM1MzhhZDI2Y2FiZDcxYTk1YTkyYzcxNGViYTM4MTUxNCIsInZlcnNpb24iOjF9.QOTfyyo4ttC1iCceQM7fYeJG9u976t1rG8RM-UxTIORP_rJHgdoYymjpTd4aghwkxg6hn3jeSKqpR5qV__0MAg - type: f1 value: 83.4669 name: F1 verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiMjg5NjgwZjVmNDZlYjYyOTlhZjgxNGJjYmMyMDUzZjQ1YTdhOWExZjVjMmE2YmJlMGUyZTQ5MzE3ZTUxMjY0ZCIsInZlcnNpb24iOjF9.qQ4U9ZwpqJeeU2lEWQ2bN_Ktq0kJbGEKjOq9liFy0_7EpTtYSc9Qzr64sJOO40fJ08Twe2At3weuz6aPgBQIDA - task: type: question-answering name: Question Answering dataset: name: squad type: squad config: plain_text split: validation metrics: - type: exact_match value: 85.5666 name: Exact Match verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYmQzMzQzOTUwNjcwN2NjOGMwNDRiZmEwZTA4OGNhZGIzZjUzNmM5MzEzYWRmOTQwMzlhNDY3ZDllYWQ3Y2ZlYSIsInZlcnNpb24iOjF9.3t6pbSduzMYHZisQWgacYssbu3ver3Xmn9hIaRO-SlRw8qsBlE5z4xM8yo5fLluZy-o_mZ6Z5l31XWpGxcNvBw - type: f1 value: 92.1939 name: F1 verified: true verifyToken: eyJhbGciOiJFZERTQSIsInR5cCI6IkpXVCJ9.eyJoYXNoIjoiYjEzZGYxODU4YWNlZmM5ZDE5ODBhZWUyMmZlN2I3MDNlMTlkYTU1M2ZiNjMwY2QyYzM4YWZiOGIzZGMzODcwZSIsInZlcnNpb24iOjF9.5wQliHDlVaZK_dIOcJYGKCo-DPtPcmpSlaf2E4EuQJcW23rNN2gci8_h_RS0ay-6m1MF-7BgsIeivlMDZgSKBQ --- # Roberta-base-Squad2-NQ ## What is SQuAD? Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. ## The Natural Questions Dataset To help spur development in open-domain question answering, we have created the Natural Questions (NQ) corpus, along with a challenge website based on this data. The NQ corpus contains questions from real users, and it requires QA systems to read and comprehend an entire Wikipedia article that may or may not contain the answer to the question. The inclusion of real user questions, and the requirement that solutions should read an entire page to find the answer, cause NQ to be a more realistic and challenging task than prior QA datasets. ## Training Firstly, we took base roberta model and trained on SQuQD 2.0 dataset for 2 epoch and then after we took NQ Small answer and trained for 1 epoch. 
Total dataset size: 204,416 examples drawn from SQuAD v2 and the NQ short-answer dataset. ## Evaluation Eval dataset: SQuAD v2 dev ``` {'exact': 80.2998399730481, 'f1': 83.4402145786235, 'total': 11873, 'HasAns_exact': 79.08232118758434, 'HasAns_f1': 85.37207619635592, 'HasAns_total': 5928, 'NoAns_exact': 81.5138772077376, 'NoAns_f1': 81.5138772077376, 'NoAns_total': 5945, 'best_exact': 80.2998399730481, 'best_exact_thresh': 0.0, 'best_f1': 83.44021457862335, 'best_f1_thresh': 0.0} ```
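A minimal usage sketch for this checkpoint with the `question-answering` pipeline (the Hub id `nlpconnect/roberta-base-squad2-nq` is taken from the model-index above); the question and context below are illustrative.

```python
from transformers import pipeline

qa = pipeline("question-answering", model="nlpconnect/roberta-base-squad2-nq")

result = qa(
    question="How many unanswerable questions does SQuAD 2.0 add?",
    context=(
        "SQuAD2.0 combines the 100,000 questions in SQuAD1.1 with over 50,000 "
        "unanswerable questions written adversarially by crowdworkers."
    ),
)
print(result)  # dict with 'score', 'start', 'end' and 'answer'
```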
Akashpb13/xlsr_kurmanji_kurdish
[ "pytorch", "safetensors", "wav2vec2", "automatic-speech-recognition", "kmr", "ku", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "model_for_talk", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10
null
--- tags: - image-to-text - image-captioning license: apache-2.0 widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg example_title: Savanna - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg example_title: Football Match - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg example_title: Airport --- # nlpconnect/vit-gpt2-image-captioning This is an image captioning model trained by @ydshieh in [flax ](https://github.com/huggingface/transformers/tree/main/examples/flax/image-captioning) this is pytorch version of [this](https://huggingface.co/ydshieh/vit-gpt2-coco-en-ckpts). # The Illustrated Image Captioning using transformers ![](https://ankur3107.github.io/assets/images/vision-encoder-decoder.png) * https://ankur3107.github.io/blogs/the-illustrated-image-captioning-using-transformers/ # Sample running code ```python from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer import torch from PIL import Image model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning") feature_extractor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning") tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) max_length = 16 num_beams = 4 gen_kwargs = {"max_length": max_length, "num_beams": num_beams} def predict_step(image_paths): images = [] for image_path in image_paths: i_image = Image.open(image_path) if i_image.mode != "RGB": i_image = i_image.convert(mode="RGB") images.append(i_image) pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values pixel_values = pixel_values.to(device) output_ids = model.generate(pixel_values, **gen_kwargs) preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True) preds = [pred.strip() for pred in preds] return preds predict_step(['doctor.e16ba4e4.jpg']) # ['a woman in a hospital bed with a woman in a hospital bed'] ``` # Sample running code using transformers pipeline ```python from transformers import pipeline image_to_text = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning") image_to_text("https://ankur3107.github.io/assets/images/image-captioning-example.png") # [{'generated_text': 'a soccer game with a player jumping to catch the ball '}] ``` # Contact for any help * https://huggingface.co/ankur310794 * https://twitter.com/ankur310794 * http://github.com/ankur3107 * https://www.linkedin.com/in/ankur310794
Aleksandar/bert-srb-base-cased-oscar
[ "pytorch", "bert", "fill-mask", "transformers", "generated_from_trainer", "autotrain_compatible" ]
fill-mask
{ "architectures": [ "BertForMaskedLM" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
## About `Distilgpt2` model finetuned on a dataset of inspirational/motivational quotes taken from the [Quotes-500K](https://github.com/ShivaliGoel/Quotes-500K) dataset. The model can generate inspirational quotes, many of which sound quite realistic. ## Code for Training The code for fine-tuning the model can be found in this repo: https://github.com/Quotify-Bot/model-training. ## Training Details The model was fine-tuned for **50 epochs** on Google Colab's GPU using about **100,000 quotes** from the original dataset. ## Some Interesting Quotes **Prompt**: Friendship is like > Friendship is like a flower. when it blooms, it beautifies this world with its fragrance. **Prompt**: Life is like > Life is like travelling through time so stop being afraid of taking a chance and start appreciating where you are in life. **Prompt**: Motivation > Motivation will drive you to action, which in turn attracts inspiration from beyond. **Prompt**: In the end > In the end, it is necessary to discover your inner beauty and truth.
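A minimal generation sketch using one of the prompts listed above; the card does not give the model's Hub id, so the path below is a placeholder for wherever the fine-tuned checkpoint is stored, and the sampling settings are illustrative.

```python
from transformers import pipeline

# "path/to/quotes-distilgpt2" is a placeholder, not a real Hub id
generator = pipeline("text-generation", model="path/to/quotes-distilgpt2")

out = generator("Friendship is like", max_length=40, do_sample=True, num_return_sequences=1)
print(out[0]["generated_text"])
```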
Aleksandar/bert-srb-ner-setimes-lr
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
2022-02-17T02:41:22Z
--- license: mit tags: - flair - token-classification - sequence-tagger-model language: "pt" widget: - text: "FISIOTERAPIA TRAUMATO - MANHÃ Henrique Dias, 38 anos. Exercícios metabólicos de extremidades inferiores. Realizo mobilização patelar e leve mobilização de flexão de joelho conforme liberado pelo Dr Marcelo Arocha. Oriento cuidados e posicionamentos." --- ## Portuguese Name Identification The [NoHarm-Anony - De-Identification of Clinical Notes Using Contextualized Language Models and a Token Classifier](https://link.springer.com/chapter/10.1007/978-3-030-91699-2_3) paper contains Flair-based models for Portuguese Language, initialized with [Flair BBP](https://github.com/jneto04/ner-pt) & trained on clinical notes with names tagged. ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("noharm-ai/anony") # make example sentence sentence = Sentence("FISIOTERAPIA TRAUMATO - MANHÃ Henrique Dias, 38 anos. Exercícios metabólicos de extremidades inferiores. Realizo mobilização patelar e leve mobilização de flexão de joelho conforme liberado pelo Dr Marcelo Arocha. Oriento cuidados e posicionamentos.") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [5,6]: "Henrique Dias" [− Labels: NOME (0.9735)] Span [31,32]: "Marcelo Arocha" [− Labels: NOME (0.9803)] ``` So, the entities "*Henrique Dias*" (labeled as a **nome**) and "*Marcelo Arocha*" (labeled as a **nome**) are found in the sentence. ## More Information Refer to the original paper, [De-Identification of Clinical Notes Using Contextualized Language Models and a Token Classifier](https://link.springer.com/chapter/10.1007/978-3-030-91699-2_3) for additional details and performance. ## Acknowledgements We thank Dr. Ana Helena D. P. S. Ulbrich, who provided the clinical notes dataset from the hospital, for her valuable cooperation. We also thank the volunteers of the Institute of Artificial Intelligence in Healthcare Celso Pereira and Ana Lúcia Dias, for the dataset annotation. ## Citation ``` @inproceedings{santos2021identification, title={De-Identification of Clinical Notes Using Contextualized Language Models and a Token Classifier}, author={Santos, Joaquim and dos Santos, Henrique DP and Tabalipa, F{\'a}bio and Vieira, Renata}, booktitle={Brazilian Conference on Intelligent Systems}, pages={33--41}, year={2021}, organization={Springer} } ```
Aleksandar/bert-srb-ner-setimes
[ "pytorch", "bert", "token-classification", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2021-04-29T10:45:50Z
# Generate news in the Thai language from keywords.

```python
from transformers import MT5ForConditionalGeneration, T5Tokenizer

MODEL_NAME = "nonamenlp/news_gen"
TOKENIZER_NAME = "nonamenlp/news_gen"

trained_model = MT5ForConditionalGeneration.from_pretrained(MODEL_NAME, return_dict=True)
tokenizer = T5Tokenizer.from_pretrained(TOKENIZER_NAME)
```
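The card does not document the expected input format or decoding settings, so the following generation sketch is only an assumption of how keyword-conditioned generation might be invoked with the objects loaded above; the keyword string is a hypothetical example.

```python
# Hypothetical keyword prompt; the exact format used during fine-tuning is not documented here.
keywords = "เศรษฐกิจ ท่องเที่ยว ภูเก็ต"

inputs = tokenizer(keywords, return_tensors="pt")
outputs = trained_model.generate(**inputs, max_length=256, num_beams=4)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```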
Aleksandar/electra-srb-ner-setimes
[ "pytorch", "electra", "token-classification", "transformers", "generated_from_trainer", "autotrain_compatible" ]
token-classification
{ "architectures": [ "ElectraForTokenClassification" ], "model_type": "electra", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
2022-01-31T22:20:22Z
--- tags: - conversational --- # mremoji DialoGPT Model
Aleksandra/distilbert-base-uncased-finetuned-squad
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- tags: - conversational --- # 7evenpool DialoGPT Model
AlekseyKulnevich/Pegasus-Summarization
[ "pytorch", "pegasus", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "PegasusForConditionalGeneration" ], "model_type": "pegasus", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
2021-11-27T14:06:47Z
--- language: en license: apache-2.0 tags: - generated_from_trainer - t5-base model-index: - name: cover-letter-t5-base results: [] widget: - text: "coverletter name: Nouamane Tazi job: Machine Learning Engineer at HuggingFace background: Master's student in AI at the University of Paris Saclay experiences: I participated in the Digital Tech Year program, developing three minimal valuable products for three companies in a 7-week constraint. I also spent 1 year as a machine learning engineer for Flashbrand where I mainly worked on their chatbot . And I recently completed the HuggingFace course, where I built an amazing huggingface space. I am a strong team player." --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # cover-letter-t5-base This model is a fine-tuned version of [t5-base](https://huggingface.co/t5-base) on cover letter samples scraped from Indeed and JobHero. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.12.5 - Pytorch 1.10.0+cu111 - Datasets 1.16.1 - Tokenizers 0.10.3
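A minimal generation sketch using the widget prompt format shown in the metadata above; the model-index name is `cover-letter-t5-base`, so the path below is a placeholder that should be replaced with this repository's full Hub id.

```python
from transformers import pipeline

# Placeholder: replace with the full Hub id of this repository
generator = pipeline("text2text-generation", model="path/to/cover-letter-t5-base")

prompt = (
    "coverletter name: Nouamane Tazi "
    "job: Machine Learning Engineer at HuggingFace "
    "background: Master's student in AI at the University of Paris Saclay "
    "experiences: I participated in the Digital Tech Year program, developing three "
    "minimal valuable products for three companies in a 7-week constraint."
)
print(generator(prompt, max_length=300)[0]["generated_text"])
```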
AlexKay/xlm-roberta-large-qa-multilingual-finedtuned-ru
[ "pytorch", "xlm-roberta", "question-answering", "en", "ru", "multilingual", "arxiv:1912.09723", "transformers", "license:apache-2.0", "autotrain_compatible", "has_space" ]
question-answering
{ "architectures": [ "XLMRobertaForQuestionAnswering" ], "model_type": "xlm-roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
10,012
null
--- language: - ar license: apache-2.0 tags: - ar - automatic-speech-recognition - common_voice - generated_from_trainer - hf-asr-leaderboard - robust-speech-event datasets: - common_voice model-index: - name: XLS-R-300M - Arabic results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: ar metrics: - name: Test WER type: wer value: 1.0 - name: Test CER type: cer value: 1.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-xls-r-300m-ar This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the COMMON_VOICE - AR dataset. It achieves the following results on the evaluation set: - eval_loss: 3.0191 - eval_wer: 1.0 - eval_runtime: 252.2389 - eval_samples_per_second: 30.217 - eval_steps_per_second: 0.476 - epoch: 1.0 - step: 340 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 5 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.2.dev0 - Tokenizers 0.11.0 #### Evaluation Commands Please use the evaluation script `eval.py` included in the repo. 1. To evaluate on `speech-recognition-community-v2/dev_data` ```bash python eval.py --model_id nouamanetazi/wav2vec2-xls-r-300m-ar --dataset speech-recognition-community-v2/dev_data --config ar --split validation --chunk_length_s 5.0 --stride_length_s 1.0 ```
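For quick inspection, the checkpoint can also be run through the `automatic-speech-recognition` pipeline (the Hub id is taken from the evaluation command above); note that with the reported WER of 1.0 after a single epoch, transcriptions are not expected to be usable yet. The audio path below is a placeholder for any 16 kHz mono recording.

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="nouamanetazi/wav2vec2-xls-r-300m-ar")

# "sample_ar.wav" is a placeholder path to a 16 kHz mono audio file
print(asr("sample_ar.wav", chunk_length_s=5.0, stride_length_s=1.0))
```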
AlexMaclean/sentence-compression-roberta
[ "pytorch", "roberta", "token-classification", "transformers", "generated_from_trainer", "license:mit", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
13
null
--- language: - ar license: apache-2.0 tags: - ar - automatic-speech-recognition - common_voice - generated_from_trainer - hf-asr-leaderboard - robust-speech-event datasets: - common_voice model-index: - name: XLS-R-300M - Arabic results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: ar metrics: - name: Test WER type: wer value: 1.0 - name: Test CER type: cer value: 1.0 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-xls-r-300m-ar This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the COMMON_VOICE - AR dataset. It achieves the following results on the evaluation set: - eval_loss: 3.0191 - eval_wer: 1.0 - eval_runtime: 252.2389 - eval_samples_per_second: 30.217 - eval_steps_per_second: 0.476 - epoch: 1.0 - step: 340 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 5 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.17.0.dev0 - Pytorch 1.10.2+cu102 - Datasets 1.18.2.dev0 - Tokenizers 0.11.0 #### Evaluation Commands Please use the evaluation script `eval.py` included in the repo. 1. To evaluate on `speech-recognition-community-v2/dev_data` ```bash python eval.py --model_id nouamanetazi/wav2vec2-xls-r-300m-ar --dataset speech-recognition-community-v2/dev_data --config ar --split validation --chunk_length_s 5.0 --stride_length_s 1.0 ```
AlexN/xls-r-300m-fr-0
[ "pytorch", "wav2vec2", "automatic-speech-recognition", "fr", "dataset:mozilla-foundation/common_voice_8_0", "transformers", "mozilla-foundation/common_voice_8_0", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "license:apache-2.0", "model-index" ]
automatic-speech-recognition
{ "architectures": [ "Wav2Vec2ForCTC" ], "model_type": "wav2vec2", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- language: - hu tags: - token-classification license: gpl metrics: - F1 widget: - text: "A jótékonysági szervezet által idézett Forbes-adatok szerint a világ tíz leggazdagabb embere: Elon Musk (Tesla, SpaceX), Jeff Bezos (Amazon, Blue Origin), Bernard Arnault és családja (LVMH, azaz Louis Vuitton és Moët Hennessy), Bill Gates (Microsoft), Larry Ellison (Oracle), Larry Page (Google), Sergey Brin (Google), Mark Zuckerberg (Facebook), Steve Ballmer (Microsoft) és Warren Buffett (befektető). Miközben vagyonuk együttesen 700 milliárdról másfél ezer milliárd dollárra nőtt 2020 márciusa és 2021 novembere között, jelentős eltérések vannak közöttük: Musk vagyona több mint 1000 százalékos, míg Gatesé szerényebb, 30 százalékos növekedést mutatott." inference: parameters: aggregation_strategy: "first" --- # Hungarian named entity recognition model with four entity types: PER ORG LOC MISC - Pretrained model used: SZTAKI-HLT/hubert-base-cc - Finetuned on NYTK-NerKor Corpus ## Limitations - max_seq_length = 448 ## See [https://huggingface.co/novakat/nerkor-cars-onpp-hubert](https://huggingface.co/novakat/nerkor-cars-onpp-hubert) for a much more elaborate Hungarian named entity model.
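A minimal tagging sketch mirroring the widget settings above (`aggregation_strategy: "first"`); this card does not state its own Hub id, so the model path below is a placeholder to be replaced with this repository's id, and the sample text is a shortened piece of the widget sentence.

```python
from transformers import pipeline

# Placeholder: replace with the Hub id of this repository
ner = pipeline("token-classification", model="path/to/hu-nerkor-hubert", aggregation_strategy="first")

text = "A világ tíz leggazdagabb embere: Elon Musk (Tesla, SpaceX), Jeff Bezos (Amazon, Blue Origin), Bill Gates (Microsoft)."
for ent in ner(text):
    print(ent["entity_group"], ent["word"], round(ent["score"], 3))
```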
AlexaRyck/KEITH
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit language: en tags: - conversational - npc-engine --- # BART chatbot trained on the [LIGHT](https://parl.ai/projects/light/) dataset with [Text Generative Adversarial Imitation Learning](https://arxiv.org/abs/2004.13796) This model is intended to be used with [npc-engine](https://github.com/npc-engine/npc-engine). It is based on [facebook/bart-large](https://huggingface.co/facebook/bart-large). [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) was used as the adversarial discriminator for the GAIL stage.
Alexander-Learn/bert-finetuned-ner-accelerate
[ "pytorch", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
--- license: mit language: en tags: - text-to-speech - npc-engine --- # Exported [FlowtronTTS](https://arxiv.org/abs/2005.05957) with [WaveGlow](https://arxiv.org/abs/1811.00002) vocoder This model is intended to be used with [npc-engine](https://github.com/npc-engine/npc-engine). Fork used for exporting https://github.com/npc-engine/flowtron Original code https://github.com/NVIDIA/flowtron
Alexander-Learn/bert-finetuned-ner
[ "pytorch", "tensorboard", "bert", "token-classification", "transformers", "autotrain_compatible" ]
token-classification
{ "architectures": [ "BertForTokenClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
--- license: mit language: en tags: - speech-to-text - npc-engine --- # Exported [Nemo](https://github.com/NVIDIA/NeMo) models for Speech to Text with [OpenSLR 11](https://www.openslr.org/11/) librispeech 3-gram language model This model is intended to be used with [npc-engine](https://github.com/npc-engine/npc-engine).
Alexander-Learn/bert-finetuned-squad-accelerate
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit language: en tags: - sentence-similarity - npc-engine --- # Export of [sentence-transformers/paraphrase-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/paraphrase-MiniLM-L6-v2) This model is intended to be used with [npc-engine](https://github.com/npc-engine/npc-engine).
AlexeyYazev/my-awesome-model
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
This is one of the smaller BERT models from Google: https://github.com/google-research/bert#bert. A BERT model with 2 layers, a hidden size of 128, and 2 attention heads.
Alfia/anekdotes
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
--- license: mit --- ## MiniLM: 3 Layer Version This is a 3-layer version of [microsoft/MiniLM-L12-H384-uncased](https://huggingface.co/microsoft/MiniLM-L12-H384-uncased/), obtained by keeping only layers [3, 7, 11].
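A sketch of the layer-selection step described above, assuming the parent model is loaded as a standard BERT-style encoder with `transformers`; whether the published 3-layer checkpoint was produced exactly this way (and whether the indices are 0-based) is an assumption:

```python
import torch
from transformers import AutoModel, AutoTokenizer

# Load the 12-layer parent model.
parent_id = "microsoft/MiniLM-L12-H384-uncased"
tokenizer = AutoTokenizer.from_pretrained(parent_id)
model = AutoModel.from_pretrained(parent_id)

# Keep only encoder layers [3, 7, 11] (assumed 0-indexed here).
keep = [3, 7, 11]
model.encoder.layer = torch.nn.ModuleList(
    [layer for i, layer in enumerate(model.encoder.layer) if i in keep]
)
model.config.num_hidden_layers = len(keep)

out = model(**tokenizer("hello world", return_tensors="pt"))
print(out.last_hidden_state.shape)  # (1, seq_len, 384), now computed with 3 layers
```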
Alireza1044/albert-base-v2-rte
[ "pytorch", "tensorboard", "albert", "text-classification", "en", "dataset:glue", "transformers", "generated_from_trainer", "license:apache-2.0" ]
text-classification
{ "architectures": [ "AlbertForSequenceClassification" ], "model_type": "albert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
# MiniLMv2 This is a MiniLMv2 model from: [https://github.com/microsoft/unilm](https://github.com/microsoft/unilm/tree/master/minilm)
Amrrs/indian-foods
[ "pytorch", "tensorboard", "vit", "image-classification", "transformers", "huggingpics", "model-index", "autotrain_compatible" ]
image-classification
{ "architectures": [ "ViTForImageClassification" ], "model_type": "vit", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
33
null
<!--- # ############################################################################################## # # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # ############################################################################################## --> [Megatron](https://arxiv.org/pdf/1909.08053.pdf) is a large, powerful transformer developed by the Applied Deep Learning Research team at NVIDIA. This particular Megatron model was trained from a generative, left-to-right transformer in the style of GPT-2. This model was trained on text sourced from Wikipedia, RealNews, OpenWebText, and CC-Stories. It contains 345 million parameters. Find more information at [https://github.com/NVIDIA/Megatron-LM](https://github.com/NVIDIA/Megatron-LM) # How to run Megatron GPT2 using Transformers ## Prerequisites In that guide, we run all the commands from a folder called `$MYDIR` and defined as (in `bash`): ``` export MYDIR=$HOME ``` Feel free to change the location at your convenience. To run some of the commands below, you'll have to clone `Transformers`. ``` git clone https://github.com/huggingface/transformers.git $MYDIR/transformers ``` ## Get the checkpoints from the NVIDIA GPU Cloud You must create a directory called `nvidia/megatron-gpt2-345m`: ``` mkdir -p $MYDIR/nvidia/megatron-gpt2-345m ``` You can download the checkpoints from the [NVIDIA GPU Cloud (NGC)](https://ngc.nvidia.com/catalog/models/nvidia:megatron_lm_345m). For that you have to [sign up](https://ngc.nvidia.com/signup) for and setup the NVIDIA GPU Cloud (NGC) Registry CLI. Further documentation for downloading models can be found in the [NGC documentation](https://docs.nvidia.com/dgx/ngc-registry-cli-user-guide/index.html#topic_6_4_1). Alternatively, you can directly download the checkpoints using: ``` wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O $MYDIR/nvidia/megatron-gpt2-345m/checkpoint.zip ``` ## Converting the checkpoint In order to be loaded into `Transformers`, the checkpoint has to be converted. You should run the following command for that purpose. That command will create `config.json` and `pytorch_model.bin` in `$MYDIR/nvidia/megatron-gpt2-345m`. You can move those files to different directories if needed. ``` python3 $MYDIR/transformers/src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py $MYDIR/nvidia/megatron-gpt2-345m/checkpoint.zip ``` As explained in [PR #14956](https://github.com/huggingface/transformers/pull/14956), if when running this conversion script and you're getting an exception: ``` ModuleNotFoundError: No module named 'megatron.model.enums' ``` you need to tell python where to find the clone of Megatron-LM, e.g.: ``` cd /tmp git clone https://github.com/NVIDIA/Megatron-LM PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ... 
``` Or, if you already have it cloned elsewhere, simply adjust the path to the existing path. If the training was done using a Megatron-LM fork, e.g. [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/) then you may need to have that one in your path, i.e., /path/to/Megatron-DeepSpeed. ## Text generation The following code shows how to use the Megatron GPT2 checkpoint and the Transformers API to generate text. ``` import os import torch from transformers import GPT2Tokenizer, GPT2LMHeadModel # The tokenizer. Megatron was trained with standard tokenizer(s). tokenizer = GPT2Tokenizer.from_pretrained('gpt2') # The path to the config/checkpoint (see the conversion step above). directory = os.path.join(os.environ['MYDIR'], 'nvidia/megatron-gpt2-345m') # Load the model from $MYDIR/nvidia/megatron-gpt2-345m. model = GPT2LMHeadModel.from_pretrained(directory) # Copy to the device and use FP16. assert torch.cuda.is_available() device = torch.device("cuda") model.to(device) model.eval() model.half() # Generate the sentence. output = model.generate(input_ids=None, max_length=32, num_return_sequences=1) # Output the text. for sentence in output: sentence = sentence.tolist() text = tokenizer.decode(sentence, clean_up_tokenization_spaces=True) print(text) ``` # To use this as a normal HuggingFace model If you want to use this model with HF Trainer, here is a quick way to do that: 1. Download nvidia checkpoint: ``` wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/megatron_lm_345m/versions/v0.0/zip -O megatron_lm_345m_v0.0.zip ``` 2. Convert: ``` python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py megatron_lm_345m_v0.0.zip ``` 3. Fetch missing files ``` git clone https://huggingface.co/nvidia/megatron-gpt2-345m/ ``` 4. Move the converted files into the cloned model dir ``` mv config.json pytorch_model.bin megatron-gpt2-345m/ ``` 5. The `megatron-gpt2-345m` dir should now have all the files which can be passed to HF Trainer as `--model_name_or_path megatron-gpt2-345m` # Original code The original Megatron code can be found here: [https://github.com/NVIDIA/Megatron-LM](https://github.com/NVIDIA/Megatron-LM).
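Not part of the original card: the generation example above starts from an empty prompt, so here is a complementary sketch of prompt-conditioned generation with the converted checkpoint, assuming the conversion step above has already produced `config.json` and `pytorch_model.bin` in `$MYDIR/nvidia/megatron-gpt2-345m`.

```python
import os
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Standard GPT-2 tokenizer, as in the card's own example.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
directory = os.path.join(os.environ["MYDIR"], "nvidia/megatron-gpt2-345m")
model = GPT2LMHeadModel.from_pretrained(directory)
model.eval()

prompt = "The largest transformer models are trained on"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
with torch.no_grad():
    output = model.generate(input_ids, max_length=48, do_sample=True, top_k=40)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```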
Andrija/SRoBERTa-base-NER
[ "pytorch", "roberta", "token-classification", "hr", "sr", "multilingual", "dataset:hr500k", "transformers", "license:apache-2.0", "autotrain_compatible" ]
token-classification
{ "architectures": [ "RobertaForTokenClassification" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
12
null
--- language: zh-tw datasets: DRCD tasks: Question Answering --- # BERT DRCD 384 This model is a fine-tuned checkpoint of [bert-base-chinese](https://huggingface.co/bert-base-chinese), trained on the DRCD dataset. It reaches an F1 score of 86 and an EM score of 83. Training Arguments: - length: 384 - stride: 128 - learning_rate: 3e-5 - batch_size: 10 - epoch: 3 [Colab with details](https://colab.research.google.com/drive/1kZv7ZRmvUdCKEhQg8MBrKljGWvV2X3CP?usp=sharing) ## Deployment Deploy the [BERT-DRCD-QuestionAnswering](https://github.com/pleomax0730/BERT-DRCD-QuestionAnswering) model using `FastAPI`, containerized with `Docker`. ## Usage ### In Transformers ```python import torch from transformers import BertTokenizerFast, BertForQuestionAnswering text = "鴻海科技集團是由臺灣企業家郭台銘創辦的跨國企業,總部位於臺灣新北市土城區,主要生產地則在中國大陸,以富士康做為商標名稱。其專注於電子產品的代工服務,研發生產精密電氣元件、機殼、準系統、系統組裝、光通訊元件、液晶顯示件等3C產品上、下游產品及服務。" query = "鴻海集團總部位於哪裡?" device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") tokenizer = BertTokenizerFast.from_pretrained("nyust-eb210/braslab-bert-drcd-384") model = BertForQuestionAnswering.from_pretrained("nyust-eb210/braslab-bert-drcd-384").to(device) encoded_input = tokenizer(text, query, return_tensors="pt").to(device) qa_outputs = model(**encoded_input) start = torch.argmax(qa_outputs.start_logits).item() end = torch.argmax(qa_outputs.end_logits).item() answer = encoded_input.input_ids.tolist()[0][start : end + 1] answer = "".join(tokenizer.decode(answer).split()) start_prob = torch.max(torch.nn.Softmax(dim=-1)(qa_outputs.start_logits)).item() end_prob = torch.max(torch.nn.Softmax(dim=-1)(qa_outputs.end_logits)).item() confidence = (start_prob + end_prob) / 2 print(answer, confidence) # 臺灣新北市土城區, 0.92 ```
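As a complement to the manual start/end decoding above, the same checkpoint can also be driven through the `question-answering` pipeline; this is a sketch, not part of the original card:

```python
from transformers import pipeline

qa = pipeline(
    "question-answering",
    model="nyust-eb210/braslab-bert-drcd-384",
    tokenizer="nyust-eb210/braslab-bert-drcd-384",
)
context = "鴻海科技集團是由臺灣企業家郭台銘創辦的跨國企業,總部位於臺灣新北市土城區,主要生產地則在中國大陸。"
result = qa(question="鴻海集團總部位於哪裡?", context=context)
print(result["answer"], result["score"])
```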
AnonymousSub/AR_rule_based_roberta_only_classfn_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
7
null
# BERT MCN-Model using SMM4H 2017 (subtask 3) data The model was trained using [clagator/biobert_v1.1_pubmed_nli_sts](https://huggingface.co/clagator/biobert_v1.1_pubmed_nli_sts) as a base and the SMM4H 2017 dataset (subtask 3). ## Dataset See [here](https://github.com/olastor/medical-concept-normalization/tree/main/data/smm4h) for the scripts and datasets. **Attribution** Sarker, Abeed (2018), “Data and systems for medication-related text classification and concept normalization from Twitter: Insights from the Social Media Mining for Health (SMM4H)-2017 shared task”, Mendeley Data, V2, doi: 10.17632/rxwfb3tysd.2 ### Test Results - Acc: 89.44 - Acc@2: 91.84 - Acc@3: 93.20 - Acc@5: 94.32 - Acc@10: 95.04 Acc@N denotes the accuracy when the top N predictions of the model are taken into account, not just the first one.
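A sketch of how Acc@N can be computed at inference time, assuming the checkpoint loads as a standard sequence classifier over concept labels; the repository id below is a hypothetical placeholder for this model:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "path/to/this-mcn-checkpoint"  # hypothetical placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

mention = "my head is killing me"
inputs = tokenizer(mention, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits[0]

# Acc@N counts a mention as correct if the gold concept is among the top N predicted classes.
top5 = torch.topk(logits, k=5)
print([model.config.id2label[i] for i in top5.indices.tolist()])
```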
AnonymousSub/AR_rule_based_roberta_only_classfn_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- license: apache-2.0 tags: - generated_from_keras_callback model-index: - name: distilgpt2-finetuned-reddit-aita-text-gen results: [] --- <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-finetuned-reddit-aita-text-gen This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on an unknown dataset. It achieves the following results on the evaluation set: ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': 0.001, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-07, 'amsgrad': False, 'weight_decay_rate': 0.0} - training_precision: float32 ### Training results ### Framework versions - Transformers 4.14.1 - TensorFlow 2.7.0 - Datasets 1.16.1 - Tokenizers 0.10.3
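Since the card is still a template, here is a hedged usage sketch; it assumes the checkpoint is a standard GPT-2-style causal LM saved to the Hub, and the repository id below is a placeholder:

```python
from transformers import pipeline

# Placeholder id for the fine-tuned checkpoint described above.
generator = pipeline("text-generation", model="your-username/distilgpt2-finetuned-reddit-aita-text-gen")
prompt = "AITA for"
print(generator(prompt, max_length=60, num_return_sequences=1)[0]["generated_text"])
```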
AnonymousSub/AR_rule_based_twostagetriplet_hier_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- language: en datasets: - LJSpeech - LibriTTS tags: - audio - TTS license: apache-2.0 --- # ontocord/fastspeech2-en Modified version of the text-to-speech system [FastSpeech 2: Fast and High-Quality End-to-End Text to Speech] (https://arxiv.org/abs/2006.04558v1). ## Installation ``` git clone https://github.com/ontocord/fastspeech2_hf pip install transformers torchaudio ``` ## Usage The model can be used directly as follows: ``` # load the model and tokenizer from fastspeech2_hf.modeling_fastspeech2 import FastSpeech2ForPretraining, FastSpeech2Tokenizer model = FastSpeech2ForPretraining.from_pretrained("ontocord/fastspeech2-en") tokenizer = FastSpeech2Tokenizer.from_pretrained("ontocord/fastspeech2-en") # some helper routines from IPython.display import Audio as IPAudio, display as IPdisplay import torch import torchaudio def play_audio(waveform, sample_rate): waveform = waveform.numpy() if len(waveform.shape)==1: IPdisplay(IPAudio(waveform, rate=sample_rate)) return num_channels, num_frames = waveform.shape if num_channels <= 1: IPdisplay(IPAudio(waveform[0], rate=sample_rate)) elif num_channels == 2: IPdisplay(IPAudio((waveform[0], waveform[1]), rate=sample_rate)) else: raise ValueError("Waveform with more than 2 channels are not supported.") # set the g2p module for the tokenizer tokenizer.set_g2p(model.fastspeech2.g2p) # you can run in half mode on gpu. model = model.cuda().half() sentences = [ "Advanced text to speech models such as Fast Speech can synthesize speech significantly faster than previous auto regressive models with comparable quality. The training of Fast Speech model relies on an auto regressive teacher model for duration prediction and knowledge distillation, which can ease the one to many mapping problem in T T S. However, Fast Speech has several disadvantages, 1, the teacher student distillation pipeline is complicated, 2, the duration extracted from the teacher model is not accurate enough, and the target mel spectrograms distilled from teacher model suffer from information loss due to data simplification, both of which limit the voice quality. ", "Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition " "in being comparatively modern. ", "For although the Chinese took impressions from wood blocks engraved in relief for centuries before the woodcutters of the Netherlands, by a similar process " "produced the block books, which were the immediate predecessors of the true printed book, " "the invention of movable metal letters in the middle of the fifteenth century may justly be considered as the invention of the art of printing. ", "And it is worth mention in passing that, as an example of fine typography, " "the earliest book printed with movable types, the Gutenberg, or \"forty-two line Bible\" of about 1455, " "has never been surpassed. ", "Printing, then, for our purpose, may be considered as the art of making books by means of movable types. 
" "Now, as all books not primarily intended as picture-books consist principally of types composed to form letterpress,", ] batch = tokenizer(sentences, return_tensors="pt", padding=True) model.eval() with torch.no_grad(): out = model(use_postnet=False, **batch) wav =out[-2] for line, phone, w in zip(sentences, tokenizer.batch_decode(batch['input_ids']), wav): print ("txt:", line) print ("phoneme:", phone) play_audio(w.type(torch.FloatTensor), model.config.sampling_rate) ``` ##Github Code Repo Current code for this model can be found [here](https://github.com/ontocord/fastspeech2_hf) This is a work in progress (WIP) port of the model and code from [this repo] (https://github.com/ming024/FastSpeech2). The datasets on which this model was trained: - LJSpeech: a single-speaker English dataset consists of 13100 short audio clips of a female speaker reading passages from 7 non-fiction books, approximately 24 hours in total. - LibriTTS: a multi-speaker English dataset containing 585 hours of speech by 2456 speakers.
AnonymousSub/AR_specter
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: vi datasets: - common_voice - FOSD: https://data.mendeley.com/datasets/k9sxg2twv4/4 metrics: - wer tags: - language-modeling - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: MT5 Fix Asr Vietnamese by Ontocord results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice vi type: common_voice args: vi metrics: - name: Test WER type: wer value: 25.207182 --- # Ontocord/mt5-fix-asr-vietnamese Fine-tuned mt5 to correct output of an ASR model trained on [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) which was trained on Vietnamese using the [Common Voice](https://huggingface.co/datasets/common_voice), and [FOSD](https://data.mendeley.com/datasets/k9sxg2twv4/4). ## Usage The model can be used directly by submitting vietnamese asr text, but is is best to use with the ontocord/wav2vec2-large-xlsr-vietnamese model. ``` import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, pipelines device = torch.device("cuda" if torch.cuda.is_available() else "cpu") test_dataset = load_dataset("common_voice", "vi", split="test[:2%]") processor = Wav2Vec2Processor.from_pretrained("ontocord/wav2vec2-large-xlsr-53-vietnamese") model = Wav2Vec2ForCTC.from_pretrained("ontocord/wav2vec2-large-xlsr-53-vietnamese").to(device) mt5 = pipelines.pipeline("text2text-generation","ontocord/mt5-fix-asr-vietnamese", device=0 if device == "cuda" else -1) resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the aduio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to(device), attention_mask=inputs.attention_mask.to(device)).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", [aHash['generated_text'] for aHash in mt5(processor.batch_decode(predicted_ids), max_length=100)]) print("Reference:", test_dataset["sentence"][:2]) ``` ## Evaluation The model can be evaluated as follows on the Vietnamese test data of Common Voice. ``` import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor, pipelines import re test_dataset = load_dataset("common_voice", "vi", split="test") wer = load_metric("wer") device = torch.device("cuda" if torch.cuda.is_available() else "cpu") processor = Wav2Vec2Processor.from_pretrained("ontocord/wav2vec2-large-xlsr-vietnamese") model = Wav2Vec2ForCTC.from_pretrained("ontocord/wav2vec2-large-xlsr-vietnamese").to(device) mt5 = pipelines.pipeline("text2text-generation","ontocord/mt5-fix-asr-vietnamese", device=0 if device == "cuda" else -1) chars_to_ignore_regex = '[\\\+\@\ǀ\,\?\.\!\-\;\:\"\“\%\‘\”\�]' resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. 
# We need to read the aduio files as arrays def speech_file_to_array_fn(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) # you may also want to use the decode_string from https://huggingface.co/Nhut/wav2vec2-large-xlsr-vietnamese def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to(device), attention_mask=inputs.attention_mask.to(device)).logits pred_ids = torch.argmax(logits, dim=-1) max_length = int(pred_ids.size()[1]) txt = [aHash['generated_text'].strip() for aHash in mt5(processor.batch_decode(pred_ids), max_length=max_length)] batch["pred_strings"] = txt return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: 25.207182 ## Training The Common Voice train, validation, and FPT datasets were used for training. The script used for training can be found here # TODO
AnonymousSub/EManuals_BERT_copy
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- language: vi datasets: - common_voice - FOSD: https://data.mendeley.com/datasets/k9sxg2twv4/4 metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: XLSR Wav2Vec2 Vietnamese by Ontocord results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice vi type: common_voice args: vi metrics: - name: Test WER type: wer value: 42.403315 --- # Ontocord/Wav2Vec2-Large-XLSR-53-Vietnamese Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on Vietnamese using the [Common Voice](https://huggingface.co/datasets/common_voice), [FOSD](https://data.mendeley.com/datasets/k9sxg2twv4/4). When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ``` import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = load_dataset("common_voice", "vi", split="test[:2%]") processor = Wav2Vec2Processor.from_pretrained("ontocord/wav2vec2-large-xlsr-53-vietnamese") model = Wav2Vec2ForCTC.from_pretrained("ontocord/wav2vec2-large-xlsr-53-vietnamese") resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the aduio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset["sentence"][:2]) ``` ## Evaluation The model can be evaluated as follows on the Vietnamese test data of Common Voice. ``` import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor import re test_dataset = load_dataset("common_voice", "vi", split="test") wer = load_metric("wer") processor = Wav2Vec2Processor.from_pretrained("ontocord/wav2vec2-large-xlsr-vietnamese") model = Wav2Vec2ForCTC.from_pretrained("ontocord/wav2vec2-large-xlsr-vietnamese") model.to("cuda") chars_to_ignore_regex = '[\\\+\@\ǀ\,\?\.\!\-\;\:\"\“\%\‘\”\�]' resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. 
# We need to read the aduio files as arrays def speech_file_to_array_fn(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) # you may also want to use the decode_string from https://huggingface.co/Nhut/wav2vec2-large-xlsr-vietnamese def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_strings"] = processor.batch_decode(pred_ids) return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: 42.403315 ## Training The Common Voice train, validation, and FPT datasets were used for training. The script used for training can be found here # TODO
AnonymousSub/SR_EManuals-RoBERTa
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
--- tags: - conversational --- # Elon Musk DialoGPT Model
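The card is only a title, so here is the standard DialoGPT-style chat loop as a hedged sketch; the repository id is a placeholder for this checkpoint, and the pattern assumes it was trained in the usual DialoGPT format with turns separated by the EOS token:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-username/DialoGPT-small-elonmusk"  # hypothetical placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

chat_history_ids = None
for step in range(3):
    # Append the user's turn, terminated by the EOS token, to the running history.
    new_ids = tokenizer.encode(input(">> User: ") + tokenizer.eos_token, return_tensors="pt")
    bot_input_ids = new_ids if chat_history_ids is None else torch.cat([chat_history_ids, new_ids], dim=-1)
    chat_history_ids = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id)
    print("Bot:", tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True))
```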
AnonymousSub/SR_bert-base-uncased
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
--- tags: - vision widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png candidate_labels: playing music, playing sports example_title: Cat & Dog --- # Model Card: CLIP Disclaimer: The model card is taken and modified from the official CLIP repository, it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md). ## Model Details The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within. ### Model Date January 2021 ### Model Type The base model uses a ViT-B/16 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. The original implementation had two variants: one using a ResNet image encoder and the other using a Vision Transformer. This repository has the variant with the Vision Transformer. ### Documents - [Blog Post](https://openai.com/blog/clip/) - [CLIP Paper](https://arxiv.org/abs/2103.00020) ### Use with Transformers ```python3 from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` ## Model Use ### Intended Use The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. #### Primary intended uses The primary intended users of these models are AI researchers. We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. ### Out-of-Scope Use Cases **Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. 
Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases. ## Data The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet which tend to skew towards more developed nations, and younger, male users. ### Data Mission Statement Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset. ## Performance and Limitations ### Performance We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision datasets such as OCR to texture recognition to fine-grained classification. The paper describes model performance on the following datasets: - Food101 - CIFAR10 - CIFAR100 - Birdsnap - SUN397 - Stanford Cars - FGVC Aircraft - VOC2007 - DTD - Oxford-IIIT Pet dataset - Caltech101 - Flowers102 - MNIST - SVHN - IIIT5K - Hateful Memes - SST-2 - UCF101 - Kinetics700 - Country211 - CLEVR Counting - KITTI Distance - STL-10 - RareAct - Flickr30 - MSCOCO - ImageNet - ImageNet-A - ImageNet-R - ImageNet Sketch - ObjectNet (ImageNet Overlap) - Youtube-BB - ImageNet-Vid ## Limitations CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine grained classification and counting objects. CLIP also poses issues with regards to fairness and bias which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation- in many cases we have used linear probes to evaluate the performance of CLIP and there is evidence suggesting that linear probes can underestimate model performance. ### Bias and Fairness We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper). 
We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks. ## Feedback ### Where to send questions or comments about the model Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9)
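Beyond the similarity-score example in the card, CLIP's text and image encoders can also be used separately for retrieval-style embeddings; a short sketch, not part of the original card:

```python
import torch
import requests
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

text_inputs = processor(text=["a photo of a cat", "a photo of a dog"], return_tensors="pt", padding=True)
image_inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    text_emb = model.get_text_features(**text_inputs)
    image_emb = model.get_image_features(**image_inputs)

# L2-normalize and compare with cosine similarity.
text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)
image_emb = image_emb / image_emb.norm(dim=-1, keepdim=True)
print(image_emb @ text_emb.T)
```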
AnonymousSub/SR_cline
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- tags: - vision widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png candidate_labels: playing music, playing sports example_title: Cat & Dog --- # Model Card: CLIP Disclaimer: The model card is taken and modified from the official CLIP repository, it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md). ## Model Details The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within. ### Model Date January 2021 ### Model Type The model uses a ViT-B/32 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. The original implementation had two variants: one using a ResNet image encoder and the other using a Vision Transformer. This repository has the variant with the Vision Transformer. ### Documents - [Blog Post](https://openai.com/blog/clip/) - [CLIP Paper](https://arxiv.org/abs/2103.00020) ### Use with Transformers ```python3 from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32") processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` ## Model Use ### Intended Use The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. #### Primary intended uses The primary intended users of these models are AI researchers. We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. ### Out-of-Scope Use Cases **Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. 
Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases. ## Data The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet which tend to skew towards more developed nations, and younger, male users. ### Data Mission Statement Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset. ## Performance and Limitations ### Performance We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision datasets such as OCR to texture recognition to fine-grained classification. The paper describes model performance on the following datasets: - Food101 - CIFAR10 - CIFAR100 - Birdsnap - SUN397 - Stanford Cars - FGVC Aircraft - VOC2007 - DTD - Oxford-IIIT Pet dataset - Caltech101 - Flowers102 - MNIST - SVHN - IIIT5K - Hateful Memes - SST-2 - UCF101 - Kinetics700 - Country211 - CLEVR Counting - KITTI Distance - STL-10 - RareAct - Flickr30 - MSCOCO - ImageNet - ImageNet-A - ImageNet-R - ImageNet Sketch - ObjectNet (ImageNet Overlap) - Youtube-BB - ImageNet-Vid ## Limitations CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine grained classification and counting objects. CLIP also poses issues with regards to fairness and bias which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation- in many cases we have used linear probes to evaluate the performance of CLIP and there is evidence suggesting that linear probes can underestimate model performance. ### Bias and Fairness We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper). 
We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks. ## Feedback ### Where to send questions or comments about the model Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9)
AnonymousSub/SR_consert
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
--- tags: - vision widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-dog-music.png candidate_labels: playing music, playing sports example_title: Cat & Dog --- # Model Card: CLIP Disclaimer: The model card is taken and modified from the official CLIP repository, it can be found [here](https://github.com/openai/CLIP/blob/main/model-card.md). ## Model Details The CLIP model was developed by researchers at OpenAI to learn about what contributes to robustness in computer vision tasks. The model was also developed to test the ability of models to generalize to arbitrary image classification tasks in a zero-shot manner. It was not developed for general model deployment - to deploy models like CLIP, researchers will first need to carefully study their capabilities in relation to the specific context they’re being deployed within. ### Model Date January 2021 ### Model Type The base model uses a ViT-L/14 Transformer architecture as an image encoder and uses a masked self-attention Transformer as a text encoder. These encoders are trained to maximize the similarity of (image, text) pairs via a contrastive loss. The original implementation had two variants: one using a ResNet image encoder and the other using a Vision Transformer. This repository has the variant with the Vision Transformer. ### Documents - [Blog Post](https://openai.com/blog/clip/) - [CLIP Paper](https://arxiv.org/abs/2103.00020) ### Use with Transformers ```python from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14") processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` ## Model Use ### Intended Use The model is intended as a research output for research communities. We hope that this model will enable researchers to better understand and explore zero-shot, arbitrary image classification. We also hope it can be used for interdisciplinary studies of the potential impact of such models - the CLIP paper includes a discussion of potential downstream impacts to provide an example for this sort of analysis. #### Primary intended uses The primary intended users of these models are AI researchers. We primarily imagine the model will be used by researchers to better understand robustness, generalization, and other capabilities, biases, and constraints of computer vision models. ### Out-of-Scope Use Cases **Any** deployed use case of the model - whether commercial or not - is currently out of scope. Non-deployed use cases such as image search in a constrained environment, are also not recommended unless there is thorough in-domain testing of the model with a specific, fixed class taxonomy. This is because our safety assessment demonstrated a high need for task specific testing especially given the variability of CLIP’s performance with different class taxonomies. This makes untested and unconstrained deployment of the model in any use case currently potentially harmful. 
Certain use cases which would fall under the domain of surveillance and facial recognition are always out-of-scope regardless of performance of the model. This is because the use of artificial intelligence for tasks such as these can be premature currently given the lack of testing norms and checks to ensure its fair use. Since the model has not been purposefully trained in or evaluated on any languages other than English, its use should be limited to English language use cases. ## Data The model was trained on publicly available image-caption data. This was done through a combination of crawling a handful of websites and using commonly-used pre-existing image datasets such as [YFCC100M](http://projects.dfki.uni-kl.de/yfcc100m/). A large portion of the data comes from our crawling of the internet. This means that the data is more representative of people and societies most connected to the internet which tend to skew towards more developed nations, and younger, male users. ### Data Mission Statement Our goal with building this dataset was to test out robustness and generalizability in computer vision tasks. As a result, the focus was on gathering large quantities of data from different publicly-available internet data sources. The data was gathered in a mostly non-interventionist manner. However, we only crawled websites that had policies against excessively violent and adult images and allowed us to filter out such content. We do not intend for this dataset to be used as the basis for any commercial or deployed model and will not be releasing the dataset. ## Performance and Limitations ### Performance We have evaluated the performance of CLIP on a wide range of benchmarks across a variety of computer vision datasets such as OCR to texture recognition to fine-grained classification. The paper describes model performance on the following datasets: - Food101 - CIFAR10 - CIFAR100 - Birdsnap - SUN397 - Stanford Cars - FGVC Aircraft - VOC2007 - DTD - Oxford-IIIT Pet dataset - Caltech101 - Flowers102 - MNIST - SVHN - IIIT5K - Hateful Memes - SST-2 - UCF101 - Kinetics700 - Country211 - CLEVR Counting - KITTI Distance - STL-10 - RareAct - Flickr30 - MSCOCO - ImageNet - ImageNet-A - ImageNet-R - ImageNet Sketch - ObjectNet (ImageNet Overlap) - Youtube-BB - ImageNet-Vid ## Limitations CLIP and our analysis of it have a number of limitations. CLIP currently struggles with respect to certain tasks such as fine grained classification and counting objects. CLIP also poses issues with regards to fairness and bias which we discuss in the paper and briefly in the next section. Additionally, our approach to testing CLIP also has an important limitation- in many cases we have used linear probes to evaluate the performance of CLIP and there is evidence suggesting that linear probes can underestimate model performance. ### Bias and Fairness We find that the performance of CLIP - and the specific biases it exhibits - can depend significantly on class design and the choices one makes for categories to include and exclude. We tested the risk of certain kinds of denigration with CLIP by classifying images of people from [Fairface](https://arxiv.org/abs/1908.04913) into crime-related and non-human animal categories. We found significant disparities with respect to race and gender. Additionally, we found that these disparities could shift based on how the classes were constructed. (Details captured in the Broader Impacts Section in the paper). 
We also tested the performance of CLIP on gender, race and age classification using the Fairface dataset (We default to using race categories as they are constructed in the Fairface dataset.) in order to assess quality of performance across different demographics. We found accuracy >96% across all races for gender classification with ‘Middle Eastern’ having the highest accuracy (98.4%) and ‘White’ having the lowest (96.5%). Additionally, CLIP averaged ~93% for racial classification and ~63% for age classification. Our use of evaluations to test for gender, race and age classification as well as denigration harms is simply to evaluate performance of the model across people and surface potential risks and not to demonstrate an endorsement/enthusiasm for such tasks. ## Feedback ### Where to send questions or comments about the model Please use [this Google Form](https://forms.gle/Uv7afRH5dvY34ZEs9)
AnonymousSub/SR_declutr
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
--- license: apache-2.0 tags: - vision datasets: - imagenet-21k --- # ImageGPT (large-sized model) ImageGPT (iGPT) model pre-trained on ImageNet ILSVRC 2012 (14 million images, 21,843 classes) at resolution 32x32. It was introduced in the paper [Generative Pretraining from Pixels](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf) by Chen et al. and first released in [this repository](https://github.com/openai/image-gpt). See also the official [blog post](https://openai.com/blog/image-gpt/). Disclaimer: The team releasing ImageGPT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The ImageGPT (iGPT) is a transformer decoder model (GPT-like) pretrained on a large collection of images in a self-supervised fashion, namely ImageNet-21k, at a resolution of 32x32 pixels. The goal for the model is simply to predict the next pixel value, given the previous ones. By pre-training the model, it learns an inner representation of images that can then be used to: - extract features useful for downstream tasks: one can either use ImageGPT to produce fixed image features, in order to train a linear model (like a sklearn logistic regression model or SVM). This is also referred to as "linear probing". - perform (un)conditional image generation. ## Intended uses & limitations You can use the raw model for either feature extractor or (un) conditional image generation. See the [model hub](https://huggingface.co/models?search=openai/imagegpt) to all ImageGPT variants. ### How to use Here is how to use this model in PyTorch to perform unconditional image generation: ```python from transformers import ImageGPTFeatureExtractor, ImageGPTForCausalImageModeling import torch import matplotlib.pyplot as plt import numpy as np feature_extractor = ImageGPTFeatureExtractor.from_pretrained('openai/imagegpt-large') model = ImageGPTForCausalImageModeling.from_pretrained('openai/imagegpt-large') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) # unconditional generation of 8 images batch_size = 8 context = torch.full((batch_size, 1), model.config.vocab_size - 1) #initialize with SOS token context = torch.tensor(context).to(device) output = model.generate(pixel_values=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40) clusters = feature_extractor.clusters n_px = feature_extractor.size samples = output[:,1:].cpu().detach().numpy() samples_img = [np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [n_px, n_px, 3]).astype(np.uint8) for s in samples] # convert color cluster tokens back to pixels f, axes = plt.subplots(1, batch_size, dpi=300) for img, ax in zip(samples_img, axes): ax.axis('off') ax.imshow(img) ``` ## Training data The ImageGPT model was pretrained on [ImageNet-21k](http://www.image-net.org/), a dataset consisting of 14 million images and 21k classes. ## Training procedure ### Preprocessing Images are first resized/rescaled to the same resolution (32x32) and normalized across the RGB channels. Next, color-clustering is performed. This means that every pixel is turned into one of 512 possible cluster values. This way, one ends up with a sequence of 32x32 = 1024 pixel values, rather than 32x32x3 = 3072, which is prohibitively large for Transformer-based models. ### Pretraining Training details can be found in section 3.4 of v2 of the paper. 
## Evaluation results

For evaluation results on several image classification benchmarks, we refer to the original paper.

### BibTeX entry and citation info

```bibtex
@InProceedings{pmlr-v119-chen20s,
  title = {Generative Pretraining From Pixels},
  author = {Chen, Mark and Radford, Alec and Child, Rewon and Wu, Jeffrey and Jun, Heewoo and Luan, David and Sutskever, Ilya},
  booktitle = {Proceedings of the 37th International Conference on Machine Learning},
  pages = {1691--1703},
  year = {2020},
  editor = {III, Hal Daumé and Singh, Aarti},
  volume = {119},
  series = {Proceedings of Machine Learning Research},
  month = {13--18 Jul},
  publisher = {PMLR},
  pdf = {http://proceedings.mlr.press/v119/chen20s/chen20s.pdf},
  url = {https://proceedings.mlr.press/v119/chen20s.html}
}
```

```bibtex
@inproceedings{deng2009imagenet,
  title={Imagenet: A large-scale hierarchical image database},
  author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
  booktitle={2009 IEEE conference on computer vision and pattern recognition},
  pages={248--255},
  year={2009},
  organization={IEEE}
}
```
AnonymousSub/SR_rule_based_bert_quadruplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
license: apache-2.0
tags:
- vision
datasets:
- imagenet-21k
---

# ImageGPT (medium-sized model)

ImageGPT (iGPT) model pre-trained on ImageNet-21k (14 million images, 21,843 classes) at resolution 32x32. It was introduced in the paper [Generative Pretraining from Pixels](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf) by Chen et al. and first released in [this repository](https://github.com/openai/image-gpt). See also the official [blog post](https://openai.com/blog/image-gpt/).

Disclaimer: The team releasing ImageGPT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

ImageGPT (iGPT) is a transformer decoder model (GPT-like) pretrained on a large collection of images in a self-supervised fashion, namely ImageNet-21k, at a resolution of 32x32 pixels.

The goal of the model is simply to predict the next pixel value, given the previous ones.

By pre-training the model, it learns an inner representation of images that can then be used to:
- extract features useful for downstream tasks: one can use ImageGPT to produce fixed image features and train a linear model on top of them (such as a scikit-learn logistic regression or SVM). This is also referred to as "linear probing".
- perform (un)conditional image generation.

## Intended uses & limitations

You can use the raw model as a feature extractor or for (un)conditional image generation. See the [model hub](https://huggingface.co/models?search=openai/imagegpt) for all ImageGPT variants.

### How to use

Here is how to use this model in PyTorch to perform unconditional image generation:

```python
from transformers import ImageGPTFeatureExtractor, ImageGPTForCausalImageModeling
import torch
import matplotlib.pyplot as plt
import numpy as np

feature_extractor = ImageGPTFeatureExtractor.from_pretrained('openai/imagegpt-medium')
model = ImageGPTForCausalImageModeling.from_pretrained('openai/imagegpt-medium')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# unconditional generation of 8 images
batch_size = 8
context = torch.full((batch_size, 1), model.config.vocab_size - 1)  # initialize with SOS token
context = context.to(device)
output = model.generate(pixel_values=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40)

clusters = feature_extractor.clusters
n_px = feature_extractor.size

samples = output[:, 1:].cpu().detach().numpy()
# convert color cluster tokens back to pixels
samples_img = [np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [n_px, n_px, 3]).astype(np.uint8) for s in samples]

f, axes = plt.subplots(1, batch_size, dpi=300)
for img, ax in zip(samples_img, axes):
    ax.axis('off')
    ax.imshow(img)
```

## Training data

The ImageGPT model was pretrained on [ImageNet-21k](http://www.image-net.org/), a dataset consisting of 14 million images and 21k classes.

## Training procedure

### Preprocessing

Images are first resized/rescaled to the same resolution (32x32) and normalized across the RGB channels. Next, color-clustering is performed: every pixel is turned into one of 512 possible cluster values. This way, one ends up with a sequence of 32x32 = 1024 tokens, rather than 32x32x3 = 3072 values, which would be prohibitively large for Transformer-based models.

### Pretraining

Training details can be found in section 3.4 of v2 of the paper.
## Evaluation results

For evaluation results on several image classification benchmarks, we refer to the original paper.

### BibTeX entry and citation info

```bibtex
@InProceedings{pmlr-v119-chen20s,
  title = {Generative Pretraining From Pixels},
  author = {Chen, Mark and Radford, Alec and Child, Rewon and Wu, Jeffrey and Jun, Heewoo and Luan, David and Sutskever, Ilya},
  booktitle = {Proceedings of the 37th International Conference on Machine Learning},
  pages = {1691--1703},
  year = {2020},
  editor = {III, Hal Daumé and Singh, Aarti},
  volume = {119},
  series = {Proceedings of Machine Learning Research},
  month = {13--18 Jul},
  publisher = {PMLR},
  pdf = {http://proceedings.mlr.press/v119/chen20s/chen20s.pdf},
  url = {https://proceedings.mlr.press/v119/chen20s.html}
}
```

```bibtex
@inproceedings{deng2009imagenet,
  title={Imagenet: A large-scale hierarchical image database},
  author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
  booktitle={2009 IEEE conference on computer vision and pattern recognition},
  pages={248--255},
  year={2009},
  organization={IEEE}
}
```
AnonymousSub/SR_rule_based_bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
license: apache-2.0
tags:
- vision
datasets:
- imagenet-21k
---

# ImageGPT (small-sized model)

ImageGPT (iGPT) model pre-trained on ImageNet-21k (14 million images, 21,843 classes) at resolution 32x32. It was introduced in the paper [Generative Pretraining from Pixels](https://cdn.openai.com/papers/Generative_Pretraining_from_Pixels_V2.pdf) by Chen et al. and first released in [this repository](https://github.com/openai/image-gpt). See also the official [blog post](https://openai.com/blog/image-gpt/).

Disclaimer: The team releasing ImageGPT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

ImageGPT (iGPT) is a transformer decoder model (GPT-like) pretrained on a large collection of images in a self-supervised fashion, namely ImageNet-21k, at a resolution of 32x32 pixels.

The goal of the model is simply to predict the next pixel value, given the previous ones.

By pre-training the model, it learns an inner representation of images that can then be used to:
- extract features useful for downstream tasks: one can use ImageGPT to produce fixed image features and train a linear model on top of them (such as a scikit-learn logistic regression or SVM). This is also referred to as "linear probing".
- perform (un)conditional image generation.

## Intended uses & limitations

You can use the raw model as a feature extractor or for (un)conditional image generation. See the [model hub](https://huggingface.co/models?search=openai/imagegpt) for all ImageGPT variants.

### How to use

Here is how to use this model in PyTorch to perform unconditional image generation:

```python
from transformers import ImageGPTFeatureExtractor, ImageGPTForCausalImageModeling
import torch
import matplotlib.pyplot as plt
import numpy as np

feature_extractor = ImageGPTFeatureExtractor.from_pretrained('openai/imagegpt-small')
model = ImageGPTForCausalImageModeling.from_pretrained('openai/imagegpt-small')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# unconditional generation of 8 images
batch_size = 8
context = torch.full((batch_size, 1), model.config.vocab_size - 1)  # initialize with SOS token
context = context.to(device)
output = model.generate(pixel_values=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40)

clusters = feature_extractor.clusters
n_px = feature_extractor.size

samples = output[:, 1:].cpu().detach().numpy()
# convert color cluster tokens back to pixels
samples_img = [np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [n_px, n_px, 3]).astype(np.uint8) for s in samples]

f, axes = plt.subplots(1, batch_size, dpi=300)
for img, ax in zip(samples_img, axes):
    ax.axis('off')
    ax.imshow(img)
```

## Training data

The ImageGPT model was pretrained on [ImageNet-21k](http://www.image-net.org/), a dataset consisting of 14 million images and 21k classes.

## Training procedure

### Preprocessing

Images are first resized/rescaled to the same resolution (32x32) and normalized across the RGB channels. Next, color-clustering is performed: every pixel is turned into one of 512 possible cluster values. This way, one ends up with a sequence of 32x32 = 1024 tokens, rather than 32x32x3 = 3072 values, which would be prohibitively large for Transformer-based models.

### Pretraining

Training details can be found in section 3.4 of v2 of the paper.
## Evaluation results

For evaluation results on several image classification benchmarks, we refer to the original paper.

### BibTeX entry and citation info

```bibtex
@InProceedings{pmlr-v119-chen20s,
  title = {Generative Pretraining From Pixels},
  author = {Chen, Mark and Radford, Alec and Child, Rewon and Wu, Jeffrey and Jun, Heewoo and Luan, David and Sutskever, Ilya},
  booktitle = {Proceedings of the 37th International Conference on Machine Learning},
  pages = {1691--1703},
  year = {2020},
  editor = {III, Hal Daumé and Singh, Aarti},
  volume = {119},
  series = {Proceedings of Machine Learning Research},
  month = {13--18 Jul},
  publisher = {PMLR},
  pdf = {http://proceedings.mlr.press/v119/chen20s/chen20s.pdf},
  url = {https://proceedings.mlr.press/v119/chen20s.html}
}
```

```bibtex
@inproceedings{deng2009imagenet,
  title={Imagenet: A large-scale hierarchical image database},
  author={Deng, Jia and Dong, Wei and Socher, Richard and Li, Li-Jia and Li, Kai and Fei-Fei, Li},
  booktitle={2009 IEEE conference on computer vision and pattern recognition},
  pages={248--255},
  year={2009},
  organization={IEEE}
}
```
AnonymousSub/SR_rule_based_roberta_bert_quadruplet_epochs_1_shard_10
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
---
license: mit
tags:
- nowcasting
- forecasting
- timeseries
- remote-sensing
- gan
---

# DGMR

## Model description

[More information needed]

## Intended uses & limitations

[More information needed]

## How to use

[More information needed]

## Limitations and bias

[More information needed]

## Training data

[More information needed]

## Training procedure

[More information needed]

## Evaluation results

[More information needed]
AnonymousSub/SR_rule_based_roberta_hier_triplet_epochs_1_shard_1
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
language: multilingual
tags:
- Extract Names
license: apache-2.0
---

## Extract names in any language.
AnonymousSub/SR_rule_based_roberta_hier_triplet_epochs_1_shard_1_wikiqa_copy
[ "pytorch", "roberta", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "RobertaModel" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
# Borges02

## Model description

You can generate new short stories in the style of Jorge Luis Borges.

## Intended uses & limitations

#### How to use

```python
# You can include sample code which will be formatted
```

#### Limitations and bias

Provide examples of latent issues and potential remediations.

## Training data

Describe the data you used to train the model. If you initialized it with pre-trained weights, add a link to the pre-trained model card or repository with a description of the pre-training data.

## Training procedure

Preprocessing, hardware used, hyperparameters...

## Eval results

### BibTeX entry and citation info

```bibtex
@inproceedings{...,
  year={2020}
}
```
AnonymousSub/SR_specter
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
language: ru
---

BART model fine-tuned to aggregate crowd-sourced transcriptions.

Repository: [GitHub](https://github.com/orzhan/bart-transcription-aggregation)
AnonymousSub/SciFive_pubmedqa_question_generation
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
7
null
---
language: ru
---

Text simplification model for Russian. Fine-tuned from ruGPT3-large.

Repository: [GitHub](https://github.com/orzhan/rusimscore)
AnonymousSub/T5_pubmedqa_question_generation
[ "pytorch", "t5", "text2text-generation", "transformers", "autotrain_compatible" ]
text2text-generation
{ "architectures": [ "T5ForConditionalGeneration" ], "model_type": "t5", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": true, "length_penalty": 2, "max_length": 200, "min_length": 30, "no_repeat_ngram_size": 3, "num_beams": 4, "prefix": "summarize: " }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to German: " }, "translation_en_to_fr": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to French: " }, "translation_en_to_ro": { "early_stopping": true, "max_length": 300, "num_beams": 4, "prefix": "translate English to Romanian: " } } }
6
null
T5-small model fine-tuned for extractive summarization on long documents. Repository: [GitHub](https://github.com/orzhan/t5-long-extract)
AnonymousSub/bert-base-uncased_wikiqa
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
30
null
This is a t5-base model trained on the multi_news dataset for abstractive summarization.
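A minimal usage sketch (not part of the original description; the checkpoint id below is a placeholder, so substitute the actual repository id, and the "summarize: " prefix follows the standard T5 convention):

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "your-username/t5-base-multi-news"  # placeholder: replace with the real checkpoint id
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

article = "..."  # a long news article to summarize
inputs = tokenizer("summarize: " + article, return_tensors="pt", truncation=True, max_length=512)

summary_ids = model.generate(**inputs, max_length=150, num_beams=4, early_stopping=True)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```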
AnonymousSub/bert_hier_diff_equal_wts_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
tags:
- text-to-image
library_name: generic
---

# Image generation using pretrained BigGAN

## Warning: This only works for ImageNet inputs.

List of possible inputs: https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a

GitHub repository: https://github.com/huggingface/pytorch-pretrained-BigGAN
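A brief generation sketch using the linked `pytorch-pretrained-BigGAN` package, adapted from that repository's README; as noted above, the class names must be ImageNet categories, and the chosen checkpoint and classes here are only examples:

```python
import torch
from pytorch_pretrained_biggan import (BigGAN, one_hot_from_names,
                                       truncated_noise_sample, save_as_images)

# load a pretrained BigGAN checkpoint (example: 256x256 deep variant)
model = BigGAN.from_pretrained("biggan-deep-256")

truncation = 0.4
class_vector = one_hot_from_names(["soap bubble", "coffee", "mushroom"], batch_size=3)
noise_vector = truncated_noise_sample(truncation=truncation, batch_size=3)

noise_vector = torch.from_numpy(noise_vector)
class_vector = torch.from_numpy(class_vector)

with torch.no_grad():
    output = model(noise_vector, class_vector, truncation)

# writes output_0.png, output_1.png, ... to the working directory
save_as_images(output)
```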
AnonymousSub/bert_mean_diff_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
6
null
---
tags:
- audio
- ConvTasNet
- audio-to-audio
datasets:
- Libri1Mix
- enh_single
license: cc-by-sa-4.0
library_tag: generic
---

## Clone from Asteroid model `JorisCos/ConvTasNet_Libri1Mix_enhsignle_16k`

Description:

This model was trained by Joris Cosentino using the librimix recipe in [Asteroid](https://github.com/asteroid-team/asteroid). It was trained on the `enh_single` task of the Libri1Mix dataset.

Training config:

```yml
data:
  n_src: 1
  sample_rate: 16000
  segment: 3
  task: enh_single
  train_dir: data/wav16k/min/train-360
  valid_dir: data/wav16k/min/dev
filterbank:
  kernel_size: 32
  n_filters: 512
  stride: 16
masknet:
  bn_chan: 128
  hid_chan: 512
  mask_act: relu
  n_blocks: 8
  n_repeats: 3
  n_src: 1
  skip_chan: 128
optim:
  lr: 0.001
  optimizer: adam
  weight_decay: 0.0
training:
  batch_size: 6
  early_stop: true
  epochs: 200
  half_lr: true
  num_workers: 4
```

Results:

On Libri1Mix min test set:

```yml
si_sdr: 14.743051006476085
si_sdr_imp: 11.293269700616385
sdr: 15.300522933671061
sdr_imp: 11.797860134458015
sir: Infinity
sir_imp: NaN
sar: 15.300522933671061
sar_imp: 11.797860134458015
stoi: 0.9310514162434267
stoi_imp: 0.13513159270288563
```

License notice:

This work "ConvTasNet_Libri1Mix_enhsignle_16k" is a derivative of [LibriSpeech ASR corpus](http://www.openslr.org/12) by Vassil Panayotov, used under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/); of The WSJ0 Hipster Ambient Mixtures dataset by [Whisper.ai](http://wham.whisper.ai/), used under [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) (Research only). "ConvTasNet_Libri1Mix_enhsignle_16k" is licensed under [Attribution-ShareAlike 3.0 Unported](https://creativecommons.org/licenses/by-sa/3.0/) by Joris Cosentino.
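A minimal enhancement sketch using Asteroid's generic loading API. This is not part of the original card: it loads the upstream checkpoint referenced above, the input filename is a placeholder, and the exact `separate` behavior (accepted input shapes, output file naming) may vary across Asteroid versions.

```python
from asteroid.models import BaseModel

# load the upstream Asteroid checkpoint referenced above from the Hugging Face Hub
model = BaseModel.from_pretrained("JorisCos/ConvTasNet_Libri1Mix_enhsignle_16k")

# enhance a 16 kHz mono wav file; Asteroid writes the estimated source
# next to the input file (e.g. noisy_speech_est1.wav)
model.separate("noisy_speech.wav")
```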
AnonymousSub/bert_snips
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
5
null
---
tags:
- adapter-transformers
---

# Adapter transformers
AnonymousSub/bert_triplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
2
null
---
benchmark: superb
library_name: superb
language: en
datasets:
- librispeech_asr
tags:
- audio
- automatic-speech-recognition
- superb
license: apache-2.0
widget:
- example_title: Librispeech sample 1
  src: https://cdn-media.huggingface.co/speech_samples/sample1.flac
- example_title: Librispeech sample 2
  src: https://cdn-media.huggingface.co/speech_samples/sample2.flac
---

# Fork of Wav2Vec2-Base-960h

[Facebook's Wav2Vec2](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/)

The base model pretrained and fine-tuned on 960 hours of Librispeech on 16kHz sampled speech audio. When using the model, make sure that your speech input is also sampled at 16kHz.

[Paper](https://arxiv.org/abs/2006.11477)

Authors: Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli

**Abstract**

We show for the first time that learning powerful representations from speech audio alone followed by fine-tuning on transcribed speech can outperform the best semi-supervised methods while being conceptually simpler. wav2vec 2.0 masks the speech input in the latent space and solves a contrastive task defined over a quantization of the latent representations which are jointly learned. Experiments using all labeled data of Librispeech achieve 1.8/3.3 WER on the clean/other test sets. When lowering the amount of labeled data to one hour, wav2vec 2.0 outperforms the previous state of the art on the 100 hour subset while using 100 times less labeled data. Using just ten minutes of labeled data and pre-training on 53k hours of unlabeled data still achieves 4.8/8.2 WER. This demonstrates the feasibility of speech recognition with limited amounts of labeled data.

The original model can be found under https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.

# Usage

To transcribe audio files the model can be used as a standalone acoustic model as follows:

```python
from transformers import Wav2Vec2Tokenizer, Wav2Vec2ForCTC
from datasets import load_dataset
import soundfile as sf
import torch

# load model and tokenizer
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# define function to read in sound file
def map_to_array(batch):
    speech, _ = sf.read(batch["file"])
    batch["speech"] = speech
    return batch

# load dummy dataset and read soundfiles
ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
ds = ds.map(map_to_array)

# tokenize the first two samples (batch size 2)
input_values = tokenizer(ds["speech"][:2], return_tensors="pt", padding="longest").input_values

# retrieve logits
logits = model(input_values).logits

# take argmax and decode
predicted_ids = torch.argmax(logits, dim=-1)
transcription = tokenizer.batch_decode(predicted_ids)
```

## Evaluation

This code snippet shows how to evaluate **facebook/wav2vec2-base-960h** on LibriSpeech's "clean" and "other" test data.
```python
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
import soundfile as sf
import torch
from jiwer import wer


librispeech_eval = load_dataset("librispeech_asr", "clean", split="test")

model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to("cuda")
tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")

def map_to_array(batch):
    speech, _ = sf.read(batch["file"])
    batch["speech"] = speech
    return batch

librispeech_eval = librispeech_eval.map(map_to_array)

def map_to_pred(batch):
    input_values = tokenizer(batch["speech"], return_tensors="pt", padding="longest").input_values
    with torch.no_grad():
        logits = model(input_values.to("cuda")).logits

    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = tokenizer.batch_decode(predicted_ids)
    batch["transcription"] = transcription
    return batch

result = librispeech_eval.map(map_to_pred, batched=True, batch_size=1, remove_columns=["speech"])

print("WER:", wer(result["text"], result["transcription"]))
```

*Result (WER)*:

| "clean" | "other" |
|---|---|
| 3.4 | 8.6 |
AnonymousSub/bert_triplet_epochs_1_shard_10
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
1
null
---
tags:
- audio-to-audio
library_name: generic
---

# Audio to Audio repository template

This is a template repository for Audio-to-Audio models, to support generic inference with the Hugging Face Hub generic Inference API. Examples of Audio to Audio are Source Separation and Speech Enhancement. There are two required steps:

1. Specify the requirements by defining a `requirements.txt` file.
2. Implement the `pipeline.py` `__init__` and `__call__` methods. These methods are called by the Inference API. The `__init__` method should load the model and preload all the elements needed for inference (model, processors, tokenizers, etc.). This is only called once. The `__call__` method performs the actual inference. Make sure to follow the same input/output specifications defined in the template for the pipeline to work (a minimal skeleton is sketched below, after the example repos).

Example repos
* https://huggingface.co/osanseviero/ConvTasNet_Libri1Mix_enhsingle_16k
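The following skeleton only illustrates the expected structure of `pipeline.py`; the class and method names follow the generic templates, while the sampling rate, label values, and return types are assumptions, so always check the template itself for the exact input/output contract.

```python
from typing import List, Tuple

import numpy as np


class PreTrainedPipeline:
    def __init__(self, path: str = ""):
        # Called once when the Inference API container starts.
        # Load the model and any processors from `path` and keep them in memory.
        self.model = None  # e.g. load the weights stored in the repository at `path`
        self.sampling_rate = 16000  # assumed; use whatever the model expects

    def __call__(self, inputs: np.ndarray) -> Tuple[np.ndarray, int, List[str]]:
        # Called for every request. `inputs` is the raw waveform as a numpy array.
        # Run inference and return the processed audio, its sampling rate,
        # and one label per returned track (e.g. ["enhanced"]).
        enhanced = inputs  # placeholder: replace with the model's actual output
        return enhanced, self.sampling_rate, ["enhanced"]
```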
AnonymousSub/cline-emanuals-s10-SR
[]
null
{ "architectures": null, "model_type": null, "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
0
null
---
tags:
- allennlp
- question-answering
---

# TODO: Fill this model card
AnonymousSub/cline-emanuals-techqa
[ "pytorch", "roberta", "question-answering", "transformers", "autotrain_compatible" ]
question-answering
{ "architectures": [ "RobertaForQuestionAnswering" ], "model_type": "roberta", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
4
null
---
tags:
- sentence-transformers
- feature-extraction
---

# TODO: Name of Model

TODO: Description

## Model Description

TODO: Add relevant content

(0) Base Transformer Type: DistilBertModel

(1) Pooling: mean

(2) Dense: 768x512

## Usage (Sentence-Transformers)

Using this model becomes more convenient when you have [sentence-transformers](https://github.com/UKPLab/sentence-transformers) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer

sentences = ["This is an example sentence"]

model = SentenceTransformer(TODO)
embeddings = model.encode(sentences)
print(embeddings)
```

## TODO: Training Procedure

## TODO: Evaluation Results

## TODO: Citing & Authors
AnonymousSub/consert-s10-SR
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
28
2021-08-17T08:22:19Z
---
library_name: generic
language:
- en
pipeline_tag: text-to-image
---

## Fork of DALL·E mini - Generate images from text

For the original repo, head to https://huggingface.co/flax-community/dalle-mini
AnonymousSub/dummy_2
[ "pytorch", "bert", "text-classification", "transformers" ]
text-classification
{ "architectures": [ "BertForSequenceClassification" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
39
null
---
tags:
- spacy
- token-classification
language:
- en
model-index:
- name: fashion_brands_patterns
  results:
  - task:
      name: NER
      type: token-classification
    metrics:
    - name: NER Precision
      type: precision
      value: 0.0
    - name: NER Recall
      type: recall
      value: 0.0
    - name: NER F Score
      type: f_score
      value: 0.0
---

| Feature | Description |
| --- | --- |
| **Name** | `en_ner_fashion` |
| **Version** | `0.0.0` |
| **spaCy** | `>=3.1.0,<3.2.0` |
| **Default Pipeline** | `tok2vec`, `ner` |
| **Components** | `tok2vec`, `ner` |
| **Vectors** | 0 keys, 0 unique vectors (0 dimensions) |
| **Sources** | n/a |
| **License** | n/a |
| **Author** | [n/a]() |

### Label Scheme

<details>
<summary>View label scheme (1 labels for 1 components)</summary>

| Component | Labels |
| --- | --- |
| **`ner`** | `FASHION_BRAND` |

</details>

### Accuracy

| Type | Score |
| --- | --- |
| `ENTS_F` | 0.00 |
| `ENTS_P` | 0.00 |
| `ENTS_R` | 0.00 |
| `TOK2VEC_LOSS` | 1043.55 |
| `NER_LOSS` | 1414323.43 |
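A short usage sketch (not part of the original card): it assumes the pipeline has been installed as a Python package named `en_ner_fashion`, and the example sentence is made up.

```python
import spacy

# assumes the packaged pipeline has been installed, e.g.
# pip install <path-or-url-to-the-en_ner_fashion-wheel>
nlp = spacy.load("en_ner_fashion")

doc = nlp("She paired vintage Levi's with a Gucci belt.")
for ent in doc.ents:
    print(ent.text, ent.label_)  # entities tagged FASHION_BRAND
```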
AnonymousSub/dummy_2_parent
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
3
null
---
tags:
- image-classification
library_name: generic
---

# Dog vs Cat Image Classification with FastAI CNN

Training is based on the FastAI [Quick Start](https://docs.fast.ai/quick_start.html) example.

## Training

The model was trained as follows:

```python
from fastai.vision.all import *

path = untar_data(URLs.PETS)/'images'

def is_cat(x):
    return x[0].isupper()

dls = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(224))

learn = cnn_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(1)
```
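A minimal inference sketch continuing from the training code above (the image path is a placeholder, and the label order mirrors the `is_cat` labeling used during training):

```python
from fastai.vision.all import PILImage

# classify a new image with the trained learner
img = PILImage.create("cat.jpg")  # placeholder path to any image
is_cat, _, probs = learn.predict(img)
print(f"Is this a cat? {is_cat} (p={probs[1]:.4f})")
```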
AnonymousSub/rule_based_bert_hier_diff_equal_wts_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
2021-05-18T07:19:49Z
---
tags:
- translation
widget:
- text: "I have a problem with my iphone that needs to be resolved asap!!"
- max_length: 1
license: apache-2.0
---

# Fastspeech English model
AnonymousSub/rule_based_bert_quadruplet_epochs_1_shard_1
[ "pytorch", "bert", "feature-extraction", "transformers" ]
feature-extraction
{ "architectures": [ "BertModel" ], "model_type": "bert", "task_specific_params": { "conversational": { "max_length": null }, "summarization": { "early_stopping": null, "length_penalty": null, "max_length": null, "min_length": null, "no_repeat_ngram_size": null, "num_beams": null, "prefix": null }, "text-generation": { "do_sample": null, "max_length": null }, "translation_en_to_de": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_fr": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null }, "translation_en_to_ro": { "early_stopping": null, "max_length": null, "num_beams": null, "prefix": null } } }
8
null
---
tags:
- text-classification
library_name: fasttext
widget:
- text: "apple"
  example_title: "apple"
- text: "cat"
  example_title: "cat"
- text: "sunny"
  example_title: "sunny"
- text: "water"
  example_title: "water"
---

# Fasttext nearest neighbors
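A sketch of nearest-neighbor queries with the `fasttext` Python package (not part of the original card); `model.bin` is a placeholder for whichever binary the repository actually ships, and the query words simply reuse the widget examples above.

```python
import fasttext

# load the fastText binary shipped with the repository (filename assumed)
model = fasttext.load_model("model.bin")

# print the five nearest neighbors for each widget example
for word in ["apple", "cat", "sunny", "water"]:
    print(word, model.get_nearest_neighbors(word, k=5))
```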