| Column | Type | Range |
|---|---|---|
| pipeline_tag | stringclasses | 48 values |
| library_name | stringclasses | 205 values |
| text | stringlengths | 0–18.3M |
| metadata | stringlengths | 2–1.07B |
| id | stringlengths | 5–122 |
| last_modified | null | |
| tags | listlengths | 1–1.84k |
| sha | null | |
| created_at | stringlengths | 25–25 |
text-classification
transformers
{}
Katsiaryna/qnli-electra-base-finetuned_auc
null
[ "transformers", "pytorch", "tensorboard", "electra", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_151221-5-001
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_151221-normal
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_151221-top1
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_151221-top3
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_151221-top3_op1
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_151221-top3_op2
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_151221-top3_op3
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_161221-top3
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_20000-top3-BCE
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_40000-top3-BCE
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_40000-top3
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_all-top3
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Katsiaryna/stsb-TinyBERT-L-4-finetuned_auc_k
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Katsiaryna/stsb-distilroberta-base-finetuned_9th_auc
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Katsiaryna/stsb-distilroberta-base-finetuned_9th_auc_ce
null
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Katsiaryna/stsb-distilroberta-base-finetuned_auc
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Katsiaryna/stsb-distilroberta-base-finetuned_auc_121221
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Katsiaryna/stsb-roberta-base-finetuned_auc
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kavod/wav2vec2-base-timit-demo-colab
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Joshua Dialogue Model
{"tags": ["conversational"]}
KaydenSou/Joshua
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
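The Joshua record above ships with no usage notes; given its GPT-2 conversational tags, a minimal DialoGPT-style exchange could look like the following sketch (the turn format, with turns separated by the EOS token, is an assumption based on DialoGPT conventions, not documented by the repository):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical single-turn chat with a DialoGPT-style checkpoint; assumes turns
# are separated by the EOS token, as is conventional for GPT-2 chat models.
tok = AutoTokenizer.from_pretrained("KaydenSou/Joshua")
model = AutoModelForCausalLM.from_pretrained("KaydenSou/Joshua")

history = tok.encode("Hello, who are you?" + tok.eos_token, return_tensors="pt")
reply_ids = model.generate(history, max_length=200, pad_token_id=tok.eos_token_id)
# Decode only the newly generated tokens after the user's turn.
print(tok.decode(reply_ids[:, history.shape[-1]:][0], skip_special_tokens=True))
```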
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-complaints-product This model was trained from the [CFPB](https://www.consumerfinance.gov/data-research/consumer-complaints/) dataset, also made available on the HuggingFace Datasets library. This model predicts the type of financial complaint based on the text provided. ## Model description A DistilBERT text classification model with 18 possible classes to determine the nature of a financial customer complaint. ## Intended uses & limitations This model is used as part of a demonstration for E2E Machine Learning Projects focused on Contact Centre Automation: - **Infrastructure:** Terraform - **ML Ops:** HuggingFace (Datasets, Hub, Transformers) - **ML Explainability:** SHAP - **Cloud:** AWS - Model Hosting: Lambda - DB Backend: DynamoDB - Orchestration: Step-Functions - UI Hosting: EC2 - Routing: API Gateway - **UI:** Budibase ## Training and evaluation data consumer_complaints dataset ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 3 ### Framework versions - Transformers 4.16.1 - Pytorch 1.10.0+cu111 - Datasets 1.18.2 - Tokenizers 0.11.0
{"tags": ["generated_from_trainer"], "datasets": ["consumer_complaints"], "model-index": [{"name": "distilbert-complaints-product", "results": []}]}
Kayvane/distilbert-complaints-product
null
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:consumer_complaints", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
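Since the card above stops short of an inference example, a minimal sketch for querying the 18-class complaint classifier might look like this (the input sentence is illustrative; the label names come from the checkpoint's config, not the card):

```python
from transformers import pipeline

# Load the complaint classifier; the 18 labels are defined in the model config.
clf = pipeline("text-classification", model="Kayvane/distilbert-complaints-product")

print(clf("I was charged a late fee even though my mortgage payment posted on time."))
# Output: a list of {'label': ..., 'score': ...} dicts; labels depend on the config.
```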
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-undersampled-noweights This model was trained from scratch on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 33 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 5 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.0+cu111 - Datasets 1.18.3 - Tokenizers 0.11.0
{"tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-undersampled-noweights", "results": []}]}
Kayvane/distilbert-undersampled-noweights
null
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-undersampled This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0826 - Accuracy: 0.9811 - F1: 0.9810 - Recall: 0.9811 - Precision: 0.9812 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 33 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 100 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Recall | Precision | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:|:------:|:---------:| | 0.0959 | 0.2 | 2000 | 0.0999 | 0.9651 | 0.9628 | 0.9651 | 0.9655 | | 0.0618 | 0.41 | 4000 | 0.0886 | 0.9717 | 0.9717 | 0.9717 | 0.9731 | | 0.159 | 0.61 | 6000 | 0.0884 | 0.9719 | 0.9720 | 0.9719 | 0.9728 | | 0.0513 | 0.81 | 8000 | 0.0785 | 0.9782 | 0.9782 | 0.9782 | 0.9788 | | 0.0219 | 1.01 | 10000 | 0.0680 | 0.9779 | 0.9779 | 0.9779 | 0.9783 | | 0.036 | 1.22 | 12000 | 0.0745 | 0.9787 | 0.9787 | 0.9787 | 0.9792 | | 0.0892 | 1.42 | 14000 | 0.0675 | 0.9786 | 0.9786 | 0.9786 | 0.9789 | | 0.0214 | 1.62 | 16000 | 0.0760 | 0.9799 | 0.9798 | 0.9799 | 0.9801 | | 0.0882 | 1.83 | 18000 | 0.0800 | 0.9800 | 0.9800 | 0.9800 | 0.9802 | | 0.0234 | 2.03 | 20000 | 0.0720 | 0.9813 | 0.9813 | 0.9813 | 0.9815 | | 0.0132 | 2.23 | 22000 | 0.0738 | 0.9803 | 0.9803 | 0.9803 | 0.9805 | | 0.0136 | 2.43 | 24000 | 0.0847 | 0.9804 | 0.9804 | 0.9804 | 0.9806 | | 0.0119 | 2.64 | 26000 | 0.0826 | 0.9811 | 0.9810 | 0.9811 | 0.9812 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.0+cu111 - Datasets 1.18.3 - Tokenizers 0.11.0
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1", "recall", "precision"], "model-index": [{"name": "distilbert-undersampled", "results": []}]}
Kayvane/distilbert-undersampled
null
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
{}
Kayvane/distilvert-complaints-subproduct
null
[ "transformers", "pytorch", "distilbert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
KazLib/distilbert-base-uncased-finetuned-squad
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
# Model Trained Using AutoNLP - Problem type: Binary Classification - Model ID: 13522454 ## Validation Metrics - Loss: 0.31450966000556946 - Accuracy: 0.8461538461538461 - Precision: 0.8181818181818182 - Recall: 0.782608695652174 - AUC: 0.9369259032455604 - F1: 0.8 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/models/Kceilord/autonlp-tc-13522454 ``` Or Python API: ``` from transformers import AutoModelForSequenceClassification, AutoTokenizer model = AutoModelForSequenceClassification.from_pretrained("Kceilord/autonlp-tc-13522454", use_auth_token=True) tokenizer = AutoTokenizer.from_pretrained("Kceilord/autonlp-tc-13522454", use_auth_token=True) inputs = tokenizer("I love AutoNLP", return_tensors="pt") outputs = model(**inputs) ```
{"language": "en", "tags": "autonlp", "datasets": ["Kceilord/autonlp-data-tc"], "widget": [{"text": "I love AutoNLP \ud83e\udd17"}]}
Kceilord/autonlp-tc-13522454
null
[ "transformers", "pytorch", "distilbert", "text-classification", "autonlp", "en", "dataset:Kceilord/autonlp-data-tc", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Harry Potter DialoGPT Model
{"tags": ["conversational"]}
Keen/DialoGPT-small-potter
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Keiraz14/finetuned-finbert
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Keisuke/wav2vec2-base-timit-demo-colab
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Rick3 DialoGPT Model
{"tags": ["conversational"]}
KekLord/DialoGPT-small-rick3
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Siesta
{"tags": ["conversational"]}
Keqing/Keqing-Siesta
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Spamton G. Spamton DialoGPT Model
{"tags": ["conversational"]}
Keqipig/DialoGPT-small-spamton
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kerui/CS412-Project
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # koelectra-sts-v0.4 This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3368 - Pearson: 0.9303 - Spearmanr: 0.9287 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Pearson | Spearmanr | |:-------------:|:-----:|:----:|:---------------:|:-------:|:---------:| | 0.0345 | 1.0 | 730 | 0.3368 | 0.9303 | 0.9287 | | 0.0343 | 2.0 | 1460 | 0.3368 | 0.9303 | 0.9287 | | 0.0337 | 3.0 | 2190 | 0.3368 | 0.9303 | 0.9287 | | 0.0345 | 4.0 | 2920 | 0.3368 | 0.9303 | 0.9287 | | 0.0347 | 5.0 | 3650 | 0.3368 | 0.9303 | 0.9287 | ### Framework versions - Transformers 4.10.0 - Pytorch 1.10.1+cu113 - Datasets 1.17.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["spearmanr"]}
Ketzu/koelectra-sts-v0.4
null
[ "transformers", "pytorch", "tensorboard", "electra", "text-classification", "generated_from_trainer", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
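The Pearson/Spearman metrics in the koelectra-sts card suggest a single-output regression head (STS-style); under that assumption, a similarity score can be read straight from the logits, as in this sketch (the Korean sentence pair is illustrative):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

name = "Ketzu/koelectra-sts-v0.4"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSequenceClassification.from_pretrained(name)

# Encode a sentence pair and read the single regression logit as the score.
inputs = tokenizer("오늘 날씨가 좋다.", "오늘은 날씨가 맑다.", return_tensors="pt")
with torch.no_grad():
    score = model(**inputs).logits.squeeze().item()
print(score)  # higher = more similar, assuming an STS-B-style scale
```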
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-base-finetuned-pubmed This model is a fine-tuned version of [facebook/bart-base](https://huggingface.co/facebook/bart-base) on the pub_med_summarization_dataset dataset. It achieves the following results on the evaluation set: - Loss: 2.0277 - Rouge1: 9.3963 - Rouge2: 4.0473 - Rougel: 8.4526 - Rougelsum: 8.9659 - Gen Len: 20.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:------:|:------:|:------:|:---------:|:-------:| | 2.3706 | 1.0 | 4000 | 2.1245 | 9.1644 | 3.8264 | 8.2223 | 8.718 | 20.0 | | 2.2246 | 2.0 | 8000 | 2.0811 | 9.023 | 3.7716 | 8.1453 | 8.5998 | 20.0 | | 2.1034 | 3.0 | 12000 | 2.0469 | 9.4412 | 4.0783 | 8.4949 | 8.9977 | 20.0 | | 2.0137 | 4.0 | 16000 | 2.0390 | 9.2261 | 3.9307 | 8.3154 | 8.7937 | 20.0 | | 1.9288 | 5.0 | 20000 | 2.0277 | 9.3963 | 4.0473 | 8.4526 | 8.9659 | 20.0 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.0+cu111 - Datasets 1.18.3 - Tokenizers 0.11.6
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["pub_med_summarization_dataset"], "metrics": ["rouge"], "model-index": [{"name": "bart-base-finetuned-pubmed", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "pub_med_summarization_dataset", "type": "pub_med_summarization_dataset", "args": "document"}, "metrics": [{"type": "rouge", "value": 9.3963, "name": "Rouge1"}]}]}]}
Kevincp560/bart-base-finetuned-pubmed
null
[ "transformers", "pytorch", "tensorboard", "bart", "text2text-generation", "generated_from_trainer", "dataset:pub_med_summarization_dataset", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
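For completeness, a hedged inference sketch for the fine-tuned PubMed summarizer (the abstract is illustrative, and the generation length mirrors the Gen Len of 20 reported in the card):

```python
from transformers import pipeline

summarizer = pipeline("summarization", model="Kevincp560/bart-base-finetuned-pubmed")

article = ("We studied the effect of a low-dose aspirin regimen on cardiovascular "
           "outcomes in a cohort of 10,000 patients followed over five years.")
# max_length=20 matches the evaluation Gen Len reported in the card above.
print(summarizer(article, max_length=20, min_length=5, do_sample=False))
```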
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-cnn-finetuned-pubmed This model is a fine-tuned version of [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on the pub_med_summarization_dataset dataset. It achieves the following results on the evaluation set: - Loss: 1.8416 - Rouge1: 40.4866 - Rouge2: 16.7472 - Rougel: 24.9831 - Rougelsum: 36.4002 - Gen Len: 142.0 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | 1.932 | 1.0 | 4000 | 1.8110 | 38.1151 | 15.2255 | 23.4286 | 34.2521 | 141.8905 | | 1.7001 | 2.0 | 8000 | 1.7790 | 39.8217 | 16.3042 | 24.649 | 35.831 | 142.0 | | 1.5 | 3.0 | 12000 | 1.7971 | 40.6108 | 17.0446 | 25.1977 | 36.5556 | 141.9865 | | 1.3316 | 4.0 | 16000 | 1.8106 | 40.0466 | 16.4851 | 24.7094 | 36.0998 | 141.9335 | | 1.1996 | 5.0 | 20000 | 1.8416 | 40.4866 | 16.7472 | 24.9831 | 36.4002 | 142.0 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.0+cu111 - Datasets 1.18.3 - Tokenizers 0.11.6
{"license": "mit", "tags": ["generated_from_trainer"], "datasets": ["pub_med_summarization_dataset"], "metrics": ["rouge"], "model-index": [{"name": "bart-large-cnn-finetuned-pubmed", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "pub_med_summarization_dataset", "type": "pub_med_summarization_dataset", "args": "document"}, "metrics": [{"type": "rouge", "value": 40.4866, "name": "Rouge1"}]}]}]}
Kevincp560/bart-large-cnn-finetuned-pubmed
null
[ "transformers", "pytorch", "tensorboard", "bart", "text2text-generation", "generated_from_trainer", "dataset:pub_med_summarization_dataset", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-large-finetuned-pubmed This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on the pub_med_summarization_dataset dataset. It achieves the following results on the evaluation set: - Loss: 1.8135 - Rouge1: 10.946 - Rouge2: 5.0933 - Rougel: 9.5608 - Rougelsum: 10.4259 - Gen Len: 19.0495 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:------:|:---------:|:-------:| | 2.0861 | 1.0 | 4000 | 1.8909 | 8.7344 | 3.6919 | 7.8804 | 8.3305 | 20.0 | | 1.8996 | 2.0 | 8000 | 1.8261 | 10.2124 | 4.6212 | 8.9842 | 9.7417 | 17.632 | | 1.7459 | 3.0 | 12000 | 1.8160 | 9.4933 | 4.4117 | 8.3977 | 9.0758 | 16.4775 | | 1.6258 | 4.0 | 16000 | 1.8136 | 10.8248 | 5.0335 | 9.4286 | 10.3123 | 18.724 | | 1.5214 | 5.0 | 20000 | 1.8135 | 10.946 | 5.0933 | 9.5608 | 10.4259 | 19.0495 | ### Framework versions - Transformers 4.16.2 - Pytorch 1.10.0+cu111 - Datasets 1.18.3 - Tokenizers 0.11.6
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["pub_med_summarization_dataset"], "metrics": ["rouge"], "model-index": [{"name": "bart-large-finetuned-pubmed", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "pub_med_summarization_dataset", "type": "pub_med_summarization_dataset", "args": "document"}, "metrics": [{"type": "rouge", "value": 10.946, "name": "Rouge1"}]}]}]}
Kevincp560/bart-large-finetuned-pubmed
null
[ "transformers", "pytorch", "tensorboard", "bart", "text2text-generation", "generated_from_trainer", "dataset:pub_med_summarization_dataset", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
KeyNoOne/Friendly_Friends
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Khalidnassiri/distilbert-base-uncased-finetuned-squad
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Khalima/N
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Model for a chatbot that talks like Tony Stark
{"tags": ["conversational"]}
KhanAdeeb/model-tony-stark
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-multilingual-cased-finetuned-squad This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.4919 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.1782 | 1.0 | 579 | 0.5258 | | 0.4938 | 2.0 | 1158 | 0.4639 | | 0.32 | 3.0 | 1737 | 0.4919 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "bert-base-multilingual-cased-finetuned-squad", "results": []}]}
Khanh/bert-base-multilingual-cased-finetuned-squad
null
[ "transformers", "pytorch", "tensorboard", "bert", "question-answering", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
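The Khanh QA checkpoints in this block expose the standard extractive question-answering interface; a minimal sketch (the question and context are mine, not from the cards):

```python
from transformers import pipeline

qa = pipeline("question-answering",
              model="Khanh/bert-base-multilingual-cased-finetuned-squad")

# Extractive QA: the answer is a span copied out of the supplied context.
result = qa(question="Where is the company based?",
            context="Hugging Face is a company based in New York City.")
print(result["answer"], round(result["score"], 3))
```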
question-answering
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-multilingual-cased-finetuned-viquad This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.9815 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 65 | 2.5534 | | No log | 2.0 | 130 | 2.1165 | | No log | 3.0 | 195 | 1.9815 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "bert-base-multilingual-cased-finetuned-viquad", "results": []}]}
Khanh/bert-base-multilingual-cased-finetuned-viquad
null
[ "transformers", "pytorch", "tensorboard", "bert", "question-answering", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-multilingual-cased-finetuned-squad This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.6587 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.923 | 1.0 | 579 | 0.8439 | | 0.8479 | 2.0 | 1158 | 0.6784 | | 0.6148 | 3.0 | 1737 | 0.6587 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-multilingual-cased-finetuned-squad", "results": []}]}
Khanh/distilbert-base-multilingual-cased-finetuned-squad
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "question-answering", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-multilingual-cased-finetuned-viquad This model is a fine-tuned version of [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.4241 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 65 | 4.0975 | | No log | 2.0 | 130 | 3.9315 | | No log | 3.0 | 195 | 3.6742 | | No log | 4.0 | 260 | 3.4878 | | No log | 5.0 | 325 | 3.4241 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-multilingual-cased-finetuned-viquad", "results": []}]}
Khanh/distilbert-base-multilingual-cased-finetuned-viquad
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "question-answering", "generated_from_trainer", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-squad This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5539 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.7665 | 1.0 | 2295 | 0.5231 | | 0.5236 | 2.0 | 4590 | 0.5539 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"license": "mit", "tags": ["generated_from_trainer"], "model-index": [{"name": "xlm-roberta-base-finetuned-squad", "results": []}]}
Khanh/xlm-roberta-base-finetuned-squad
null
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "question-answering", "generated_from_trainer", "license:mit", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
question-answering
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-viquad This model is a fine-tuned version of [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.3761 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 259 | 2.9945 | | 3.3665 | 2.0 | 518 | 2.3761 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"license": "mit", "tags": ["generated_from_trainer"], "model-index": [{"name": "xlm-roberta-base-finetuned-viquad", "results": []}]}
Khanh/xlm-roberta-base-finetuned-viquad
null
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "question-answering", "generated_from_trainer", "license:mit", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kharman/Kharman
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
feature-extraction
transformers
{}
KheireddineDaouadi/SIMCSEARA
null
[ "transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
zero-shot-classification
transformers
{"language": "ar", "license": "other", "tags": ["zero-shot-classification", "nli", "pytorch"], "datasets": ["xnli"], "pipeline_tag": "zero-shot-classification"}
KheireddineDaouadi/ZeroAraElectra
null
[ "transformers", "pytorch", "electra", "text-classification", "zero-shot-classification", "nli", "ar", "dataset:xnli", "license:other", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
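Given the zero-shot-classification pipeline tag and XNLI training data declared above, an Arabic zero-shot sketch could look like this (assuming the checkpoint exposes an NLI head compatible with the pipeline, which the metadata suggests but does not guarantee):

```python
from transformers import pipeline

zsc = pipeline("zero-shot-classification", model="KheireddineDaouadi/ZeroAraElectra")

# Classify an Arabic sentence against candidate labels via NLI entailment.
print(zsc("أحب هذا الفيلم كثيرا", candidate_labels=["رياضة", "سياسة", "فن"]))
```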
feature-extraction
transformers
{}
KheireddineDaouadi/arsent
null
[ "transformers", "pytorch", "bert", "feature-extraction", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Khere/DialoGPT-small-luz
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
Vietnamese QA model based on a custom dataset.
{}
KhoiNXM/KhoiNXM_Vietnamese_QA
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
# CLOG Assessment generator model
{}
Khu1998/clog-assessment-model
null
[ "transformers", "tf", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text2text-generation
transformers
{}
Khu1998/clog-clo-model
null
[ "transformers", "pytorch", "jax", "bart", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.5327 - Matthews Correlation: 0.5233 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5314 | 1.0 | 535 | 0.4955 | 0.4270 | | 0.3545 | 2.0 | 1070 | 0.5327 | 0.5233 | | 0.2418 | 3.0 | 1605 | 0.6180 | 0.5132 | | 0.1722 | 4.0 | 2140 | 0.7344 | 0.5158 | | 0.1243 | 5.0 | 2675 | 0.8581 | 0.5196 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["glue"], "metrics": ["matthews_correlation"], "model-index": [{"name": "distilbert-base-uncased-finetuned-cola", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "args": "cola"}, "metrics": [{"type": "matthews_correlation", "value": 0.5232819075279987, "name": "Matthews Correlation"}]}]}]}
Kien/distilbert-base-uncased-finetuned-cola
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:glue", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
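The Matthews correlation reported in these CoLA cards is a balanced metric over all four confusion-matrix cells; a toy computation (unrelated to the model's actual predictions) shows how it behaves:

```python
from sklearn.metrics import matthews_corrcoef

# Toy labels: 3 TP, 3 TN, 1 FP, 1 FN -> MCC = (3*3 - 1*1) / 16 = 0.5
y_true = [1, 1, 0, 1, 0, 0, 1, 0]
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]
print(matthews_corrcoef(y_true, y_pred))  # 0.5 (1.0 = perfect, 0.0 = chance)
```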
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-cola This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1037 - Matthews Correlation: 0.9719 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.2094 | 1.0 | 525 | 0.1069 | 0.9607 | | 0.0483 | 2.0 | 1050 | 0.0878 | 0.9719 | | 0.0296 | 3.0 | 1575 | 0.1263 | 0.9664 | | 0.0108 | 4.0 | 2100 | 0.1037 | 0.9719 | | 0.0096 | 5.0 | 2625 | 0.1065 | 0.9719 | ### Framework versions - Transformers 4.9.2 - Pytorch 1.9.0+cu102 - Datasets 1.11.0 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["matthews_correlation"], "model_index": [{"name": "distilbert-base-uncased-finetuned-cola", "results": [{"task": {"name": "Text Classification", "type": "text-classification"}, "metric": {"name": "Matthews Correlation", "type": "matthews_correlation", "value": 0.9719066462260881}}]}]}
Kieran/distilbert-base-uncased-finetuned-cola
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kiinitix/Butler-chatbot
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kim480/Arcane
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
null
{"tags": ["conversational"]}
KingCodeSquid/Octavian
null
[ "conversational", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
null
{"tags": ["conversational"]}
KingCodeSquid/Octavian2
null
[ "conversational", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kinley/roberta-base-bne-finetuned-amazon_reviews_multi
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2224 - Accuracy: 0.9225 - F1: 0.9228 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.84 | 1.0 | 250 | 0.3133 | 0.909 | 0.9070 | | 0.2459 | 2.0 | 500 | 0.2224 | 0.9225 | 0.9228 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.9.1 - Datasets 1.16.1 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["emotion"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "args": "default"}, "metrics": [{"type": "accuracy", "value": 0.9225, "name": "Accuracy"}, {"type": "f1", "value": 0.9227765339978083, "name": "F1"}]}]}]}
Kiran146/distilbert-base-uncased-finetuned-emotion
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
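To inspect the full emotion distribution rather than just the top label, the pipeline can return every score; a sketch (the input text is illustrative, and `top_k=None` assumes a recent transformers version):

```python
from transformers import pipeline

clf = pipeline("text-classification",
               model="Kiran146/distilbert-base-uncased-finetuned-emotion",
               top_k=None)  # top_k=None returns every label's score (recent transformers)

print(clf("I can't believe how lucky we got today!"))
```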
null
null
This is my README.
{}
KiranM/someNewModel
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text2text-generation
transformers
### 📝 Description MBart for Russian summarization, fine-tuned for **dialogue** summarization. This model was first fine-tuned by [Ilya Gusev](https://hf.co/IlyaGusev) on the [Gazeta dataset](https://huggingface.co/datasets/IlyaGusev/gazeta). We have **fine-tuned** that model on the [SamSum dataset](https://huggingface.co/datasets/samsum) **translated to Russian** using the Google Translate API 🤗 Moreover! We have implemented a **Telegram bot [@summarization_bot](https://t.me/summarization_bot)** with the inference of this model. Add it to a chat and get summaries instead of dozens of spam messages! 🤗 ### ❓ How to use with code ```python from transformers import AutoTokenizer, MBartForConditionalGeneration # Download model and tokenizer model_name = "Kirili4ik/mbart_ruDialogSum" tokenizer = AutoTokenizer.from_pretrained(model_name) model = MBartForConditionalGeneration.from_pretrained(model_name) model.eval() article_text = "..." input_ids = tokenizer( [article_text], max_length=600, padding="max_length", truncation=True, return_tensors="pt", )["input_ids"] output_ids = model.generate( input_ids=input_ids, top_k=0, num_beams=3, no_repeat_ngram_size=3 )[0] summary = tokenizer.decode(output_ids, skip_special_tokens=True) print(summary) ```
{"language": ["ru"], "license": "cc", "tags": ["mbart"], "datasets": ["IlyaGusev/gazeta", "samsum", "samsum_(translated_into_Russian)"], "inference": {"parameters": {"no_repeat_ngram_size": "4,", "num_beams": 5}}, "widget": [{"text": "\u0414\u0436\u0435\u0444\u0444: \u041c\u043e\u0433\u0443 \u043b\u0438 \u044f \u043e\u0431\u0443\u0447\u0438\u0442\u044c \u043c\u043e\u0434\u0435\u043b\u044c \ud83e\udd17 Transformers \u043d\u0430 Amazon SageMaker? \n\u0424\u0438\u043b\u0438\u043f\u043f: \u041a\u043e\u043d\u0435\u0447\u043d\u043e, \u0432\u044b \u043c\u043e\u0436\u0435\u0442\u0435 \u0438\u0441\u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u044c \u043d\u043e\u0432\u044b\u0439 \u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440 \u0434\u043b\u044f \u0433\u043b\u0443\u0431\u043e\u043a\u043e\u0433\u043e \u043e\u0431\u0443\u0447\u0435\u043d\u0438\u044f HuggingFace. \n\u0414\u0436\u0435\u0444\u0444: \u0425\u043e\u0440\u043e\u0448\u043e.\n\u0414\u0436\u0435\u0444\u0444: \u0438 \u043a\u0430\u043a \u044f \u043c\u043e\u0433\u0443 \u043d\u0430\u0447\u0430\u0442\u044c? \n\u0414\u0436\u0435\u0444\u0444: \u0433\u0434\u0435 \u044f \u043c\u043e\u0433\u0443 \u043d\u0430\u0439\u0442\u0438 \u0434\u043e\u043a\u0443\u043c\u0435\u043d\u0442\u0430\u0446\u0438\u044e? \n\u0424\u0438\u043b\u0438\u043f\u043f: \u043e\u043a, \u043e\u043a, \u0437\u0434\u0435\u0441\u044c \u043c\u043e\u0436\u043d\u043e \u043d\u0430\u0439\u0442\u0438 \u0432\u0441\u0435: https://huggingface.co/blog/the-partnership-amazon-sagemaker-and-hugging-face\n"}], "model-index": [{"name": "mbart_ruDialogSum", "results": [{"task": {"type": "abstractive-text-summarization", "name": "Abstractive Dialogue Summarization"}, "dataset": {"name": "SAMSum Corpus (translated to Russian)", "type": "samsum"}, "metrics": [{"type": "rogue-1", "value": 34.5, "name": "Validation ROGUE-1"}, {"type": "rogue-l", "value": 33, "name": "Validation ROGUE-L"}, {"type": "rogue-1", "value": 31, "name": "Test ROGUE-1"}, {"type": "rogue-l", "value": 28, "name": "Test ROGUE-L"}]}]}]}
Kirili4ik/mbart_ruDialogSum
null
[ "transformers", "pytorch", "mbart", "text2text-generation", "ru", "license:cc", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
{}
Kirili4ik/ruDialoGpt3-medium-finetuned-telegram-6ep
null
[ "transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
### 📝 Description DialoGPT trained on Russian and fine-tuned on my Telegram chat. This model was created by [sberbank-ai](https://hf.co/sberbank-ai) and trained on Russian forums (see [Grossmend's model](https://hf.co/Grossmend/rudialogpt3_medium_based_on_gpt2)). You can find info about how it has been trained on [habr](https://habr.com/ru/company/icl_services/blog/548244/) (in Russian). I have created a **simple pipeline** and **fine-tuned** that model on my own **exported Telegram chat** (~30 MB of JSON). It is in fact very easy to get the data from Telegram and fine-tune a model. Therefore, I made a **colab tutorial** for it: https://colab.research.google.com/drive/1fnAVURjyZRK9VQg1Co_-SKUQnRES8l9R?usp=sharing ⚠️ Due to the specifics of the data, the Hosted Inference API may not work properly ⚠️ 🤗 To try it, use my [Spaces demo](https://huggingface.co/spaces/Kirili4ik/chat-with-Kirill) 🤗 ### ❓ How to use with code ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM # Download model and tokenizer checkpoint = "Kirili4ik/ruDialoGpt3-medium-finetuned-telegram" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForCausalLM.from_pretrained(checkpoint) model.eval() # util function to get expected len after tokenizing def get_length_param(text: str, tokenizer) -> str: tokens_count = len(tokenizer.encode(text)) if tokens_count <= 15: len_param = '1' elif tokens_count <= 50: len_param = '2' elif tokens_count <= 256: len_param = '3' else: len_param = '-' return len_param # util function to get next person number (1/0) for Machine or Human in the dialogue def get_user_param(text: dict, machine_name_in_chat: str) -> str: if text['from'] == machine_name_in_chat: return '1' # machine else: return '0' # human chat_history_ids = torch.zeros((1, 0), dtype=torch.int) while True: next_who = input("Who's phrase?\t") #input("H / G?") # Human or GPT # In case Human if next_who == "H" or next_who == "Human": input_user = input("===> Human: ") # encode the new user input, add parameters and return a tensor in Pytorch new_user_input_ids = tokenizer.encode(f"|0|{get_length_param(input_user, tokenizer)}|" \ + input_user + tokenizer.eos_token, return_tensors="pt") # append the new user input tokens to the chat history chat_history_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if next_who == "G" or next_who == "GPT": next_len = input("Phrase len? 1/2/3/-\t") #input("Exp. len?(-/1/2/3): ") # encode the new user input, add parameters and return a tensor in Pytorch new_user_input_ids = tokenizer.encode(f"|1|{next_len}|", return_tensors="pt") # append the new user input tokens to the chat history chat_history_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) # print(tokenizer.decode(chat_history_ids[-1])) # uncomment to see full gpt input # save previous len input_len = chat_history_ids.shape[-1] # generate a response; PS you can read about the parameters at hf.co/blog/how-to-generate chat_history_ids = model.generate( chat_history_ids, num_return_sequences=1, # use for more variants, but have to print [i] max_length=512, no_repeat_ngram_size=3, do_sample=True, top_k=50, top_p=0.9, temperature = 0.6, # 0 for greedy mask_token_id=tokenizer.mask_token_id, eos_token_id=tokenizer.eos_token_id, unk_token_id=tokenizer.unk_token_id, pad_token_id=tokenizer.pad_token_id ) # pretty print last output tokens from bot print(f"===> GPT-3: {tokenizer.decode(chat_history_ids[:, input_len:][0], skip_special_tokens=True)}") ```
{"language": ["ru", "ru-RU"], "tags": ["conversational"]}
Kirili4ik/ruDialoGpt3-medium-finetuned-telegram
null
[ "transformers", "pytorch", "safetensors", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kirsty/DialoGPT-small-harrypotter
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kirsty/HarryPotterChatBot
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text2text-generation
transformers
T5-base fine-tuned on the SQuAD and CoQA datasets for question generation. Language: en-US. Tags: question-generation. License: MIT. Datasets: SQuAD 2.0, CoQA.
{}
Kithogue/T5_Question_Generation
null
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
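The Kithogue card does not document its expected input format (for instance, whether an answer span must be marked), so the plain-context prompt in this sketch is an assumption:

```python
from transformers import T5ForConditionalGeneration, T5Tokenizer

name = "Kithogue/T5_Question_Generation"
tokenizer = T5Tokenizer.from_pretrained(name)
model = T5ForConditionalGeneration.from_pretrained(name)

# Feed a declarative sentence and decode a generated question.
input_ids = tokenizer("The Eiffel Tower was completed in 1889.",
                      return_tensors="pt").input_ids
out = model.generate(input_ids, max_length=32, num_beams=4)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```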
null
null
{}
KittK4tt30/Me
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Wangchanberta-Depress-Finetuned This model is a fine-tuned version of [airesearch/wangchanberta-base-att-spm-uncased](https://huggingface.co/airesearch/wangchanberta-base-att-spm-uncased) on the wisesight_sentiment dataset. It achieves the following results on the evaluation set: - Loss: 0.5910 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 400 - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.0114 | 0.08 | 200 | 0.9538 | | 0.8617 | 0.15 | 400 | 0.8280 | | 0.7882 | 0.23 | 600 | 0.7472 | | 0.7132 | 0.3 | 800 | 0.7264 | | 0.7226 | 0.38 | 1000 | 0.7265 | | 0.6854 | 0.45 | 1200 | 0.6792 | | 0.621 | 0.53 | 1400 | 0.6451 | | 0.6093 | 0.61 | 1600 | 0.6364 | | 0.6099 | 0.68 | 1800 | 0.6128 | | 0.5766 | 0.76 | 2000 | 0.6388 | | 0.6033 | 0.83 | 2200 | 0.6148 | | 0.5966 | 0.91 | 2400 | 0.6440 | | 0.6208 | 0.98 | 2600 | 0.5910 | | 0.5178 | 1.06 | 2800 | 0.6340 | | 0.4863 | 1.13 | 3000 | 0.7177 | | 0.4852 | 1.21 | 3200 | 0.6766 | | 0.4711 | 1.29 | 3400 | 0.6739 | | 0.5203 | 1.36 | 3600 | 0.6429 | | 0.5167 | 1.44 | 3800 | 0.6539 | | 0.5053 | 1.51 | 4000 | 0.6172 | | 0.5076 | 1.59 | 4200 | 0.6053 | | 0.4704 | 1.66 | 4400 | 0.6474 | | 0.4807 | 1.74 | 4600 | 0.6225 | | 0.4792 | 1.82 | 4800 | 0.6282 | | 0.5177 | 1.89 | 5000 | 0.6011 | | 0.4839 | 1.97 | 5200 | 0.6231 | | 0.4155 | 2.04 | 5400 | 0.6668 | | 0.3923 | 2.12 | 5600 | 0.6886 | | 0.3713 | 2.19 | 5800 | 0.6895 | | 0.364 | 2.27 | 6000 | 0.6886 | | 0.3774 | 2.34 | 6200 | 0.7117 | | 0.4001 | 2.42 | 6400 | 0.7081 | | 0.3531 | 2.5 | 6600 | 0.7465 | | 0.3768 | 2.57 | 6800 | 0.7706 | | 0.3324 | 2.65 | 7000 | 0.7456 | | 0.3597 | 2.72 | 7200 | 0.7507 | | 0.3868 | 2.8 | 7400 | 0.7542 | | 0.4141 | 2.87 | 7600 | 0.7223 | | 0.3701 | 2.95 | 7800 | 0.7374 | | 0.3175 | 3.03 | 8000 | 0.7615 | | 0.2951 | 3.1 | 8200 | 0.7880 | | 0.2885 | 3.18 | 8400 | 0.8158 | | 0.2913 | 3.25 | 8600 | 0.8565 | | 0.2815 | 3.33 | 8800 | 0.8649 | | 0.2748 | 3.4 | 9000 | 0.8783 | | 0.2776 | 3.48 | 9200 | 0.8851 | | 0.2982 | 3.56 | 9400 | 0.8922 | | 0.2939 | 3.63 | 9600 | 0.8796 | | 0.2712 | 3.71 | 9800 | 0.8873 | | 0.2918 | 3.78 | 10000 | 0.8973 | | 0.3144 | 3.86 | 10200 | 0.8978 | | 0.2988 | 3.93 | 10400 | 0.8951 | ### Framework versions - Transformers 4.11.2 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "datasets": ["wisesight_sentiment"], "model-index": [{"name": "Wangchanberta-Depress-Finetuned", "results": []}]}
Kittipot/Wangchanberta-Depress-Finetuned
null
[ "transformers", "pytorch", "tensorboard", "camembert", "text-classification", "generated_from_trainer", "dataset:wisesight_sentiment", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kizukym/DialoGPT-small-whotao
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kleinopp/bert
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{"license": "bsd-2-clause"}
Kneecapsnatcher/Unon
null
[ "license:bsd-2-clause", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# MORTY!!!
{"tags": ["conversational"]}
KnutZuidema/DialoGPT-small-morty
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Kobby/opus-mt-en-ro-finetuned-en-to-ro
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
{}
KoboldAI/GPT-J-6B-Adventure
null
[ "transformers", "pytorch", "gptj", "text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# GPT-J 6B - Janeway ## Model Description GPT-J 6B-Janeway is a finetune created using EleutherAI's GPT-J 6B model. ## Training data The training data contains around 2210 ebooks, mostly in the sci-fi and fantasy genres. The dataset is based on the same dataset used by GPT-Neo-2.7B-Picard, with 20% more data in various genres. Some parts of the dataset have been prepended using the following text: `[Genre: <genre1>,<genre2>]` ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='KoboldAI/GPT-J-6B-Janeway') >>> generator("Welcome Captain Janeway, I apologize for the delay.", do_sample=True, min_length=50) [{'generated_text': 'Welcome Captain Janeway, I apologize for the delay."\nIt's all right," Janeway said. "I'm certain that you're doing your best to keep me informed of what\'s going on."'}] ``` ### Limitations and Biases The core functionality of GPT-J is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work. When prompting GPT-J it is important to remember that the statistically most likely next token is often not the token that produces the most "accurate" text. Never depend upon GPT-J to produce factually accurate output. GPT-J was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. Depending upon use case GPT-J may produce socially unacceptable text. See [Sections 5 and 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a more detailed analysis of the biases in the Pile. As with all language models, it is hard to predict in advance how GPT-J will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results. ### BibTeX entry and citation info The model uses the following model as base: ```bibtex @misc{gpt-j, author = {Wang, Ben and Komatsuzaki, Aran}, title = {{GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model}}, howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}}, year = 2021, month = May } ``` ## Acknowledgements This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/), as well as the Cloud TPU team for providing early access to the [Cloud TPU VM](https://cloud.google.com/blog/products/compute/introducing-cloud-tpu-vms) Alpha.
{"language": "en", "license": "mit"}
KoboldAI/GPT-J-6B-Janeway
null
[ "transformers", "pytorch", "gptj", "text-generation", "en", "arxiv:2101.00027", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# GPT-J 6B - Shinen ## Model Description GPT-J 6B-Shinen is a finetune created using EleutherAI's GPT-J 6B model. Compared to GPT-Neo-2.7-Horni, this model is much heavier on the sexual content. **Warning: THIS model is NOT suitable for use by minors. The model will output X-rated content.** ## Training data The training data contains user-generated stories from sexstories.com. All stories are tagged using the following way: ``` [Theme: <theme1>, <theme2> ,<theme3>] <Story goes here> ``` ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='KoboldAI/GPT-J-6B-Shinen') >>> generator("She was staring at me", do_sample=True, min_length=50) [{'generated_text': 'She was staring at me with a look that said it all. She wanted me so badly tonight that I wanted'}] ``` ### Limitations and Biases The core functionality of GPT-J is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work. When prompting GPT-J it is important to remember that the statistically most likely next token is often not the token that produces the most "accurate" text. Never depend upon GPT-J to produce factually accurate output. GPT-J was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. Depending upon use case GPT-J may produce socially unacceptable text. See [Sections 5 and 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a more detailed analysis of the biases in the Pile. As with all language models, it is hard to predict in advance how GPT-J will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results. ### BibTeX entry and citation info The model uses the following model as base: ```bibtex @misc{gpt-j, author = {Wang, Ben and Komatsuzaki, Aran}, title = {{GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model}}, howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}}, year = 2021, month = May } ``` ## Acknowledgements This project would not have been possible without compute generously provided by Google through the [TPU Research Cloud](https://sites.research.google/trc/), as well as the Cloud TPU team for providing early access to the [Cloud TPU VM](https://cloud.google.com/blog/products/compute/introducing-cloud-tpu-vms) Alpha.
{"language": "en", "license": "mit"}
KoboldAI/GPT-J-6B-Shinen
null
[ "transformers", "pytorch", "gptj", "text-generation", "en", "arxiv:2101.00027", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Model Card for GPT-J-6B-Skein # Model Details ## Model Description - **Developed by:** KoboldAI - **Shared by [Optional]:** KoboldAI - **Model type:** Text Generation - **Language(s) (NLP):** English - **License:** Apache License 2.0 - **Related Models:** [GPT-J 6B](https://huggingface.co/EleutherAI/gpt-j-6B?text=My+name+is+Mariama%2C+my+favorite) - **Parent Model:** GPT-J - **Resources for more information:** - [GitHub Repo](https://github.com/kingoflolz/mesh-transformer-jax) - [Associated Model Doc](https://huggingface.co/docs/transformers/main/en/model_doc/gptj#transformers.GPTJForCausalLM) # Uses ## Direct Use This model is designed for creative story generation. It can understand both free-form text and text written in interactive fiction style with actions starting with "> You", such as: ``` You become aware of her breathing -- the slight expansion of her ribs, the soft exhalation -- natural, and yet somehow studied. "Ah -- by the way," she says, in a way that utterly fails to be casual, "have you seen the artist out there? -- My artist, that is." "No," you respond, uneasy. You open your mouth and close it again. > You ask about the experience of waking up ``` ## Downstream Use [Optional] More information needed ## Out-of-Scope Use The model should not be used to intentionally create hostile or alienating environments for people. # Bias, Risks, and Limitations The core functionality of GPT-J is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work. When prompting GPT-J it is important to remember that the statistically most likely next token is often not the token that produces the most "accurate" text. Never depend upon GPT-J to produce factually accurate output. GPT-J was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. Depending upon use case GPT-J may produce socially unacceptable text. See Sections 5 and 6 of the Pile paper for a more detailed analysis of the biases in the Pile. As with all language models, it is hard to predict in advance how GPT-J will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results. See the [GPT-J 6B model card](https://huggingface.co/EleutherAI/gpt-j-6B?text=My+name+is+Mariama%2C+my+favorite) for more information. ## Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. # Training Details ## Training Data The data are mostly comprised of light novels from the dataset of the [KoboldAI/GPT-Neo-2.7B-Horni-LN](https://huggingface.co/KoboldAI/GPT-Neo-2.7B-Horni-LN) model and assorted interactive fiction. The dataset uses `[Themes: <comma-separated list of genres>]` for tagging, which means that if similar text is placed in the context, the model will attempt to generate text in the specified style(s). For more details about the dataset, consult [this document](https://wandb.ai/ve-forbryderne/skein/runs/files/files/datasets/README.txt). ## Training Procedure ### Preprocessing The data were preprocessed using the Python package ftfy to eliminate as much as possible non-ASCII punctuation characters and possible encoding errors. 
The interactive fiction in the dataset also underwent deduplication since interactive fiction logs often contain duplicate text from, for example, visiting the same in-game area several times. spaCy was used for grammatical analysis with the purpose of reformatting the actions commonly found in old text adventure games into more complete sentences. There was also some manual elimination of things such as "thank you for playing" messages and title messages. ### Speeds, Sizes, Times Training took approximately 14 hours in total, with the average speed being 5265 tokens per second. # Evaluation ## Testing Data, Factors & Metrics ### Testing Data More information needed ### Factors ### Metrics More information needed ## Results More information needed # Model Examination More information needed # Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** More information needed - **Hours used:** More information needed - **Cloud Provider:** More information needed - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Technical Specifications [optional] ## Model Architecture and Objective More information needed ## Compute Infrastructure More information needed ### Hardware More information needed ### Software https://github.com/kingoflolz/mesh-transformer-jax # Citation **BibTeX:** ``` @misc{mesh-transformer-jax, author = {Wang, Ben}, title = {{Mesh-Transformer-JAX: Model-Parallel Implementation of Transformer Language Model with JAX}}, howpublished = {\url{https://github.com/kingoflolz/mesh-transformer-jax}}, year = 2021, month = May } ``` # Glossary [optional] More information needed # More Information [optional] More information needed # Model Card Authors [optional] KoboldAI in collaboration with Ezi Ozoani and the Hugging Face team # Model Card Contact More information needed # How to Get Started with the Model Use the code below to get started with the model. <details> <summary> Click to expand </summary> ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("KoboldAI/GPT-J-6B-Skein") model = AutoModelForCausalLM.from_pretrained("KoboldAI/GPT-J-6B-Skein") ``` </details>
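As an illustrative follow-up to the loading snippet above (not part of the original card), generation might look roughly like this; the prompt mirrors the "> You" action style described under Direct Use, and all sampling settings are assumptions:
```py
# Illustrative continuation of the loading snippet above; prompt and settings are assumptions.
prompt = "You are standing in a dimly lit cavern.\n\n> You light a torch\n"
inputs = tokenizer(prompt, return_tensors="pt")
output_ids = model.generate(**inputs, do_sample=True, max_new_tokens=60)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```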
{"tags": ["text-generation"]}
KoboldAI/GPT-J-6B-Skein
null
[ "transformers", "pytorch", "gptj", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# GPT-Neo-125M-AID This model was finetuned by Henk717 on Google Colab. It contains text adventure tuning and is the smallest 'Adventure' model of its kind. Because of its limited size, its behavior is mostly suitable for testing text adventure game modes at fast speeds; for a coherent adventure you are better off using one of the 2.7B models.
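The original card ships no usage example; the snippet below is a minimal sketch that assumes the standard `transformers` text-generation pipeline, with an arbitrary prompt and sampling settings that are not part of the original card.
```py
# Minimal sketch; prompt and sampling settings are illustrative assumptions.
from transformers import pipeline

generator = pipeline("text-generation", model="KoboldAI/GPT-Neo-125M-AID")
print(generator("> You open the door", do_sample=True, max_new_tokens=40)[0]["generated_text"])
```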
{}
KoboldAI/GPT-Neo-125M-AID
null
[ "transformers", "pytorch", "gpt_neo", "text-generation", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
{}
KoboldAI/GPT-Neo-2.7B-AID
null
[ "transformers", "pytorch", "gpt_neo", "text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
{}
KoboldAI/GPT-Neo-2.7B-Horni-LN
null
[ "transformers", "pytorch", "gpt_neo", "text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
{}
KoboldAI/GPT-Neo-2.7B-Horni
null
[ "transformers", "pytorch", "gpt_neo", "text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# GPT-Neo 2.7B - Janeway ## Model Description GPT-Neo 2.7B-Janeway is a finetune created using EleutherAI's GPT-Neo 2.7B model. ## Training data The training data contains around 2210 ebooks, mostly in the sci-fi and fantasy genres. The dataset is based on the same dataset used by GPT-Neo-2.7B-Picard, with 20% more data in various genres. Some parts of the dataset have been prepended using the following text: `[Genre: <genre1>,<genre2>]` ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='KoboldAI/GPT-Neo-2.7B-Janeway') >>> generator("Welcome Captain Janeway, I apologize for the delay.", do_sample=True, min_length=50) [{'generated_text': 'Welcome Captain Janeway, I apologize for the delay."\nIt's all right," Janeway said. "I'm certain that you're doing your best to keep me informed of what\'s going on."'}] ``` ### Limitations and Biases GPT-Neo was trained as an autoregressive language model. This means that its core functionality is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work. GPT-Neo was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. Depending on your usecase GPT-Neo may produce socially unacceptable text. See Sections 5 and 6 of the Pile paper for a more detailed analysis of the biases in the Pile. As with all language models, it is hard to predict in advance how GPT-Neo will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results. ### BibTeX entry and citation info The model is made using the following software: ```bibtex @software{gpt-neo, author = {Black, Sid and Leo, Gao and Wang, Phil and Leahy, Connor and Biderman, Stella}, title = {{GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow}}, month = mar, year = 2021, note = {{If you use this software, please cite it using these metadata.}}, publisher = {Zenodo}, version = {1.0}, doi = {10.5281/zenodo.5297715}, url = {https://doi.org/10.5281/zenodo.5297715} } ```
{"language": "en", "license": "mit"}
KoboldAI/GPT-Neo-2.7B-Janeway
null
[ "transformers", "pytorch", "gpt_neo", "text-generation", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# GPT-Neo 2.7B - Picard ## Model Description GPT-Neo 2.7B-Picard is a finetune created using EleutherAI's GPT-Neo 2.7B model. ## Training data The training data contains around 1800 ebooks, mostly in the sci-fi and fantasy genres. ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='KoboldAI/GPT-Neo-2.7B-Picard') >>> generator("Jean-Luc Picard", do_sample=True, min_length=50) [{'generated_text': 'Jean-Luc Picard, the captain of a Federation starship in command of one of Starfleet\'s few fulltime scientists.'}] ``` ### Limitations and Biases GPT-Neo was trained as an autoregressive language model. This means that its core functionality is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work. GPT-Neo was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. Depending on your use case GPT-Neo may produce socially unacceptable text. See Sections 5 and 6 of the Pile paper for a more detailed analysis of the biases in the Pile. As with all language models, it is hard to predict in advance how GPT-Neo will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results. ### BibTeX entry and citation info The model is made using the following software: ```bibtex @software{gpt-neo, author = {Black, Sid and Leo, Gao and Wang, Phil and Leahy, Connor and Biderman, Stella}, title = {{GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow}}, month = mar, year = 2021, note = {{If you use this software, please cite it using these metadata.}}, publisher = {Zenodo}, version = {1.0}, doi = {10.5281/zenodo.5297715}, url = {https://doi.org/10.5281/zenodo.5297715} } ```
{"language": "en", "license": "mit"}
KoboldAI/GPT-Neo-2.7B-Picard
null
[ "transformers", "pytorch", "safetensors", "gpt_neo", "text-generation", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# GPT-Neo 2.7B - Shinen ## Model Description GPT-Neo 2.7B-Shinen is a finetune created using EleutherAI's GPT-Neo 2.7B model. Compared to GPT-Neo-2.7-Horni, this model is much heavier on the sexual content. **Warning: THIS model is NOT suitable for use by minors. The model will output X-rated content.** ## Training data The training data contains user-generated stories from sexstories.com. All stories are tagged using the following way: ``` [Theme: <theme1>, <theme2> ,<theme3>] <Story goes here> ``` ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='KoboldAI/GPT-Neo-2.7B-Shinen') >>> generator("She was staring at me", do_sample=True, min_length=50) [{'generated_text': 'She was staring at me with a look that said it all. She wanted me so badly tonight that I wanted'}] ``` ### Limitations and Biases GPT-Neo was trained as an autoregressive language model. This means that its core functionality is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work. GPT-Neo-Shinen was trained on a dataset known to contain profanity, lewd, and otherwise abrasive language. GPT-Neo-Shinen *WILL* produce socially unacceptable text without warning. GPT-Neo-Shinen will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results. ### BibTeX entry and citation info The model is made using the following software: ```bibtex @software{gpt-neo, author = {Black, Sid and Leo, Gao and Wang, Phil and Leahy, Connor and Biderman, Stella}, title = {{GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow}}, month = mar, year = 2021, note = {{If you use this software, please cite it using these metadata.}}, publisher = {Zenodo}, version = {1.0}, doi = {10.5281/zenodo.5297715}, url = {https://doi.org/10.5281/zenodo.5297715} } ```
{"language": "en", "license": "mit"}
KoboldAI/GPT-Neo-2.7B-Shinen
null
[ "transformers", "pytorch", "gpt_neo", "text-generation", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
This is a Hugging Face transformers-compatible conversion of the original dense 1.3B-parameter model from the paper "[Efficient Large Scale Language Modeling with Mixtures of Experts](https://arxiv.org/abs/2112.10684)" from Artetxe et al. Please refer to the original model card, which can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/moe_lm/model_card.md. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KoboldAI__fairseq-dense-1.3B) | Metric | Value | |-----------------------|---------------------------| | Avg. | 31.66 | | ARC (25-shot) | 31.14 | | HellaSwag (10-shot) | 58.39 | | MMLU (5-shot) | 24.98 | | TruthfulQA (0-shot) | 37.43 | | Winogrande (5-shot) | 59.04 | | GSM8K (5-shot) | 0.0 | | DROP (3-shot) | 10.6 |
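The original card does not include a usage snippet. As a rough sketch, assuming the converted checkpoint loads through the standard causal-LM auto classes (consistent with the `xglm` architecture tag on this record), generation could look like this; the prompt and sampling settings are arbitrary:
```py
# Rough sketch; loading path and generation settings are illustrative assumptions.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("KoboldAI/fairseq-dense-1.3B")
model = AutoModelForCausalLM.from_pretrained("KoboldAI/fairseq-dense-1.3B")
inputs = tokenizer("Efficient large scale language modeling", return_tensors="pt")
output_ids = model.generate(**inputs, do_sample=True, max_new_tokens=50)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```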
{"language": "en"}
KoboldAI/fairseq-dense-1.3B
null
[ "transformers", "pytorch", "safetensors", "xglm", "text-generation", "en", "arxiv:2112.10684", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
This is a Hugging Face transformers-compatible conversion of the original dense 125M-parameter model from the paper "[Efficient Large Scale Language Modeling with Mixtures of Experts](https://arxiv.org/abs/2112.10684)" from Artetxe et al. Please refer to the original model card, which can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/moe_lm/model_card.md. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KoboldAI__fairseq-dense-125M) | Metric | Value | |-----------------------|---------------------------| | Avg. | 26.0 | | ARC (25-shot) | 24.06 | | HellaSwag (10-shot) | 34.14 | | MMLU (5-shot) | 23.98 | | TruthfulQA (0-shot) | 43.72 | | Winogrande (5-shot) | 50.59 | | GSM8K (5-shot) | 0.0 | | DROP (3-shot) | 5.5 |
{"language": "en"}
KoboldAI/fairseq-dense-125M
null
[ "transformers", "pytorch", "safetensors", "xglm", "text-generation", "en", "arxiv:2112.10684", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
This is a Hugging Face transformers-compatible conversion of the original dense 13B-parameter model from the paper "[Efficient Large Scale Language Modeling with Mixtures of Experts](https://arxiv.org/abs/2112.10684)" from Artetxe et al. Please refer to the original model card, which can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/moe_lm/model_card.md. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KoboldAI__fairseq-dense-13B) | Metric | Value | |-----------------------|---------------------------| | Avg. | 37.53 | | ARC (25-shot) | 40.36 | | HellaSwag (10-shot) | 75.51 | | MMLU (5-shot) | 27.07 | | TruthfulQA (0-shot) | 32.83 | | Winogrande (5-shot) | 67.96 | | GSM8K (5-shot) | 0.0 | | DROP (3-shot) | 18.96 |
{"language": "en"}
KoboldAI/fairseq-dense-13B
null
[ "transformers", "pytorch", "xglm", "text-generation", "en", "arxiv:2112.10684", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
# Fairseq-dense 2.7B - Janeway ## Model Description Fairseq-dense 2.7B-Janeway is a finetune created using the dense 2.7B model released alongside Fairseq's mixture-of-experts (MoE) work. ## Training data The training data contains around 2210 ebooks, mostly in the sci-fi and fantasy genres. The dataset is identical to the one used by GPT-Neo-2.7B-Janeway. Some parts of the dataset have been prepended using the following text: `[Genre: <genre1>,<genre2>]` ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='KoboldAI/fairseq-dense-2.7B-Janeway') >>> generator("Welcome Captain Janeway, I apologize for the delay.", do_sample=True, min_length=50) [{'generated_text': 'Welcome Captain Janeway, I apologize for the delay."\nIt\'s all right," Janeway said. "I\'m certain that you\'re doing your best to keep me informed of what\'s going on."'}] ``` ### Limitations and Biases Based on known problems with NLP technology, potential relevant factors include bias (gender, profession, race and religion). ### BibTeX entry and citation info ``` Artetxe et al. (2021): Efficient Large Scale Language Modeling with Mixtures of Experts ```
{"language": "en", "license": "mit"}
KoboldAI/fairseq-dense-2.7B-Janeway
null
[ "transformers", "pytorch", "xglm", "text-generation", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
This is a Hugging Face transformers-compatible conversion of the original dense 2.7B-parameter model from the paper "[Efficient Large Scale Language Modeling with Mixtures of Experts](https://arxiv.org/abs/2112.10684)" from Artetxe et al. Please refer to the original model card, which can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/moe_lm/model_card.md. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KoboldAI__fairseq-dense-2.7B) | Metric | Value | |-----------------------|---------------------------| | Avg. | 33.67 | | ARC (25-shot) | 33.79 | | HellaSwag (10-shot) | 65.74 | | MMLU (5-shot) | 26.44 | | TruthfulQA (0-shot) | 34.57 | | Winogrande (5-shot) | 63.93 | | GSM8K (5-shot) | 0.0 | | DROP (3-shot) | 11.24 |
{"language": "en"}
KoboldAI/fairseq-dense-2.7B
null
[ "transformers", "pytorch", "safetensors", "xglm", "text-generation", "en", "arxiv:2112.10684", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
This is a Hugging Face transformers-compatible conversion of the original dense 355M-parameter model from the paper "[Efficient Large Scale Language Modeling with Mixtures of Experts](https://arxiv.org/abs/2112.10684)" from Artetxe et al. Please refer to the original model card, which can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/moe_lm/model_card.md. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KoboldAI__fairseq-dense-355M) | Metric | Value | |-----------------------|---------------------------| | Avg. | 27.99 | | ARC (25-shot) | 25.43 | | HellaSwag (10-shot) | 46.67 | | MMLU (5-shot) | 25.3 | | TruthfulQA (0-shot) | 39.19 | | Winogrande (5-shot) | 52.88 | | GSM8K (5-shot) | 0.0 | | DROP (3-shot) | 6.48 |
{"language": "en"}
KoboldAI/fairseq-dense-355M
null
[ "transformers", "pytorch", "safetensors", "xglm", "text-generation", "en", "arxiv:2112.10684", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
text-generation
transformers
This is a Hugging Face transformers-compatible conversion of the original dense 6.7B-parameter model from the paper "[Efficient Large Scale Language Modeling with Mixtures of Experts](https://arxiv.org/abs/2112.10684)" from Artetxe et al. Please refer to the original model card, which can be found at https://github.com/facebookresearch/fairseq/blob/main/examples/moe_lm/model_card.md. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KoboldAI__fairseq-dense-6.7B) | Metric | Value | |-----------------------|---------------------------| | Avg. | 36.09 | | ARC (25-shot) | 39.42 | | HellaSwag (10-shot) | 71.26 | | MMLU (5-shot) | 26.91 | | TruthfulQA (0-shot) | 32.73 | | Winogrande (5-shot) | 65.27 | | GSM8K (5-shot) | 0.0 | | DROP (3-shot) | 17.05 |
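Given the 6.7B parameter count, loading in half precision is a common way to fit the checkpoint in memory. The snippet below is a hedged sketch, not from the original card; the `torch_dtype` and `device_map` choices assume a recent `transformers` release with `accelerate` installed.
```py
# Hedged sketch; half-precision and device placement choices are assumptions, not from the original card.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("KoboldAI/fairseq-dense-6.7B")
model = AutoModelForCausalLM.from_pretrained(
    "KoboldAI/fairseq-dense-6.7B", torch_dtype=torch.float16, device_map="auto"
)
```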
{"language": "en"}
KoboldAI/fairseq-dense-6.7B
null
[ "transformers", "pytorch", "xglm", "text-generation", "en", "arxiv:2112.10684", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00
null
null
{}
Koen534/trump
null
[ "region:us" ]
null
2022-03-02T23:29:04+00:00
token-classification
transformers
[![Current PyPI packages](https://badge.fury.io/py/suparkanbun.svg)](https://pypi.org/project/suparkanbun/) # SuPar-Kanbun Tokenizer, POS-Tagger and Dependency-Parser for Classical Chinese Texts (漢文/文言文) with [spaCy](https://spacy.io), [Transformers](https://huggingface.co/transformers/) and [SuPar](https://github.com/yzhangcs/parser). ## Basic usage ```py >>> import suparkanbun >>> nlp=suparkanbun.load() >>> doc=nlp("不入虎穴不得虎子") >>> print(type(doc)) <class 'spacy.tokens.doc.Doc'> >>> print(suparkanbun.to_conllu(doc)) # text = 不入虎穴不得虎子 1 不 不 ADV v,副詞,否定,無界 Polarity=Neg 2 advmod _ Gloss=not|SpaceAfter=No 2 入 入 VERB v,動詞,行為,移動 _ 0 root _ Gloss=enter|SpaceAfter=No 3 虎 虎 NOUN n,名詞,主体,動物 _ 4 nmod _ Gloss=tiger|SpaceAfter=No 4 穴 穴 NOUN n,名詞,固定物,地形 Case=Loc 2 obj _ Gloss=cave|SpaceAfter=No 5 不 不 ADV v,副詞,否定,無界 Polarity=Neg 6 advmod _ Gloss=not|SpaceAfter=No 6 得 得 VERB v,動詞,行為,得失 _ 2 parataxis _ Gloss=get|SpaceAfter=No 7 虎 虎 NOUN n,名詞,主体,動物 _ 8 nmod _ Gloss=tiger|SpaceAfter=No 8 子 子 NOUN n,名詞,人,関係 _ 6 obj _ Gloss=child|SpaceAfter=No >>> import deplacy >>> deplacy.render(doc) 不 ADV <════╗ advmod 入 VERB ═══╗═╝═╗ ROOT 虎 NOUN <╗ ║ ║ nmod 穴 NOUN ═╝<╝ ║ obj 不 ADV <════╗ ║ advmod 得 VERB ═══╗═╝<╝ parataxis 虎 NOUN <╗ ║ nmod 子 NOUN ═╝<╝ obj ``` `suparkanbun.load()` has two options `suparkanbun.load(BERT="roberta-classical-chinese-base-char",Danku=False)`. With the option `Danku=True` the pipeline tries to segment sentences automatically. Available `BERT` options are: * `BERT="roberta-classical-chinese-base-char"` utilizes [roberta-classical-chinese-base-char](https://huggingface.co/KoichiYasuoka/roberta-classical-chinese-base-char) (default) * `BERT="roberta-classical-chinese-large-char"` utilizes [roberta-classical-chinese-large-char](https://huggingface.co/KoichiYasuoka/roberta-classical-chinese-large-char) * `BERT="guwenbert-base"` utilizes [GuwenBERT-base](https://huggingface.co/ethanyt/guwenbert-base) * `BERT="guwenbert-large"` utilizes [GuwenBERT-large](https://huggingface.co/ethanyt/guwenbert-large) * `BERT="sikubert"` utilizes [SikuBERT](https://huggingface.co/SIKU-BERT/sikubert) * `BERT="sikuroberta"` utilizes [SikuRoBERTa](https://huggingface.co/SIKU-BERT/sikuroberta) ## Installation for Linux ```sh pip3 install suparkanbun --user ``` ## Installation for Cygwin64 Make sure to get `python37-devel` `python37-pip` `python37-cython` `python37-numpy` `python37-wheel` `gcc-g++` `mingw64-x86_64-gcc-g++` `git` `curl` `make` `cmake` packages, and then: ```sh curl -L https://raw.githubusercontent.com/KoichiYasuoka/CygTorch/master/installer/supar.sh | sh pip3.7 install suparkanbun --no-build-isolation ``` ## Installation for Jupyter Notebook (Google Colaboratory) ```py !pip install suparkanbun ``` Try [notebook](https://colab.research.google.com/github/KoichiYasuoka/SuPar-Kanbun/blob/main/suparkanbun.ipynb) for Google Colaboratory. ## Author Koichi Yasuoka (安岡孝一)
{"language": ["lzh"], "license": "mit", "tags": ["classical chinese", "literary chinese", "ancient chinese", "token-classification", "pos"], "datasets": ["universal_dependencies"], "pipeline_tag": "token-classification", "widget": [{"text": "\u4e0d\u5165\u864e\u7a74\u4e0d\u5f97\u864e\u5b50"}]}
KoichiYasuoka/SuPar-Kanbun
null
[ "transformers", "pytorch", "roberta", "token-classification", "classical chinese", "literary chinese", "ancient chinese", "pos", "lzh", "dataset:universal_dependencies", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:04+00:00
fill-mask
transformers
# bert-base-japanese-char-extended ## Model Description This is a BERT model pre-trained on Japanese Wikipedia texts, derived from [bert-base-japanese-char-v2](https://huggingface.co/cl-tohoku/bert-base-japanese-char-v2). Character-embeddings are enhanced to include all 常用漢字/人名用漢字 characters using BertTokenizerFast. You can fine-tune `bert-base-japanese-char-extended` for downstream tasks, such as [POS-tagging](https://huggingface.co/KoichiYasuoka/bert-base-japanese-upos), [dependency-parsing](https://huggingface.co/KoichiYasuoka/bert-base-japanese-wikipedia-ud-head), and so on. ## How to Use ```py from transformers import AutoTokenizer,AutoModelForMaskedLM tokenizer=AutoTokenizer.from_pretrained("KoichiYasuoka/bert-base-japanese-char-extended") model=AutoModelForMaskedLM.from_pretrained("KoichiYasuoka/bert-base-japanese-char-extended") ```
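As an illustrative follow-up, not part of the original card, the checkpoint can also be exercised through the fill-mask pipeline; the sentence below is the widget example attached to this model:
```py
# Illustrative sketch; the example sentence comes from this card's widget configuration.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="KoichiYasuoka/bert-base-japanese-char-extended")
print(unmasker("酸素ボンベを充[MASK]する。")[0])  # highest-scoring completion for the masked character
```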
{"language": ["ja"], "license": "cc-by-sa-4.0", "tags": ["japanese", "masked-lm", "wikipedia"], "pipeline_tag": "fill-mask", "mask_token": "[MASK]", "widget": [{"text": "\u9178\u7d20\u30dc\u30f3\u30d9\u3092\u5145[MASK]\u3059\u308b\u3002"}]}
KoichiYasuoka/bert-base-japanese-char-extended
null
[ "transformers", "pytorch", "bert", "fill-mask", "japanese", "masked-lm", "wikipedia", "ja", "license:cc-by-sa-4.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:04+00:00