| Column | Type | Values / lengths |
|:---|:---|:---|
| pipeline_tag | stringclasses | 48 values |
| library_name | stringclasses | 205 values |
| text | stringlengths | 0–18.3M |
| metadata | stringlengths | 2–1.07B |
| id | stringlengths | 5–122 |
| last_modified | null | |
| tags | listlengths | 1–1.84k |
| sha | null | |
| created_at | stringlengths | 25–25 |
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilgpt2-sgnews This model is a fine-tuned version of [distilgpt2](https://huggingface.co/distilgpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 3.1516 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 3.3558 | 1.0 | 23769 | 3.2316 | | 3.2558 | 2.0 | 47538 | 3.1683 | | 3.2321 | 3.0 | 71307 | 3.1516 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.9.0+cu111 - Datasets 1.14.0 - Tokenizers 0.10.3
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "distilgpt2", "model-index": [{"name": "distilgpt2-sgnews", "results": []}]}
chinhon/distilgpt2-sgnews
null
[ "transformers", "pytorch", "tensorboard", "gpt2", "text-generation", "generated_from_trainer", "base_model:distilgpt2", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
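A minimal inference sketch for the chinhon/distilgpt2-sgnews card above, assuming the standard `transformers` text-generation pipeline; the prompt string and sampling settings are illustrative, not taken from the card.

```python
from transformers import pipeline, set_seed

# Load the fine-tuned DistilGPT2 checkpoint from the Hub.
generator = pipeline("text-generation", model="chinhon/distilgpt2-sgnews")
set_seed(42)

# Illustrative prompt; the card does not document an expected prompt format.
out = generator("Singapore news:", max_new_tokens=40, do_sample=True, top_p=0.95)
print(out[0]["generated_text"])
```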
text-classification
transformers
{}
chinhon/fake_tweet_detect
null
[ "transformers", "pytorch", "safetensors", "distilbert", "text-classification", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
# Model Trained Using AutoNLP - Problem type: Summarization - Model ID: 25965855 - CO2 Emissions (in grams): 114.71292762345828 ## Validation Metrics - Loss: 1.3862273693084717 - Rouge1: 52.4988 - Rouge2: 31.6973 - RougeL: 47.1727 - RougeLsum: 47.1576 - Gen Len: 17.6194 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/chinhon/autonlp-sg_headline_generator-25965855 ```
{"language": "en", "tags": "autonlp", "datasets": ["chinhon/autonlp-data-sg_headline_generator"], "widget": [{"text": "I love AutoNLP \ud83e\udd17"}], "co2_eq_emissions": 114.71292762345828}
chinhon/headline_writer
null
[ "transformers", "pytorch", "safetensors", "bart", "text2text-generation", "autonlp", "en", "dataset:chinhon/autonlp-data-sg_headline_generator", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
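As a local alternative to the cURL call in the chinhon/headline_writer card above, a hedged sketch using the `transformers` summarization pipeline; the article placeholder and length limits (chosen to roughly match the reported Gen Len of ~18 tokens) are assumptions.

```python
from transformers import pipeline

# BART-based headline generator trained with AutoNLP, run locally instead of via the Inference API.
headline_writer = pipeline("summarization", model="chinhon/headline_writer")

article = "..."  # placeholder: replace with the article text to be headlined
print(headline_writer(article, max_length=24, min_length=5)[0]["summary_text"])
```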
text2text-generation
transformers
# Model Trained Using AutoNLP - Problem type: Summarization - Model ID: 25965856 - CO2 Emissions (in grams): 396.629376395644 ## Validation Metrics - Loss: 1.4130597114562988 - Rouge1: 51.7922 - Rouge2: 30.8259 - RougeL: 46.4585 - RougeLsum: 46.4807 - Gen Len: 15.8411 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoNLP"}' https://api-inference.huggingface.co/chinhon/autonlp-sg_headline_generator-25965856 ```
{"language": "en", "tags": "autonlp", "datasets": ["chinhon/autonlp-data-sg_headline_generator"], "widget": [{"text": "I love AutoNLP \ud83e\udd17"}], "co2_eq_emissions": 396.629376395644}
chinhon/headline_writer2
null
[ "transformers", "pytorch", "safetensors", "bart", "text2text-generation", "autonlp", "en", "dataset:chinhon/autonlp-data-sg_headline_generator", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-large-commentaries_hd This model is a fine-tuned version of [google/pegasus-large](https://huggingface.co/google/pegasus-large) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.5453 - Rouge1: 26.3475 - Rouge2: 9.5095 - Rougel: 22.6367 - Rougelsum: 22.8127 - Gen Len: 14.4789 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 2.5718 | 1.0 | 4710 | 2.5277 | 25.1384 | 8.6528 | 21.3443 | 21.5289 | 15.3268 | | 2.4034 | 2.0 | 9420 | 2.4973 | 25.9298 | 9.2238 | 22.3192 | 22.4817 | 14.2243 | | 2.2093 | 3.0 | 14130 | 2.5013 | 26.6036 | 9.7482 | 22.8409 | 23.0077 | 14.2263 | | 2.0518 | 4.0 | 18840 | 2.5272 | 26.4723 | 9.6599 | 22.7439 | 22.9201 | 14.38 | | 1.9906 | 5.0 | 23550 | 2.5453 | 26.3475 | 9.5095 | 22.6367 | 22.8127 | 14.4789 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-large", "model-index": [{"name": "pegasus-large-commentaries_hd", "results": []}]}
chinhon/pegasus-large-commentaries_hd
null
[ "transformers", "pytorch", "tensorboard", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-large", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
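The hyperparameter list in the pegasus-large-commentaries_hd card above maps directly onto a Hugging Face `Seq2SeqTrainingArguments` object; a sketch under that assumption is below. The output directory name is a placeholder, and the Adam betas/epsilon are left at their defaults, which equal the values reported in the card.

```python
from transformers import Seq2SeqTrainingArguments

# Mirrors the hyperparameters reported in the card; fp16 corresponds to "Native AMP".
training_args = Seq2SeqTrainingArguments(
    output_dir="pegasus-large-commentaries_hd",  # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    fp16=True,
    predict_with_generate=True,  # needed so ROUGE can be computed from generated summaries
)
```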
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-multi_news-commentaries_hdwriter This model is a fine-tuned version of [google/pegasus-multi_news](https://huggingface.co/google/pegasus-multi_news) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.7259 - Rouge1: 21.3899 - Rouge2: 6.2409 - Rougel: 16.6172 - Rougelsum: 17.808 - Gen Len: 34.7016 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 2.847 | 1.0 | 4710 | 2.7513 | 20.5559 | 5.9762 | 16.1223 | 17.2872 | 35.81 | | 2.6399 | 2.0 | 9420 | 2.6890 | 21.2052 | 6.0104 | 16.5753 | 17.6517 | 34.5242 | | 2.3811 | 3.0 | 14130 | 2.6904 | 21.2358 | 6.1416 | 16.6053 | 17.7067 | 34.6157 | | 2.2388 | 4.0 | 18840 | 2.7112 | 21.3806 | 6.1895 | 16.6909 | 17.7504 | 34.5227 | | 2.1589 | 5.0 | 23550 | 2.7259 | 21.3899 | 6.2409 | 16.6172 | 17.808 | 34.7016 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-multi_news", "model-index": [{"name": "pegasus-multi_news-commentaries_hdwriter", "results": []}]}
chinhon/pegasus-multi_news-commentaries_hdwriter
null
[ "transformers", "pytorch", "tensorboard", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-multi_news", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-multi_news-headline This model is a fine-tuned version of [google/pegasus-multi_news](https://huggingface.co/google/pegasus-multi_news) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.4421 - Rouge1: 41.616 - Rouge2: 22.922 - Rougel: 35.2189 - Rougelsum: 35.3561 - Gen Len: 33.9532 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.6637 | 1.0 | 31200 | 1.4877 | 41.0996 | 22.579 | 34.9311 | 35.0611 | 34.3431 | | 1.4395 | 2.0 | 62400 | 1.4388 | 41.6075 | 22.8274 | 35.2051 | 35.3526 | 33.7965 | | 1.3137 | 3.0 | 93600 | 1.4421 | 41.616 | 22.922 | 35.2189 | 35.3561 | 33.9532 | ### Framework versions - Transformers 4.12.2 - Pytorch 1.9.0+cu111 - Datasets 1.14.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-multi_news", "model-index": [{"name": "pegasus-multi_news-headline", "results": []}]}
chinhon/pegasus-multi_news-headline
null
[ "transformers", "pytorch", "tensorboard", "safetensors", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-multi_news", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
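For the pegasus-multi_news-headline card above, a sketch of explicit loading and generation with `AutoTokenizer`/`AutoModelForSeq2SeqLM`; the article placeholder, beam count, and length cap (roughly matching the reported Gen Len of ~34 tokens) are assumptions.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "chinhon/pegasus-multi_news-headline"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

article = "..."  # placeholder: replace with the news article to be headlined
inputs = tokenizer(article, truncation=True, return_tensors="pt")
with torch.no_grad():
    ids = model.generate(**inputs, num_beams=4, max_length=64)
print(tokenizer.batch_decode(ids, skip_special_tokens=True)[0])
```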
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-multi_news-malay_headlines_02 This model is a fine-tuned version of [google/pegasus-multi_news](https://huggingface.co/google/pegasus-multi_news) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.9295 - Rouge1: 39.9859 - Rouge2: 20.1943 - Rougel: 36.1927 - Rougelsum: 36.2105 - Gen Len: 35.6062 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 2.0943 | 1.0 | 53582 | 1.9295 | 39.9859 | 20.1943 | 36.1927 | 36.2105 | 35.6062 | ### Framework versions - Transformers 4.12.3 - Pytorch 1.10.0+cu111 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-multi_news", "model-index": [{"name": "pegasus-multi_news-malay_headlines_02", "results": []}]}
chinhon/pegasus-multi_news-malay_headlines_02
null
[ "transformers", "pytorch", "tensorboard", "safetensors", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-multi_news", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-multi_news-summarizer_01 This model is a fine-tuned version of [google/pegasus-multi_news](https://huggingface.co/google/pegasus-multi_news) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2794 - Rouge1: 52.1693 - Rouge2: 34.8989 - Rougel: 41.2385 - Rougelsum: 48.4365 - Gen Len: 98.6433 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | 1.3936 | 1.0 | 16113 | 1.2972 | 51.5747 | 34.2062 | 40.7279 | 47.7783 | 95.0004 | | 1.3664 | 2.0 | 32226 | 1.2817 | 52.1077 | 34.8189 | 41.1614 | 48.3894 | 100.3265 | | 1.3002 | 3.0 | 48339 | 1.2794 | 52.1693 | 34.8989 | 41.2385 | 48.4365 | 98.6433 | ### Framework versions - Transformers 4.12.3 - Pytorch 1.9.0+cu111 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-multi_news", "model-index": [{"name": "pegasus-multi_news-summarizer_01", "results": []}]}
chinhon/pegasus-multi_news-summarizer_01
null
[ "transformers", "pytorch", "tensorboard", "safetensors", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-multi_news", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-newsroom-commentaries_hdwriter This model is a fine-tuned version of [google/pegasus-newsroom](https://huggingface.co/google/pegasus-newsroom) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.5316 - Rouge1: 21.4079 - Rouge2: 6.2399 - Rougel: 16.6644 - Rougelsum: 17.8501 - Gen Len: 34.4111 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:| | 2.6327 | 1.0 | 4710 | 2.5474 | 20.9392 | 6.1702 | 16.3859 | 17.5963 | 35.6626 | | 2.4322 | 2.0 | 9420 | 2.5198 | 21.4026 | 6.1811 | 16.5874 | 17.8207 | 34.5976 | | 2.2703 | 3.0 | 14130 | 2.5316 | 21.4079 | 6.2399 | 16.6644 | 17.8501 | 34.4111 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "model-index": [{"name": "pegasus-newsroom-commentaries_hdwriter", "results": []}]}
chinhon/pegasus-newsroom-commentaries_hdwriter
null
[ "transformers", "pytorch", "tensorboard", "safetensors", "pegasus", "text2text-generation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-newsroom-headline_writer This model is a fine-tuned version of [google/pegasus-newsroom](https://huggingface.co/google/pegasus-newsroom) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.3988 - Rouge1: 41.8748 - Rouge2: 23.1947 - Rougel: 35.6263 - Rougelsum: 35.7355 - Gen Len: 34.1266 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.5784 | 1.0 | 31200 | 1.4287 | 41.4257 | 22.9355 | 35.3299 | 35.4648 | 34.4677 | | 1.3501 | 2.0 | 62400 | 1.3955 | 41.9119 | 23.1912 | 35.6698 | 35.7479 | 33.8672 | | 1.2417 | 3.0 | 93600 | 1.3988 | 41.8748 | 23.1947 | 35.6263 | 35.7355 | 34.1266 | ### Framework versions - Transformers 4.11.3 - Pytorch 1.9.0+cu111 - Datasets 1.14.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-newsroom", "model-index": [{"name": "pegasus-newsroom-headline_writer", "results": []}]}
chinhon/pegasus-newsroom-headline_writer
null
[ "transformers", "pytorch", "tensorboard", "safetensors", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-newsroom", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-newsroom-malay_headlines This model is a fine-tuned version of [google/pegasus-newsroom](https://huggingface.co/google/pegasus-newsroom) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.6603 - Rouge1: 42.6667 - Rouge2: 22.8739 - Rougel: 38.6684 - Rougelsum: 38.6928 - Gen Len: 34.7995 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.9713 | 1.0 | 15310 | 1.8121 | 41.1469 | 21.5262 | 37.3081 | 37.3377 | 35.0939 | | 1.7917 | 2.0 | 30620 | 1.6913 | 42.4027 | 22.6089 | 38.4471 | 38.4699 | 34.8149 | | 1.7271 | 3.0 | 45930 | 1.6603 | 42.6667 | 22.8739 | 38.6684 | 38.6928 | 34.7995 | ### Framework versions - Transformers 4.12.2 - Pytorch 1.9.0+cu111 - Datasets 1.14.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-newsroom", "model-index": [{"name": "pegasus-newsroom-malay_headlines", "results": []}]}
chinhon/pegasus-newsroom-malay_headlines
null
[ "transformers", "pytorch", "tensorboard", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-newsroom", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-newsroom-summarizer_02 This model is a fine-tuned version of [google/pegasus-newsroom](https://huggingface.co/google/pegasus-newsroom) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2204 - Rouge1: 52.4459 - Rouge2: 35.2568 - Rougel: 41.6213 - Rougelsum: 48.7859 - Gen Len: 98.0627 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 1.3231 | 1.0 | 16113 | 1.2305 | 52.1565 | 34.8681 | 41.3189 | 48.4258 | 95.9049 | | 1.3001 | 2.0 | 32226 | 1.2186 | 52.4921 | 35.2661 | 41.6264 | 48.8168 | 98.9241 | | 1.2372 | 3.0 | 48339 | 1.2204 | 52.4459 | 35.2568 | 41.6213 | 48.7859 | 98.0627 | ### Framework versions - Transformers 4.12.3 - Pytorch 1.9.0+cu111 - Datasets 1.15.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-newsroom", "model-index": [{"name": "pegasus-newsroom-summarizer_02", "results": []}]}
chinhon/pegasus-newsroom-summarizer_02
null
[ "transformers", "pytorch", "tensorboard", "safetensors", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-newsroom", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-generation
transformers
Chizuru Ichinose DialoGPT Model.
{"tags": ["conversational"]}
chip/DialoGPT-small-chizuru
null
[ "transformers", "pytorch", "gpt2", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
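The chip/DialoGPT-small-chizuru card above gives no usage snippet; a minimal single-turn chat sketch, assuming the usual DialoGPT convention of appending the EOS token to the user turn, is:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "chip/DialoGPT-small-chizuru"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Encode one user turn, terminated by the EOS token as in the DialoGPT convention.
user_input = tokenizer("Hi, how are you?" + tokenizer.eos_token, return_tensors="pt")
reply_ids = model.generate(**user_input, max_length=100, pad_token_id=tokenizer.eos_token_id)

# Strip the prompt tokens and decode only the generated reply.
print(tokenizer.decode(reply_ids[0, user_input["input_ids"].shape[-1]:], skip_special_tokens=True))
```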
null
null
{}
chipmooon/test_model
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chirag2706/AudioClassifier
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
keras
{}
chirag2706/FlowerClassifier
null
[ "keras", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-generation
transformers
{}
chirag2706/gpt2_code_generation_model
null
[ "transformers", "pytorch", "jax", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
### DistilBERT model fine-tuned on the task of classifying product descriptions into one of 45 broad [NICE classifications](https://www.wipo.int/classifications/nice/en/)
{}
chisadi/nice-distilbert-v2
null
[ "transformers", "pytorch", "distilbert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
chisadi/nice-distilbert
null
[ "transformers", "pytorch", "distilbert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
{"license": "mit"}
chitanda/merit-albert-v2-xxlarge-v1
null
[ "transformers", "pytorch", "albert", "license:mit", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
{"license": "mit"}
chitanda/merit-deberta-v2-xlarge-v1
null
[ "transformers", "pytorch", "deberta-v2", "license:mit", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
{"license": "mit"}
chitanda/merit-deberta-v2-xxlarge-v1
null
[ "transformers", "pytorch", "deberta-v2", "license:mit", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
{"license": "mit"}
chitanda/merit-roberta-large-v1
null
[ "transformers", "pytorch", "roberta", "license:mit", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
{"license": "mit"}
chitanda/merit-roberta-large-v2
null
[ "transformers", "pytorch", "roberta", "license:mit", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
chitra/distilbert-negation
null
[ "transformers", "pytorch", "tf", "distilbert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetune-paraphrase-model This model is a fine-tuned version of [coderpotter/adversarial-paraphrasing-detector](https://huggingface.co/coderpotter/adversarial-paraphrasing-detector) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 0.1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 0.1 | 200 | 3.0116 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "model-index": [{"name": "finetune-paraphrase-model", "results": []}]}
chitra/finetune-paraphrase-model
null
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
chitra/finetuned-adversarial-paraphrase-model-test
null
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetuned-adversarial-paraphrase-model This model is a fine-tuned version of [coderpotter/adversarial-paraphrasing-detector](https://huggingface.co/coderpotter/adversarial-paraphrasing-detector) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 7.5680 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.0848 | 1.0 | 2000 | 5.4633 | | 0.0495 | 2.0 | 4000 | 6.0352 | | 0.0121 | 3.0 | 6000 | 7.5680 | ### Framework versions - Transformers 4.15.0 - Pytorch 1.10.0+cu111 - Datasets 1.17.0 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "model-index": [{"name": "finetuned-adversarial-paraphrase-model", "results": []}]}
chitra/finetuned-adversarial-paraphrase-model
null
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
chitra/finetuned-adversarial-paraphrase-modell
null
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
chitra/finetuned-adversarial-paraphrasing-detector
null
[ "transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
chitra/finetuned-adversial-paraphrase-model
null
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
### Welcome to RoBERTArg! πŸ€– **Model description** This model was trained on ~25k heterogeneous manually annotated sentences (πŸ“š [Stab et al. 2018](https://www.aclweb.org/anthology/D18-1402/)) of controversial topics to classify text into one of two labels: 🏷 **NON-ARGUMENT** (0) and **ARGUMENT** (1). πŸ—ƒ **Dataset** The dataset (πŸ“š Stab et al. 2018) consists of **ARGUMENTS** (\~11k) that either support or oppose a topic if it includes a relevant reason for supporting or opposing the topic, or as a **NON-ARGUMENT** (\~14k) if it does not include reasons. The authors focus on controversial topics, i.e., topics that include "an obvious polarity to the possible outcomes" and compile a final set of eight controversial topics: _abortion, school uniforms, death penalty, marijuana legalization, nuclear energy, cloning, gun control, and minimum wage_. | TOPIC | ARGUMENT | NON-ARGUMENT | |----|----|----| | abortion | 2213 | 2,427 | | school uniforms | 325 | 1,734 | | death penalty | 325 | 2,083 | | marijuana legalization | 325 | 1,262 | | nuclear energy | 325 | 2,118 | | cloning | 325 | 1,494 | | gun control | 325 | 1,889 | | minimum wage | 325 | 1,346 | πŸƒπŸΌβ€β™‚οΈ**Model training** **RoBERTArg** was fine-tuned on a RoBERTA (base) pre-trained model from HuggingFace using the HuggingFace trainer with the following hyperparameters: ``` training_args = TrainingArguments( num_train_epochs=2, learning_rate=2.3102e-06, seed=8, per_device_train_batch_size=64, per_device_eval_batch_size=64, ) ``` πŸ“Š **Evaluation** The model was evaluated on an evaluation set (20%): | Model | Acc | F1 | R arg | R non | P arg | P non | |----|----|----|----|----|----|----| | RoBERTArg | 0.8193 | 0.8021 | 0.8463 | 0.7986 | 0.7623 | 0.8719 | Showing the **confusion matrix** using again the evaluation set: | | ARGUMENT | NON-ARGUMENT | |----|----|----| | ARGUMENT | 2213 | 558 | | NON-ARGUMENT | 325 | 1790 | ⚠️ **Intended Uses & Potential Limitations** The model can only be a starting point to dive into the exciting field of argument mining. But be aware. An argument is a complex structure, with multiple dependencies. Therefore, the model may perform less well on different topics and text types not included in the training set. Enjoy and stay tuned! πŸš€ 🐦 Twitter: [@chklamm](http://twitter.com/chklamm)
{"language": "en", "widget": [{"text": "It has been determined that the amount of greenhouse gases have decreased by almost half because of the prevalence in the utilization of nuclear power."}]}
chkla/roberta-argument
null
[ "transformers", "pytorch", "tf", "jax", "safetensors", "roberta", "text-classification", "en", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
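A usage sketch for the chkla/roberta-argument card above, assuming the `transformers` text-classification pipeline; the example sentence is the widget text from the card's metadata.

```python
from transformers import pipeline

# Binary argument detector: NON-ARGUMENT (0) vs. ARGUMENT (1).
clf = pipeline("text-classification", model="chkla/roberta-argument")

sentence = ("It has been determined that the amount of greenhouse gases have decreased "
            "by almost half because of the prevalence in the utilization of nuclear power.")
print(clf(sentence))
```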
null
null
{}
chloeh13q/wav2vec2-base-finetuned-ks
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chloem38/Chloe
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chmanoj/kenlm_te
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chmanoj/wav2vec2-large-xls-r-300m-turkish-demo-colab
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the OPENSLR_SLR66 - NA dataset. It achieves the following results on the evaluation set: - Loss: 0.3119 - Wer: 0.2613 ### Evaluation metrics | Metric | Split | Decode with LM | Value | |:------:|:------:|:--------------:|:---------:| | WER | Train | No | 5.36 | | CER | Train | No | 1.11 | | WER | Test | No | 26.14 | | CER | Test | No | 4.93 | | WER | Train | Yes | 5.04 | | CER | Train | Yes | 1.07 | | WER | Test | Yes | 20.69 | | CER | Test | Yes | 3.986 | ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 150.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:------:|:-----:|:---------------:|:------:| | 2.9038 | 4.8 | 500 | 3.0125 | 1.0 | | 1.3777 | 9.61 | 1000 | 0.8681 | 0.8753 | | 1.1436 | 14.42 | 1500 | 0.6256 | 0.7961 | | 1.0997 | 19.23 | 2000 | 0.5244 | 0.6875 | | 1.0363 | 24.04 | 2500 | 0.4585 | 0.6276 | | 0.7996 | 28.84 | 3000 | 0.4072 | 0.5295 | | 0.825 | 33.65 | 3500 | 0.3590 | 0.5222 | | 0.8018 | 38.46 | 4000 | 0.3678 | 0.4671 | | 0.7545 | 43.27 | 4500 | 0.3474 | 0.3962 | | 0.7375 | 48.08 | 5000 | 0.3224 | 0.3869 | | 0.6198 | 52.88 | 5500 | 0.3233 | 0.3630 | | 0.6608 | 57.69 | 6000 | 0.3029 | 0.3308 | | 0.645 | 62.5 | 6500 | 0.3195 | 0.3722 | | 0.5249 | 67.31 | 7000 | 0.3004 | 0.3202 | | 0.4875 | 72.11 | 7500 | 0.2826 | 0.2992 | | 0.5171 | 76.92 | 8000 | 0.2962 | 0.2976 | | 0.4974 | 81.73 | 8500 | 0.2990 | 0.2933 | | 0.4387 | 86.54 | 9000 | 0.2834 | 0.2755 | | 0.4511 | 91.34 | 9500 | 0.2886 | 0.2787 | | 0.4112 | 96.15 | 10000 | 0.3093 | 0.2976 | | 0.4064 | 100.96 | 10500 | 0.3123 | 0.2863 | | 0.4047 | 105.77 | 11000 | 0.2968 | 0.2719 | | 0.3519 | 110.57 | 11500 | 0.3106 | 0.2832 | | 0.3719 | 115.38 | 12000 | 0.3030 | 0.2737 | | 0.3669 | 120.19 | 12500 | 0.2964 | 0.2714 | | 0.3386 | 125.0 | 13000 | 0.3101 | 0.2714 | | 0.3137 | 129.8 | 13500 | 0.3063 | 0.2710 | | 0.3008 | 134.61 | 14000 | 0.3082 | 0.2617 | | 0.301 | 139.42 | 14500 | 0.3121 | 0.2628 | | 0.3291 | 144.23 | 15000 | 0.3105 | 0.2612 | | 0.3133 | 149.04 | 15500 | 0.3114 | 0.2624 | ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.1+cu102 - Datasets 1.17.1.dev0 - Tokenizers 0.11.0
{"language": ["te"], "license": "apache-2.0", "tags": ["automatic-speech-recognition", "openslr_SLR66", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard"], "datasets": ["openslr", "SLR66"], "metrics": ["wer"], "model-index": [{"name": "xls-r-1B-te", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Open SLR", "type": "openslr", "args": "SLR66"}, "metrics": [{"type": "wer", "value": 20.624, "name": "Test WER"}, {"type": "cer", "value": 3.979, "name": "Test CER"}, {"type": "wer", "value": 26.14777618364419, "name": "Test WER (without LM)"}, {"type": "cer", "value": 4.932543184970369, "name": "Test CER (without LM)"}]}]}]}
chmanoj/xls-r-1B-te
null
[ "transformers", "pytorch", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "openslr_SLR66", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "te", "dataset:openslr", "dataset:SLR66", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
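The WER/CER table in the chmanoj/xls-r-1B-te card above can be recomputed with the same metric loaders used elsewhere on this page; a minimal sketch with placeholder prediction/reference lists (real ones would come from decoding the test split, and both metrics need the `jiwer` package) is:

```python
from datasets import load_metric

predictions = ["..."]  # placeholder: decoded transcriptions
references = ["..."]   # placeholder: ground-truth transcriptions

wer = load_metric("wer")
cer = load_metric("cer")
print("WER: {:.2f}%".format(100 * wer.compute(predictions=predictions, references=references)))
print("CER: {:.2f}%".format(100 * cer.compute(predictions=predictions, references=references)))
```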
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-2b](https://huggingface.co/facebook/wav2vec2-xls-r-2b) on the OPENSLR_SLR66 - NA dataset. It achieves the following results on the evaluation set: - Loss: 0.4253 - Wer: 0.5109 ### Evaluation metrics | Metric | Split | Decode with LM | Value | |:------:|:------:|:--------------:|:---------:| | WER | Train | No | | | CER | Train | No | | | WER | Test | No | | | CER | Test | No | | | WER | Train | Yes | | | CER | Train | Yes | | | WER | Test | Yes | | | CER | Test | Yes | | ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 12 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - learning_rate: 3e-6 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 150.0 - hidden_dropout: 0.15 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.1+cu102 - Datasets 1.17.1.dev0 - Tokenizers 0.11.0
{"language": ["te"], "license": "apache-2.0", "tags": ["automatic-speech-recognition", "openslr_SLR66", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard"], "datasets": ["openslr", "SLR66"], "metrics": ["wer"], "model-index": [{"name": "xls-r-1B-te", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Open SLR", "type": "openslr", "args": "SLR66"}, "metrics": [{"type": "wer", "value": 0.51, "name": "Test WER"}, {"type": "cer", "value": 0.097, "name": "Test CER"}]}]}]}
chmanoj/xls-r-2B-te
null
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "openslr_SLR66", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "te", "dataset:openslr", "dataset:SLR66", "license:apache-2.0", "model-index", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - SV-SE dataset. It achieves the following results on the evaluation set: - Loss: 0.8004 - Wer: 0.7139 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7.5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 10.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 2.6683 | 1.45 | 500 | 1.7698 | 1.0041 | | 1.9548 | 2.91 | 1000 | 1.0890 | 0.8602 | | 1.9568 | 4.36 | 1500 | 1.0878 | 0.8680 | | 1.9497 | 5.81 | 2000 | 1.1501 | 0.8838 | | 1.8453 | 7.27 | 2500 | 1.0452 | 0.8418 | | 1.6952 | 8.72 | 3000 | 0.9153 | 0.7823 | ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.0+cu113 - Datasets 1.18.1.dev0 - Tokenizers 0.10.3
{"language": ["sv-SE"], "license": "apache-2.0", "tags": ["automatic-speech-recognition", "mozilla-foundation/common_voice_7_0", "generated_from_trainer"], "datasets": ["common_voice"], "model-index": [{"name": "", "results": []}]}
chmanoj/xls-r-300m-sv
null
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "mozilla-foundation/common_voice_7_0", "generated_from_trainer", "dataset:common_voice", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
automatic-speech-recognition
transformers
{}
chmanoj/xls-r-300m-ta
null
[ "transformers", "pytorch", "tensorboard", "wav2vec2", "automatic-speech-recognition", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the OPENSLR_SLR66 - NA dataset. It achieves the following results on the evaluation set: - Loss: 0.2680 - Wer: 0.3467 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 7.5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2000 - num_epochs: 10.0 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:-----:|:---------------:|:------:| | 3.0304 | 4.81 | 500 | 1.5676 | 1.0554 | | 1.5263 | 9.61 | 1000 | 0.4693 | 0.8023 | | 1.5299 | 14.42 | 1500 | 0.4368 | 0.7311 | | 1.5063 | 19.23 | 2000 | 0.4360 | 0.7302 | | 1.455 | 24.04 | 2500 | 0.4213 | 0.6692 | | 1.4755 | 28.84 | 3000 | 0.4329 | 0.5943 | | 1.352 | 33.65 | 3500 | 0.4074 | 0.5765 | | 1.3122 | 38.46 | 4000 | 0.3866 | 0.5630 | | 1.2799 | 43.27 | 4500 | 0.3860 | 0.5480 | | 1.212 | 48.08 | 5000 | 0.3590 | 0.5317 | | 1.1645 | 52.88 | 5500 | 0.3283 | 0.4757 | | 1.0854 | 57.69 | 6000 | 0.3162 | 0.4687 | | 1.0292 | 62.5 | 6500 | 0.3126 | 0.4416 | | 0.9607 | 67.31 | 7000 | 0.2990 | 0.4066 | | 0.9156 | 72.12 | 7500 | 0.2870 | 0.4009 | | 0.8329 | 76.92 | 8000 | 0.2791 | 0.3909 | | 0.7979 | 81.73 | 8500 | 0.2770 | 0.3670 | | 0.7144 | 86.54 | 9000 | 0.2841 | 0.3661 | | 0.6997 | 91.35 | 9500 | 0.2721 | 0.3485 | | 0.6568 | 96.15 | 10000 | 0.2681 | 0.3437 | ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.1+cu102 - Datasets 1.17.1.dev0 - Tokenizers 0.11.0
{"language": ["te"], "license": "apache-2.0", "tags": ["automatic-speech-recognition", "openslr_SLR66", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard"], "datasets": ["openslr", "SLR66"], "metrics": ["wer"], "model-index": [{"name": "xls-r-300m-te", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "Open SLR", "type": "openslr", "args": "SLR66"}, "metrics": [{"type": "wer", "value": 24.695121951219512, "name": "Test WER"}, {"type": "cer", "value": 4.861934182322532, "name": "Test CER"}]}]}]}
chmanoj/xls-r-300m-te
null
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "openslr_SLR66", "generated_from_trainer", "robust-speech-event", "hf-asr-leaderboard", "te", "dataset:openslr", "dataset:SLR66", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
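An inference sketch for the chmanoj/xls-r-300m-te card above, assuming the `transformers` automatic-speech-recognition pipeline; the audio file path is a placeholder.

```python
from transformers import pipeline

# CTC wav2vec2 checkpoint fine-tuned on OpenSLR SLR66 (Telugu).
asr = pipeline("automatic-speech-recognition", model="chmanoj/xls-r-300m-te")

# Placeholder path; the pipeline decodes the file at the model's expected sampling rate.
print(asr("sample_telugu.wav")["text"])
```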
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # This model is a fine-tuned version of [hf-test/xls-r-dummy](https://huggingface.co/hf-test/xls-r-dummy) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - AB dataset. It achieves the following results on the evaluation set: - Loss: 156.8786 - Wer: 1.3460 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 2 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - training_steps: 10 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.16.0.dev0 - Pytorch 1.10.0+cu113 - Datasets 1.18.1.dev0 - Tokenizers 0.10.3
{"language": ["ab"], "tags": ["automatic-speech-recognition", "mozilla-foundation/common_voice_7_0", "generated_from_trainer"], "datasets": ["common_voice"], "model-index": [{"name": "", "results": []}]}
chmanoj/xls-r-demo-test
null
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "mozilla-foundation/common_voice_7_0", "generated_from_trainer", "ab", "dataset:common_voice", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chntl/b3t5-pmid-removed
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chnyangs/bert-base-uncased-finetuned-cola
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chnyangs/distilbert-base-uncased-finetuned-cola
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
cho/distilbert-base-uncased-finetuned-squad
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chochan/tan
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
choil/testFineTune
null
[ "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
automatic-speech-recognition
transformers
# Wav2Vec2-Large-XLSR-53 in Thai Language (Trained with the deepcut tokenizer)
{"language": "th", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning"], "datasets": ["common_voice"]}
chompk/wav2vec2-large-xlsr-thai-tokenized
null
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning", "th", "dataset:common_voice", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
choondrise/emolve
null
[ "transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
Test English-Dhivehi / Dhivehi-English NMT. It would need a lot more data to get accurate translations.
{}
chopey/testmntdv
null
[ "transformers", "pytorch", "mt5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
These models were made for my course project in the NLP and AI special course at the University of Latvia during my first semester of study.
{}
chrisAS12/specseminars
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chrisevan/68
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
automatic-speech-recognition
transformers
# Wav2Vec2-Large-XLSR-53-Fon Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on [Fon (or Fongbe)](https://en.wikipedia.org/wiki/Fon_language) using the [Fon Dataset](https://github.com/laleye/pyFongbe/tree/master/data). When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import os import re import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor #Load test_dataset from saved files in folder for root, dirs, files in os.walk("test/"): test_dataset = load_dataset("json", data_files=[os.path.join(root,i) for i in files], split="train") #Remove unnecessary chars chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\β€œ\\%\\β€˜\\”]' def remove_special_characters(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " " return batch test_dataset = test_dataset.map(remove_special_characters) processor = Wav2Vec2Processor.from_pretrained("chrisjay/wav2vec2-large-xlsr-53-fon") model = Wav2Vec2ForCTC.from_pretrained("chrisjay/wav2vec2-large-xlsr-53-fon") #No need for resampling because audio dataset already at 16kHz #resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = speech_array.squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"][:2], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset["sentence"][:2]) ``` ## Evaluation The model can be evaluated as follows on our unique Fon test data. ```python import os import re import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor for root, dirs, files in os.walk("test/"): test_dataset = load_dataset("json", data_files=[os.path.join(root,i) for i in files], split="train") chars_to_ignore_regex = '[\\,\\?\\.\\!\\-\\;\\:\\"\\β€œ\\%\\β€˜\\”]' def remove_special_characters(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() + " " return batch test_dataset = test_dataset.map(remove_special_characters) wer = load_metric("wer") processor = Wav2Vec2Processor.from_pretrained("chrisjay/wav2vec2-large-xlsr-53-fon") model = Wav2Vec2ForCTC.from_pretrained("chrisjay/wav2vec2-large-xlsr-53-fon") model.to("cuda") # Preprocessing the datasets.
# We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = speech_array[0].numpy() batch["sampling_rate"] = sampling_rate batch["target_text"] = batch["sentence"] return batch test_dataset = test_dataset.map(speech_file_to_array_fn) #Evaluation on test dataset def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_strings"] = processor.batch_decode(pred_ids) return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:.2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: 14.97 % ## Training The [Fon dataset](https://github.com/laleye/pyFongbe/tree/master/data) was split into `train` (8235 samples), `validation` (1107 samples), and `test` (1061 samples). The script used for training can be found [here](https://colab.research.google.com/drive/11l6qhJCYnPTG1TQZ8f3EvKB9z12TQi4g?usp=sharing) # Collaborators on this project - Chris C. Emezue ([Twitter](https://twitter.com/ChrisEmezue))|([email protected]) - Bonaventure F.P. Dossou (HuggingFace Username: [bonadossou](https://huggingface.co/bonadossou))|([Twitter](https://twitter.com/bonadossou))|([email protected]) ## This is a joint project continuing our research on [OkwuGbΓ©: End-to-End Speech Recognition for Fon and Igbo](https://arxiv.org/abs/2103.07762)
{"language": "fon", "license": "apache-2.0", "tags": ["audio", "automatic-speech-recognition", "speech", "xlsr-fine-tuning-week", "hf-asr-leaderboard"], "datasets": ["fon_dataset"], "metrics": ["wer"], "model-index": [{"name": "Fon XLSR Wav2Vec2 Large 53", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Speech Recognition"}, "dataset": {"name": "fon", "type": "fon_dataset", "args": "fon"}, "metrics": [{"type": "wer", "value": 14.97, "name": "Test WER"}]}]}]}
chrisjay/fonxlsr
null
[ "transformers", "pytorch", "jax", "wav2vec2", "automatic-speech-recognition", "audio", "speech", "xlsr-fine-tuning-week", "hf-asr-leaderboard", "fon", "dataset:fon_dataset", "arxiv:2103.07762", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
# Interacting with the Masakhane Benchmark Models I created this demo for very easy interaction with the [benchmark models on Masakhane](https://github.com/masakhane-io/masakhane-mt/tree/master/benchmarks) which were trained with [JoeyNMT](https://github.com/chrisemezue/joeynmt) (my forked version). To access the space click [here](https://huggingface.co/spaces/chrisjay/masakhane-benchmarks). To include your language, all you need to do is: 1. Create a folder in the format *src-tgt/main* for your language pair, if it does not exist. 2. Inside the *main* folder put the following files: 1. model checkpoint. Rename it to `best.ckpt`. 2. `config.yaml` file. This is the JoeyNMT config file which loads the model and pre-processing parameters. 3. `src_vocab.txt` file. 4. `trg_vocab.txt` file. The space currently supports these languages: | source language | target language | |:---------------:|:---------------:| | English | Swahili | | English | Afrikaans | | English | Arabic | | English | Urhobo | | English | αΊΈΜ€dΓ³ | | Efik | English | | English | Hausa | | English | Igbo | | English | Fon | | English | Twi | | English | Dendi | | English | αΊΈΜ€sΓ‘n | | English | Isoko | | English | Kamba | | English | Luo | | English | Southern Ndebele | | English | Tshivenda | | Shona | English | | Swahili | English | | Yoruba | English | TO DO: 1. Include more languages from the benchmark.
{"language": "african-languages", "license": "apache-2.0", "tags": ["african-languages", "machine-translation", "text"]}
chrisjay/masakhane_benchmarks
null
[ "african-languages", "machine-translation", "text", "license:apache-2.0", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
fill-mask
transformers
{}
chriskhanhtran/spanberta
null
[ "transformers", "pytorch", "jax", "roberta", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
spacy
Text statistics including readability and formality. | Feature | Description | | --- | --- | | **Name** | `en_statistics` | | **Version** | `0.0.1` | | **spaCy** | `>=3.1.1,<3.2.0` | | **Default Pipeline** | `tok2vec`, `tagger`, `parser`, `attribute_ruler`, `lemmatizer`, `syllables`, `formality`, `readability` | | **Components** | `tok2vec`, `tagger`, `parser`, `senter`, `attribute_ruler`, `lemmatizer`, `syllables`, `formality`, `readability` | | **Vectors** | 684830 keys, 20000 unique vectors (300 dimensions) | | **Sources** | [OntoNotes 5](https://catalog.ldc.upenn.edu/LDC2013T19) (Ralph Weischedel, Martha Palmer, Mitchell Marcus, Eduard Hovy, Sameer Pradhan, Lance Ramshaw, Nianwen Xue, Ann Taylor, Jeff Kaufman, Michelle Franchini, Mohammed El-Bachouti, Robert Belvin, Ann Houston)<br />[ClearNLP Constituent-to-Dependency Conversion](https://github.com/clir/clearnlp-guidelines/blob/master/md/components/dependency_conversion.md) (Emory University)<br />[WordNet 3.0](https://wordnet.princeton.edu/) (Princeton University)<br />[GloVe Common Crawl](https://nlp.stanford.edu/projects/glove/) (Jeffrey Pennington, Richard Socher, and Christopher D. Manning) | | **License** | `MIT` | | **Author** | [Chris Knowles](https://explosion.ai) | ### Label Scheme <details> <summary>View label scheme (96 labels for 3 components)</summary> | Component | Labels | | --- | --- | | **`tagger`** | `$`, `''`, `,`, `-LRB-`, `-RRB-`, `.`, `:`, `ADD`, `AFX`, `CC`, `CD`, `DT`, `EX`, `FW`, `HYPH`, `IN`, `JJ`, `JJR`, `JJS`, `LS`, `MD`, `NFP`, `NN`, `NNP`, `NNPS`, `NNS`, `PDT`, `POS`, `PRP`, `PRP$`, `RB`, `RBR`, `RBS`, `RP`, `SYM`, `TO`, `UH`, `VB`, `VBD`, `VBG`, `VBN`, `VBP`, `VBZ`, `WDT`, `WP`, `WP$`, `WRB`, `XX`, ```` | | **`parser`** | `ROOT`, `acl`, `acomp`, `advcl`, `advmod`, `agent`, `amod`, `appos`, `attr`, `aux`, `auxpass`, `case`, `cc`, `ccomp`, `compound`, `conj`, `csubj`, `csubjpass`, `dative`, `dep`, `det`, `dobj`, `expl`, `intj`, `mark`, `meta`, `neg`, `nmod`, `npadvmod`, `nsubj`, `nsubjpass`, `nummod`, `oprd`, `parataxis`, `pcomp`, `pobj`, `poss`, `preconj`, `predet`, `prep`, `prt`, `punct`, `quantmod`, `relcl`, `xcomp` | | **`senter`** | `I`, `S` | </details>
{"language": ["en"], "license": "mit", "tags": ["spacy", "text-classification"], "model-index": [{"name": "en_statistics", "results": []}]}
chrisknowles/en_statistics
null
[ "spacy", "text-classification", "en", "license:mit", "region:us" ]
null
2022-03-02T23:29:05+00:00
token-classification
spacy
Check style on English text (currently passive text). | Feature | Description | | --- | --- | | **Name** | `en_stylecheck` | | **Version** | `0.0.1` | | **spaCy** | `>=3.1.1,<3.2.0` | | **Default Pipeline** | `tok2vec`, `tagger`, `parser`, `attribute_ruler`, `lemmatizer`, `ner`, `stylecheck` | | **Components** | `tok2vec`, `tagger`, `parser`, `senter`, `attribute_ruler`, `lemmatizer`, `ner`, `stylecheck` | | **Vectors** | 684830 keys, 20000 unique vectors (300 dimensions) | | **Sources** | n/a | | **License** | `MIT` | | **Author** | [Explosion](https://explosion.ai) | ### Label Scheme <details> <summary>View label scheme (115 labels for 5 components)</summary> | Component | Labels | | --- | --- | | **`tagger`** | `$`, `''`, `,`, `-LRB-`, `-RRB-`, `.`, `:`, `ADD`, `AFX`, `CC`, `CD`, `DT`, `EX`, `FW`, `HYPH`, `IN`, `JJ`, `JJR`, `JJS`, `LS`, `MD`, `NFP`, `NN`, `NNP`, `NNPS`, `NNS`, `PDT`, `POS`, `PRP`, `PRP$`, `RB`, `RBR`, `RBS`, `RP`, `SYM`, `TO`, `UH`, `VB`, `VBD`, `VBG`, `VBN`, `VBP`, `VBZ`, `WDT`, `WP`, `WP$`, `WRB`, `XX`, ```` | | **`parser`** | `ROOT`, `acl`, `acomp`, `advcl`, `advmod`, `agent`, `amod`, `appos`, `attr`, `aux`, `auxpass`, `case`, `cc`, `ccomp`, `compound`, `conj`, `csubj`, `csubjpass`, `dative`, `dep`, `det`, `dobj`, `expl`, `intj`, `mark`, `meta`, `neg`, `nmod`, `npadvmod`, `nsubj`, `nsubjpass`, `nummod`, `oprd`, `parataxis`, `pcomp`, `pobj`, `poss`, `preconj`, `predet`, `prep`, `prt`, `punct`, `quantmod`, `relcl`, `xcomp` | | **`senter`** | `I`, `S` | | **`ner`** | `CARDINAL`, `DATE`, `EVENT`, `FAC`, `GPE`, `LANGUAGE`, `LAW`, `LOC`, `MONEY`, `NORP`, `ORDINAL`, `ORG`, `PERCENT`, `PERSON`, `PRODUCT`, `QUANTITY`, `TIME`, `WORK_OF_ART` | | **`entity_ruler`** | `PASSIVE` | </details>
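A minimal usage sketch, assuming the packaged pipeline from this repository is installed as `en_stylecheck`; whether the `PASSIVE` spans surface through `doc.ents` or through a separate span group depends on how the `stylecheck` and rule-based components are configured, so treat the filtering below as an assumption:

```python
import spacy

nlp = spacy.load("en_stylecheck")
doc = nlp("The report was written by the committee, and several mistakes were made.")

# Regular named entities plus any rule-based style labels.
for ent in doc.ents:
    print(ent.text, ent.label_)

# Assumption: passive constructions are tagged with the PASSIVE label from the
# label scheme above.
passive_spans = [ent for ent in doc.ents if ent.label_ == "PASSIVE"]
print("passive constructions:", passive_spans)
```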
{"language": ["en"], "license": "mit", "tags": ["spacy", "token-classification"], "model-index": [{"name": "en_stylecheck", "results": []}]}
chrisknowles/en_stylecheck
null
[ "spacy", "token-classification", "en", "license:mit", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chrisl/gpt-neo-2.7B
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
christima/DialogGPT-small-harrypotter
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
christina/decoder-only-transformer-small
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
christina/decoder-only-transformer-x2small
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
christina/decoder-only-transformer-x3small
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
christina/decoder-only-transformer-x4small
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
christina/decoder-only-transformer-x5small
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
christina/decoder-only-transformer-x6small
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
{}
christophalt/test-model
null
[ "transformers", "pytorch", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-generation
transformers
[DistilGPT2](https://huggingface.co/distilgpt2) English language model fine-tuned on mathematical proofs extracted from [arXiv.org](https://arxiv.org) LaTeX sources from 1992 to 2020. Proofs have been cleaned up a bit. In particular, they use * `CITE` for any citation * `REF` for any reference * `MATH` for any LaTeX mathematical formula * `CASE:` for any `\item` or labeled subcase.
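A minimal generation sketch with the `transformers` library; the prompt follows the placeholder conventions above, and the sampling settings are illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("christopherastone/distilgpt2-proofs")
model = AutoModelForCausalLM.from_pretrained("christopherastone/distilgpt2-proofs")

# Prompts should use the same placeholders as the training data: CITE, REF, MATH, CASE:
prompt = "Let MATH be given."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=True, top_p=0.9)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```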
{"widget": [{"text": "Let MATH be given."}, {"text": "If MATH is a nonempty"}, {"text": "By the inductive hypothesis,"}]}
christopherastone/distilgpt2-proofs
null
[ "transformers", "pytorch", "tf", "jax", "safetensors", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
christopherlaangsell/model_name
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-base-multilingual-cased-finetuned-cola This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1729 - Accuracy: 0.9755 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5119 | 1.0 | 625 | 0.2386 | 0.922 | | 0.2536 | 2.0 | 1250 | 0.2055 | 0.949 | | 0.1718 | 3.0 | 1875 | 0.1733 | 0.969 | | 0.0562 | 4.0 | 2500 | 0.1661 | 0.974 | | 0.0265 | 5.0 | 3125 | 0.1729 | 0.9755 | ### Framework versions - Transformers 4.9.2 - Pytorch 1.9.0+cu102 - Datasets 1.11.0 - Tokenizers 0.10.3
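A sketch of the corresponding `TrainingArguments`, reconstructed from the hyperparameters listed above; the dataset, tokenizer and data collator are not documented in this card, and per-epoch evaluation is inferred from the results table:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="bert-base-multilingual-cased-finetuned-cola",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    num_train_epochs=5,
    lr_scheduler_type="linear",
    evaluation_strategy="epoch",  # inferred: the card reports validation metrics once per epoch
    # Adam betas/epsilon are the transformers defaults listed above.
)
```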
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model_index": [{"name": "bert-base-multilingual-cased-finetuned-cola", "results": [{"task": {"name": "Text Classification", "type": "text-classification"}, "metric": {"name": "Accuracy", "type": "accuracy", "value": 0.9755}}]}]}
chrommium/bert-base-multilingual-cased-finetuned-news-headlines
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
chrommium/helper-model
null
[ "transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # rubert-base-cased-sentence-finetuned-headlines_X This model is a fine-tuned version of [DeepPavlov/rubert-base-cased-sentence](https://huggingface.co/DeepPavlov/rubert-base-cased-sentence) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.2535 - Accuracy: 0.952 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 157 | 0.2759 | 0.912 | | No log | 2.0 | 314 | 0.2538 | 0.936 | | No log | 3.0 | 471 | 0.2556 | 0.945 | | 0.1908 | 4.0 | 628 | 0.2601 | 0.95 | | 0.1908 | 5.0 | 785 | 0.2535 | 0.952 | ### Framework versions - Transformers 4.10.2 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["accuracy"]}
chrommium/rubert-base-cased-sentence-finetuned-headlines_X
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "generated_from_trainer", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # rubert-base-cased-sentence-finetuned-sent_in_news_sents This model is a fine-tuned version of [DeepPavlov/rubert-base-cased-sentence](https://huggingface.co/DeepPavlov/rubert-base-cased-sentence) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.9506 - Accuracy: 0.7224 - F1: 0.5137 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 14 - eval_batch_size: 14 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 81 | 1.0045 | 0.6690 | 0.1388 | | No log | 2.0 | 162 | 0.9574 | 0.6228 | 0.2980 | | No log | 3.0 | 243 | 1.0259 | 0.6477 | 0.3208 | | No log | 4.0 | 324 | 1.1262 | 0.6619 | 0.4033 | | No log | 5.0 | 405 | 1.3377 | 0.6299 | 0.3909 | | No log | 6.0 | 486 | 1.5716 | 0.6868 | 0.3624 | | 0.6085 | 7.0 | 567 | 1.6286 | 0.6762 | 0.4130 | | 0.6085 | 8.0 | 648 | 1.6450 | 0.6940 | 0.4775 | | 0.6085 | 9.0 | 729 | 1.7108 | 0.7224 | 0.4920 | | 0.6085 | 10.0 | 810 | 1.8792 | 0.7046 | 0.5028 | | 0.6085 | 11.0 | 891 | 1.8670 | 0.7153 | 0.4992 | | 0.6085 | 12.0 | 972 | 1.8856 | 0.7153 | 0.4934 | | 0.0922 | 13.0 | 1053 | 1.9506 | 0.7224 | 0.5137 | | 0.0922 | 14.0 | 1134 | 2.0363 | 0.7189 | 0.4761 | | 0.0922 | 15.0 | 1215 | 2.0601 | 0.7224 | 0.5053 | | 0.0922 | 16.0 | 1296 | 2.0813 | 0.7153 | 0.5038 | | 0.0922 | 17.0 | 1377 | 2.0960 | 0.7189 | 0.5065 | | 0.0922 | 18.0 | 1458 | 2.1060 | 0.7224 | 0.5098 | | 0.0101 | 19.0 | 1539 | 2.1153 | 0.7260 | 0.5086 | | 0.0101 | 20.0 | 1620 | 2.1187 | 0.7260 | 0.5086 | ### Framework versions - Transformers 4.10.3 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1"]}
chrommium/rubert-base-cased-sentence-finetuned-sent_in_news_sents
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "generated_from_trainer", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # rubert-base-cased-sentence-finetuned-sent_in_ru This model is a fine-tuned version of [DeepPavlov/rubert-base-cased-sentence](https://huggingface.co/DeepPavlov/rubert-base-cased-sentence) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3503 - Accuracy: 0.6884 - F1: 0.6875 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 15 - eval_batch_size: 15 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 25 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:-----:|:---------------:|:--------:|:------:| | No log | 1.0 | 441 | 0.7397 | 0.6630 | 0.6530 | | 0.771 | 2.0 | 882 | 0.7143 | 0.6909 | 0.6905 | | 0.5449 | 3.0 | 1323 | 0.8385 | 0.6897 | 0.6870 | | 0.3795 | 4.0 | 1764 | 0.8851 | 0.6939 | 0.6914 | | 0.3059 | 5.0 | 2205 | 1.0728 | 0.6933 | 0.6953 | | 0.2673 | 6.0 | 2646 | 1.0673 | 0.7060 | 0.7020 | | 0.2358 | 7.0 | 3087 | 1.5200 | 0.6830 | 0.6829 | | 0.2069 | 8.0 | 3528 | 1.3439 | 0.7024 | 0.7016 | | 0.2069 | 9.0 | 3969 | 1.3545 | 0.6830 | 0.6833 | | 0.1724 | 10.0 | 4410 | 1.5591 | 0.6927 | 0.6902 | | 0.1525 | 11.0 | 4851 | 1.6425 | 0.6818 | 0.6823 | | 0.131 | 12.0 | 5292 | 1.8999 | 0.6836 | 0.6775 | | 0.1253 | 13.0 | 5733 | 1.6959 | 0.6884 | 0.6877 | | 0.1132 | 14.0 | 6174 | 1.9561 | 0.6776 | 0.6803 | | 0.0951 | 15.0 | 6615 | 2.0356 | 0.6763 | 0.6754 | | 0.1009 | 16.0 | 7056 | 1.7995 | 0.6842 | 0.6741 | | 0.1009 | 17.0 | 7497 | 2.0638 | 0.6884 | 0.6811 | | 0.0817 | 18.0 | 7938 | 2.1686 | 0.6884 | 0.6859 | | 0.0691 | 19.0 | 8379 | 2.0874 | 0.6878 | 0.6889 | | 0.0656 | 20.0 | 8820 | 2.1772 | 0.6854 | 0.6817 | | 0.0652 | 21.0 | 9261 | 2.4018 | 0.6872 | 0.6896 | | 0.0608 | 22.0 | 9702 | 2.2074 | 0.6770 | 0.6656 | | 0.0677 | 23.0 | 10143 | 2.2101 | 0.6848 | 0.6793 | | 0.0559 | 24.0 | 10584 | 2.2920 | 0.6848 | 0.6835 | | 0.0524 | 25.0 | 11025 | 2.3503 | 0.6884 | 0.6875 | ### Framework versions - Transformers 4.11.2 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "rubert-base-cased-sentence-finetuned-sent_in_ru", "results": []}]}
chrommium/rubert-base-cased-sentence-finetuned-sent_in_ru
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sbert_large-finetuned-sent_in_news_sents This model is a fine-tuned version of [sberbank-ai/sbert_large_nlu_ru](https://huggingface.co/sberbank-ai/sbert_large_nlu_ru) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.7056 - Accuracy: 0.7301 - F1: 0.5210 ## Model examples Model responds to label X in news text. For exaple: For 'Π“Π°Π·ΠΏΡ€ΠΎΠΌ ΠΎΡ‚ΠΎΠ·Π²Π°Π» Π»ΠΈΡ†Π΅Π½Π·ΠΈΡŽ Ρƒ X, сообщаСт Π€ΠΈΠ½Π°ΠΌ' the model will return negative label -3 For 'X ΠΎΡ‚ΠΎΠ·Π²Π°Π» Π»ΠΈΡ†Π΅Π½Π·ΠΈΡŽ Ρƒ Π‘Π±Π΅Ρ€Π±Π°Π½ΠΊΠ°, сообщаСт Π€ΠΈΠ½Π°ΠΌ' the model will return neutral label 0 For 'Π“Π°Π·ΠΏΡ€ΠΎΠΌ ΠΎΡ‚ΠΎΠ·Π²Π°Π» Π»ΠΈΡ†Π΅Π½Π·ΠΈΡŽ Ρƒ Π‘Π±Π΅Ρ€Π±Π°Π½ΠΊΠ°, сообщаСт X' the model will return neutral label 0 For 'X дСмонстрируСт Π²Ρ‹ΡΠΎΠΊΡƒΡŽ ΠΏΡ€ΠΈΠ±Ρ‹Π»ΡŒ, сообщаСт Π€ΠΈΠ½Π°ΠΌ' the model will return positive label 1 ## Simple example of News preprocessing for Russian before BERT ``` from natasha import ( Segmenter, MorphVocab, NewsEmbedding, NewsMorphTagger, NewsSyntaxParser, NewsNERTagger, PER, NamesExtractor, Doc ) segmenter = Segmenter() emb = NewsEmbedding() morph_tagger = NewsMorphTagger(emb) syntax_parser = NewsSyntaxParser(emb) morph_vocab = MorphVocab() ### ----------------------------- key sentences block ----------------------------- def find_synax_tokens_with_order(doc, start, tokens, text_arr, full_str): ''' Находит всС синтаксичСскиС Ρ‚ΠΎΠΊΠ΅Π½Ρ‹, ΡΠΎΠΎΡ‚Π²Π΅Ρ‚ΡΡ‚Π²ΡƒΡŽΡ‰ΠΈΠ΅ Π·Π°Π΄Π°Π½Π½ΠΎΠΌΡƒ Π½Π°Π±ΠΎΡ€Ρƒ простых Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ² (Π½Π°ΠΉΠ΄Π΅Π½Π½Ρ‹Π΅ для ΠΎΠΏΡ€Π΅Π΄Π΅Π»Π΅Π½Π½ΠΎΠΉ NER Π΄Ρ€ΡƒΠ³ΠΈΠΌΠΈ функциями). Π’ΠΎΠ·Π²Ρ€Π°Ρ‰Π°Π΅Ρ‚ ΡΠ»ΠΎΠ²Π°Ρ€ΡŒ Π½Π°ΠΉΠ΄Π΅Π½Π½Ρ‹Ρ… синтаксичСских Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ² (ΠΊΠ»ΡŽΡ‡ - ΠΈΠ΄Π΅Π½Ρ‚ΠΈΡ„ΠΈΠΊΠ°Ρ‚ΠΎΡ€ Ρ‚ΠΎΠΊΠ΅Π½Π°, состоящий ΠΈΠ· Π½ΠΎΠΌΠ΅Ρ€Π° прСдлоТСния ΠΈ Π½ΠΎΠΌΠ΅Ρ€Π° Ρ‚ΠΎΠΊΠ΅Π½Π° Π²Π½ΡƒΡ‚Ρ€ΠΈ прСдлоТСния). НачинаСт поиск с ΡƒΠΊΠ°Π·Π°Π½Π½ΠΎΠΉ ΠΏΠΎΠ·ΠΈΡ†ΠΈΠΈ Π² спискС синтаксичСских Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ², Π΄ΠΎΠΏΠΎΠ»Π½ΠΈΡ‚Π΅Π»ΡŒΠ½ΠΎ Π²ΠΎΠ·Π²Ρ€Π°Ρ‰Π°Π΅Ρ‚ ΠΏΠΎΠ·ΠΈΡ†ΠΈΡŽ остановки, с ΠΊΠΎΡ‚ΠΎΡ€ΠΎΠΉ Π½ΡƒΠΆΠ½ΠΎ ΠΏΡ€ΠΎΠ΄ΠΎΠ»ΠΆΠΈΡ‚ΡŒ поиск ΡΠ»Π΅Π΄ΡƒΡŽΡ‰Π΅ΠΉ NER. ''' found = [] in_str = False str_candidate = '' str_counter = 0 if len(text_arr) == 0: return [], start for i in range(start, len(doc.syntax.tokens)): t = doc.syntax.tokens[i] if in_str: str_counter += 1 if str_counter < len(text_arr) and t.text == text_arr[str_counter]: str_candidate += t.text found.append(t) if str_candidate == full_str: return found, i+1 else: in_str = False str_candidate = '' str_counter = 0 found = [] if t.text == text_arr[0]: found.append(t) str_candidate = t.text if str_candidate == full_str: return found, i+1 in_str = True return [], len(doc.syntax.tokens) def find_tokens_in_diap_with_order(doc, start_token, diap): ''' Находит всС простыС Ρ‚ΠΎΠΊΠ΅Π½Ρ‹ (Π±Π΅Π· синтаксичСской ΠΈΠ½Ρ„ΠΎΡ€ΠΌΠ°Ρ†ΠΈΠΈ), ΠΊΠΎΡ‚ΠΎΡ€Ρ‹Π΅ ΠΏΠΎΠΏΠ°Π΄Π°ΡŽΡ‚ Π² ΡƒΠΊΠ°Π·Π°Π½Π½Ρ‹ΠΉ Π΄ΠΈΠ°ΠΏΠ°Π·ΠΎΠ½. Π­Ρ‚ΠΈ Π΄ΠΈΠ°ΠΏΠ°Π·ΠΎΠ½Ρ‹ ΠΌΡ‹ ΠΏΠΎΠ»ΡƒΡ‡Π°Π΅ΠΌ ΠΈΠ· Ρ€Π°Π·ΠΌΠ΅Ρ‚ΠΊΠΈ NER. Π’ΠΎΠ·Π²Ρ€Π°Ρ‰Π°Π΅Ρ‚ Π½Π°Π±ΠΎΡ€ Π½Π°ΠΉΠ΄Π΅Π½Π½Ρ‹Ρ… Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ² ΠΈ Π² Π²ΠΈΠ΄Π΅ массива Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ², ΠΈ Π² Π²ΠΈΠ΄Π΅ массива строчСк. НачинаСт поиск с ΡƒΠΊΠ°Π·Π°Π½Π½ΠΎΠΉ ΠΏΠΎΠ·ΠΈΡ†ΠΈΠΈ Π² строкС ΠΈ Π΄ΠΎΠΏΠΎΠ»Π½ΠΈΡ‚Π΅Π»ΡŒΠ½ΠΎ Π²ΠΎΠ·Π²Ρ€Π°Ρ‰Π°Π΅Ρ‚ ΠΏΠΎΠ·ΠΈΡ†ΠΈΡŽ остановки. 
''' found_tokens = [] found_text = [] full_str = '' next_i = 0 for i in range(start_token, len(doc.tokens)): t = doc.tokens[i] if t.start > diap[-1]: next_i = i break if t.start in diap: found_tokens.append(t) found_text.append(t.text) full_str += t.text return found_tokens, found_text, full_str, next_i def add_found_arr_to_dict(found, dict_dest): for synt in found: dict_dest.update({synt.id: synt}) return dict_dest def make_all_syntax_dict(doc): all_syntax = {} for synt in doc.syntax.tokens: all_syntax.update({synt.id: synt}) return all_syntax def is_consiquent(id_1, id_2): ''' ΠŸΡ€ΠΎΠ²Π΅Ρ€ΡΠ΅Ρ‚ ΠΈΠ΄ΡƒΡ‚ Π»ΠΈ Ρ‚ΠΎΠΊΠ΅Π½Ρ‹ Π΄Ρ€ΡƒΠ³ Π·Π° Π΄Ρ€ΡƒΠ³ΠΎΠΌ Π±Π΅Π· ΠΏΡ€ΠΎΠΌΠ΅ΠΆΡƒΡ‚ΠΊΠ° ΠΏΠΎ ΠΊΠ»ΡŽΡ‡Π°ΠΌ. ''' id_1_list = id_1.split('_') id_2_list = id_2.split('_') if id_1_list[0] != id_2_list[0]: return False return int(id_1_list[1]) + 1 == int(id_2_list[1]) def replace_found_to(found, x_str): ''' ЗамСняСт ΠΏΠΎΡΠ»Π΅Π΄ΠΎΠ²Π°Ρ‚Π΅Π»ΡŒΠ½ΠΎΡΡ‚ΡŒ Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ² NER Π½Π° Β«Π·Π°Π³Π»ΡƒΡˆΠΊΡƒΒ». ''' prev_id = '0_0' for synt in found: if is_consiquent(prev_id, synt.id): synt.text = '' else: synt.text = x_str prev_id = synt.id def analyze_doc(text): ''' ЗапускаСт Natasha для Π°Π½Π°Π»ΠΈΠ·Π° Π΄ΠΎΠΊΡƒΠΌΠ΅Π½Ρ‚Π°. ''' doc = Doc(text) doc.segment(segmenter) doc.tag_morph(morph_tagger) doc.parse_syntax(syntax_parser) ner_tagger = NewsNERTagger(emb) doc.tag_ner(ner_tagger) return doc def find_non_sym_syntax_short(entity_name, doc, add_X=False, x_str='X'): ''' ΠžΡ‚Ρ‹ΡΠΊΠΈΠ²Π°Π΅Ρ‚ Π·Π°Π΄Π°Π½Π½ΡƒΡŽ ΡΡƒΡ‰Π½ΠΎΡΡ‚ΡŒ Π² тСкстС, срСди всСх NER (Π²ΠΎΠ·ΠΌΠΎΠΆΠ½ΠΎ, Π² Π΄Ρ€ΡƒΠ³ΠΎΠΉ грамматичСской Ρ„ΠΎΡ€ΠΌΠ΅). entity_name - ΡΡƒΡ‰Π½ΠΎΡΡ‚ΡŒ, ΠΊΠΎΡ‚ΠΎΡ€ΡƒΡŽ ΠΈΡ‰Π΅ΠΌ; doc - Π΄ΠΎΠΊΡƒΠΌΠ΅Π½Ρ‚, Π² ΠΊΠΎΡ‚ΠΎΡ€ΠΎΠΌ сдСлан прСпроцСссинг Natasha; add_X - ΡΠ΄Π΅Π»Π°Ρ‚ΡŒ Π»ΠΈ Π·Π°ΠΌΠ΅Π½Ρƒ сущности Π½Π° Β«Π·Π°Π³Π»ΡƒΡˆΠΊΡƒΒ»; x_str - тСкст Π·Π°ΠΌΠ΅Π½Ρ‹. Π’ΠΎΠ·Π²Ρ€Π°Ρ‰Π°Π΅Ρ‚: all_found_syntax - ΡΠ»ΠΎΠ²Π°Ρ€ΡŒ всСх подходящих Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ² ΠΎΠ±Ρ€Π°Π·ΡƒΡŽΡ‰ΠΈΡ… искомыС сущности, Π² ΠΊΠΎΡ‚ΠΎΡ€ΠΎΠΌ Π² случаС надобности ΠΏΡ€ΠΎΠΈΠ·Π²Π΅Π΄Π΅Π½Π° Π·Π°ΠΌΠ΅Π½Π° NER Π½Π° Β«Π·Π°Π³Π»ΡƒΡˆΠΊΡƒΒ»; all_syntax - ΡΠ»ΠΎΠ²Π°Ρ€ΡŒ всСх Ρ‚ΠΎΠΊΠ΅Π½ΠΎΠ². ''' all_found_syntax = {} current_synt_number = 0 current_tok_number = 0 # ΠΈΠ΄Π΅ΠΌ ΠΏΠΎ всСм Π½Π°ΠΉΠ΄Π΅Π½Π½Ρ‹ΠΌ NER for span in doc.spans: span.normalize(morph_vocab) if span.type != 'ORG': continue diap = range(span.start, span.stop) # создаСм ΡΠ»ΠΎΠ²Π°Ρ€ΡŒ всСх синтаксичСских элСмСнтов (ΠΊΠ»ΡŽΡ‡ -- id ΠΈΠ· Π½ΠΎΠΌΠ΅Ρ€Π° прСдлоТСния ΠΈ Π½ΠΎΠΌΠ΅Ρ€Π° Π²Π½ΡƒΡ‚Ρ€ΠΈ прСдлоТСния) all_syntax = make_all_syntax_dict(doc) # Π½Π°Ρ…ΠΎΠ΄ΠΈΠΌ всС простыС Ρ‚ΠΎΠΊΠ΅Π½Ρ‹ Π²Π½ΡƒΡ‚Ρ€ΠΈ NER found_tokens, found_text, full_str, current_tok_number = find_tokens_in_diap_with_order(doc, current_tok_number, diap) # ΠΏΠΎ Π½Π°ΠΉΠ΄Π΅Π½Π½Ρ‹ΠΌ простым Ρ‚ΠΎΠΊΠ΅Π½Π°ΠΌ Π½Π°Ρ…ΠΎΠ΄ΠΈΠΌ всС синтаксичСскиС Ρ‚ΠΎΠΊΠ΅Π½Ρ‹ Π²Π½ΡƒΡ‚Ρ€ΠΈ Π΄Π°Π½Π½ΠΎΠ³ΠΎ NER found, current_synt_number = find_synax_tokens_with_order(doc, current_synt_number, found_tokens, found_text, full_str) # Ссли тСкст NER совпадаСт с ΡƒΠΊΠ°Π·Π°Π½Π½ΠΎΠΉ ΡΡƒΡ‰Π½ΠΎΡΡ‚ΡŒΡŽ, Ρ‚ΠΎ Π΄Π΅Π»Π°Π΅ΠΌ Π·Π°ΠΌΠ΅Π½Ρƒ if entity_name.find(span.normal) >= 0 or span.normal.find(entity_name) >= 0: if add_X: replace_found_to(found, x_str) all_found_syntax = add_found_arr_to_dict(found, all_found_syntax) return all_found_syntax, all_syntax def key_sentences(all_found_syntax): ''' Находит Π½ΠΎΠΌΠ΅Ρ€Π° ΠΏΡ€Π΅Π΄Π»ΠΎΠΆΠ΅Π½ΠΈΠΉ с искомой NER. 
''' key_sent_numb = {} for synt in all_found_syntax.keys(): key_sent_numb.update({synt.split('_')[0]: 1}) return key_sent_numb def openinig_punct(x): opennings = ['Β«', '('] return x in opennings def key_sentences_str(entitiy_name, doc, add_X=False, x_str='X', return_all=True): ''' БоставляСт ΠΎΠΊΠΎΠ½Ρ‡Π°Ρ‚Π΅Π»ΡŒΠ½Ρ‹ΠΉ тСкст, Π² ΠΊΠΎΡ‚ΠΎΡ€ΠΎΠΌ Π΅ΡΡ‚ΡŒ Ρ‚ΠΎΠ»ΡŒΠΊΠΎ прСдлоТСния, Π³Π΄Π΅ Π΅ΡΡ‚ΡŒ ΠΊΠ»ΡŽΡ‡Π΅Π²Π°Ρ ΡΡƒΡ‰Π½ΠΎΡΡ‚ΡŒ, эта ΡΡƒΡ‰Π½ΠΎΡΡ‚ΡŒ, Ссли ΡƒΠΊΠ°Π·Π°Π½ΠΎ, замСняСтся Π½Π° Β«Π·Π°Π³Π»ΡƒΡˆΠΊΡƒΒ». ''' all_found_syntax, all_syntax = find_non_sym_syntax_short(entitiy_name, doc, add_X, x_str) key_sent_numb = key_sentences(all_found_syntax) str_ret = '' for s in all_syntax.keys(): if (s.split('_')[0] in key_sent_numb.keys()) or (return_all): to_add = all_syntax[s] if s in all_found_syntax.keys(): to_add = all_found_syntax[s] else: if to_add.rel == 'punct' and not openinig_punct(to_add.text): str_ret = str_ret.rstrip() str_ret += to_add.text if (not openinig_punct(to_add.text)) and (to_add.text != ''): str_ret += ' ' return str_ret ### ----------------------------- key entities block ----------------------------- def find_synt(doc, synt_id): for synt in doc.syntax.tokens: if synt.id == synt_id: return synt return None def is_subj(doc, synt, recursion_list=[]): ''' Π‘ΠΎΠΎΠ±Ρ‰Π°Π΅Ρ‚ являСтся Π»ΠΈ слово ΠΏΠΎΠ΄Π»Π΅ΠΆΠ°Ρ‰ΠΈΠΌ ΠΈΠ»ΠΈ Ρ‡Π°ΡΡ‚ΡŒΡŽ слоТного ΠΏΠΎΠ΄Π»Π΅ΠΆΠ°Ρ‰Π΅Π³ΠΎ. ''' if synt.rel == 'nsubj': return True if synt.rel == 'appos': found_head = find_synt(doc, synt.head_id) if found_head.id in recursion_list: return False return is_subj(doc, found_head, recursion_list + [synt.id]) return False def find_subjects_in_syntax(doc): ''' Π’Ρ‹Π΄Π°Π΅Ρ‚ словарик, Π² ΠΊΠΎΡ‚ΠΎΡ€ΠΎΠΌ для ΠΊΠ°ΠΆΠ΄ΠΎΠΉ NER написано, являСтся Π»ΠΈ ΠΎΠ½ ΠΏΠΎΠ΄Π»Π΅ΠΆΠ°Ρ‰ΠΈΠΌ Π² ΠΏΡ€Π΅Π΄Π»ΠΎΠΆΠ΅Π½ΠΈΠΈ. 
Π’Ρ‹Π΄Π°Π΅Ρ‚ ΡΡ‚Π°Ρ€Ρ‚ΠΎΠ²ΡƒΡŽ ΠΏΠΎΠ·ΠΈΡ†ΠΈΡŽ NER ΠΈ Π±Ρ‹Π»ΠΎ Π»ΠΈ ΠΎΠ½ΠΎ ΠΏΠΎΠ΄Π»Π΅ΠΆΠ°Ρ‰ΠΈΠΌ (ΠΈΠ»ΠΈ appos) ''' found_subjects = {} current_synt_number = 0 current_tok_number = 0 for span in doc.spans: span.normalize(morph_vocab) if span.type != 'ORG': continue found_subjects.update({span.start: 0}) diap = range(span.start, span.stop) found_tokens, found_text, full_str, current_tok_number = find_tokens_in_diap_with_order(doc, current_tok_number, diap) found, current_synt_number = find_synax_tokens_with_order(doc, current_synt_number, found_tokens, found_text, full_str) found_subjects.update({span.start: 0}) for synt in found: if is_subj(doc, synt): found_subjects.update({span.start: 1}) return found_subjects def entity_weight(lst, c=1): return c*lst[0]+lst[1] def determine_subject(found_subjects, doc, new_agency_list, return_best=True, threshold=0.75): ''' ΠžΠΏΡ€Π΅Π΄Π΅Π»ΡΠ΅Ρ‚ ΠΊΠ»ΡŽΡ‡Π΅Π²ΡƒΡŽ NER ΠΈ список самых Π²Π°ΠΆΠ½Ρ‹Ρ… NER, ΠΎΡΠ½ΠΎΠ²Ρ‹Π²Π°ΡΡΡŒ Π½Π° Ρ‚ΠΎΠΌ, сколько Ρ€Π°Π· каТдая ΠΈΠ· Π½ΠΈΡ… встрСчаСтся Π² тСкста Π²ΠΎΠΎΠ±Ρ‰Π΅ ΠΈ сколько Ρ€Π°Π· Π² Ρ€ΠΎΠ»ΠΈ ΠΏΠΎΠ΄Π»Π΅ΠΆΠ°Ρ‰Π΅Π³ΠΎ ''' objects_arr = [] objects_arr_ners = [] should_continue = False for span in doc.spans: should_continue = False span.normalize(morph_vocab) if span.type != 'ORG': continue if span.normal in new_agency_list: continue for i in range(len(objects_arr)): t, lst = objects_arr[i] if t.find(span.normal) >= 0: lst[0] += 1 lst[1] += found_subjects[span.start] should_continue = True break if span.normal.find(t) >= 0: objects_arr[i] = (span.normal, [lst[0]+1, lst[1]+found_subjects[span.start]]) should_continue = True break if should_continue: continue objects_arr.append((span.normal, [1, found_subjects[span.start]])) objects_arr_ners.append(span.normal) max_weight = 0 opt_ent = 0 for obj in objects_arr: t, lst = obj w = entity_weight(lst) if max_weight < w: max_weight = w opt_ent = t if not return_best: return opt_ent, objects_arr_ners bests = [] for obj in objects_arr: t, lst = obj w = entity_weight(lst) if max_weight*threshold < w: bests.append(t) return opt_ent, bests text = '''Π’ офисах Π‘Π±Π΅Ρ€Π° Π½Π°Ρ‡Π°Π»ΠΈ Ρ‚Π΅ΡΡ‚ΠΈΡ€ΠΎΠ²Π°Ρ‚ΡŒ Ρ‚Π΅Ρ…Π½ΠΎΠ»ΠΎΠ³ΠΈΡŽ ΠΏΠΎΠΌΠΎΡ‰ΠΈ посСтитСлям Π² экстрСнных ситуациях. «ЗСлСная ΠΊΠ½ΠΎΠΏΠΊΠ°Β» Π±ΡƒΠ΄Π΅Ρ‚ Π² Π·ΠΎΠ½Π°Ρ… круглосуточного обслуТивания офисов Π±Π°Π½ΠΊΠ° Π² Π’ΠΎΡ€ΠΎΠ½Π΅ΠΆΠ΅, Π‘Π°Π½ΠΊΡ‚-ΠŸΠ΅Ρ‚Π΅Ρ€Π±ΡƒΡ€Π³Π΅, ПодольскС, ПсковС, ΠžΡ€Π»Π΅ ΠΈ ЯрославлС. Π’ Π½ΠΈΡ… находятся стСнды с сСнсорными ΠΊΠ½ΠΎΠΏΠΊΠ°ΠΌΠΈ, ΠΎΠ±Π΅ΡΠΏΠ΅Ρ‡ΠΈΠ²Π°ΡŽΡ‰ΠΈΠ΅ связь с ΠΎΠΏΠ΅Ρ€Π°Ρ‚ΠΎΡ€Π°ΠΌΠΈ Ρ†Π΅Π½Ρ‚Ρ€Π° ΠΌΠΎΠ½ΠΈΡ‚ΠΎΡ€ΠΈΠ½Π³Π° слуТбы бСзопасности Π±Π°Π½ΠΊΠ°. ΠŸΠΎΠ»ΡƒΡ‡ΠΈΠ² сигнал ΠΎ ΠΏΠΎΠΌΠΎΡ‰ΠΈ, ΠΎΠΏΠ΅Ρ€Π°Ρ‚ΠΎΡ€ Ρ†Π΅Π½Ρ‚Ρ€Π° ΠΌΠΎΠΆΠ΅Ρ‚ ΠΏΠΎΠ΄ΠΊΠ»ΡŽΡ‡ΠΈΡ‚ΡŒΡΡ ΠΊ ΠΎΠ±ΡŠΠ΅ΠΊΡ‚Ρƒ ΠΏΠΎ голосовой связи. Π‘ ΠΏΠΎΠΌΠΎΡ‰ΡŒΡŽ ΠΊΠ°ΠΌΠ΅Ρ€ видСонаблюдСния ΠΎΠ½ ΠΎΡ†Π΅Π½ΠΈΡ‚ обстановку ΠΈ ΠΏΡ€ΠΈ нСобходимости Π²Ρ‹Π·ΠΎΠ²Π΅Ρ‚ ΠΏΠΎΠ»ΠΈΡ†ΠΈΡŽ ΠΈΠ»ΠΈ ΡΠΊΠΎΡ€ΡƒΡŽ ΠΏΠΎΠΌΠΎΡ‰ΡŒ. Β«Π—Π΅Π»Π΅Π½ΠΎΠΉ ΠΊΠ½ΠΎΠΏΠΊΠΎΠΉΒ» ΠΌΠΎΠΆΠ½ΠΎ Π²ΠΎΡΠΏΠΎΠ»ΡŒΠ·ΠΎΠ²Π°Ρ‚ΡŒΡΡ Π² Π½Π΅Ρ€Π°Π±ΠΎΡ‡Π΅Π΅ для отдСлСния врСмя, Ссли Π²ΠΎΠ·Π½ΠΈΠΊΠ»Π° ΡƒΠ³Ρ€ΠΎΠ·Π° ΠΆΠΈΠ·Π½ΠΈ ΠΈΠ»ΠΈ Π·Π΄ΠΎΡ€ΠΎΠ²ΡŒΡŽ. Π’ ΠΎΡΡ‚Π°Π»ΡŒΠ½Ρ‹Ρ… случаях ΠΏΠΎΠΌΠΎΡ‡ΡŒ ΠΊΠ»ΠΈΠ΅Π½Ρ‚Π°ΠΌ Π³ΠΎΡ‚ΠΎΠ²Ρ‹ сотрудники отдСлСния Π±Π°Π½ΠΊΠ°. «Одно ΠΈΠ· Π½Π°ΠΏΡ€Π°Π²Π»Π΅Π½ΠΈΠΉ нашСй Ρ€Π°Π±ΠΎΡ‚Ρ‹ Π² области ESG ΠΈ устойчивого развития β€” это Π·Π°Π±ΠΎΡ‚Π° ΠΎΠ± общСствС. И Π·Π΄ΠΎΡ€ΠΎΠ²ΡŒΠ΅ людСй ΠΊΠ°ΠΊ Π²Ρ‹ΡΡˆΠ°Ρ Ρ†Π΅Π½Π½ΠΎΡΡ‚ΡŒ являСтся Π΅Π³ΠΎ основой. 
ΠŸΠΎΡΡ‚ΠΎΠΌΡƒ Π·Π°Π΄Π°Ρ‡Π° Π±Π°Π½ΠΊΠ° Π² области бСзопасности Π³ΠΎΡ€Π°Π·Π΄ΠΎ ΠΌΠ°ΡΡˆΡ‚Π°Π±Π½Π΅Π΅, Ρ‡Π΅ΠΌ обСспСчСниС Ρ‚ΠΎΠ»ΡŒΠΊΠΎ финансовой бСзопасности ΠΊΠ»ΠΈΠ΅Π½Ρ‚ΠΎΠ². Π­Ρ‚ΠΎΡ‚ ΠΏΠΈΠ»ΠΎΡ‚Π½Ρ‹ΠΉ ΠΏΡ€ΠΎΠ΅ΠΊΡ‚ ΠΏΡ€ΠΈΡƒΡ€ΠΎΡ‡Π΅Π½ ΠΊ 180-Π»Π΅Ρ‚ΠΈΡŽ Π‘Π±Π΅Ρ€Π±Π°Π½ΠΊΠ°: ΠΌΡ‹ Ρ…ΠΎΡ‚ΠΈΠΌ, Ρ‡Ρ‚ΠΎΠ±Ρ‹, приходя Π² Π±Π°Π½ΠΊ, ΠΊΠ»ΠΈΠ΅Π½Ρ‚ чувствовал, Ρ‡Ρ‚ΠΎ Π΅Π³ΠΎ Тизнь ΠΈ Π±Π΅Π·ΠΎΠΏΠ°ΡΠ½ΠΎΡΡ‚ΡŒ β€” наша Ρ†Π΅Π½Π½ΠΎΡΡ‚ΡŒΒ», β€” ΠΎΡ‚ΠΌΠ΅Ρ‚ΠΈΠ» Π·Π°ΠΌΠ΅ΡΡ‚ΠΈΡ‚Π΅Π»ΡŒ прСдсСдатСля правлСния Π‘Π±Π΅Ρ€Π±Π°Π½ΠΊΠ° Бтанислав ΠšΡƒΠ·Π½Π΅Ρ†ΠΎΠ².''' doc = analyze_doc(text) key_entity = determine_subject(find_subjects_in_syntax(doc), doc, [])[0] text_for_model = key_sentences_str(key_entity, doc, add_X=True, x_str='X', return_all=False) ``` ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 6 - eval_batch_size: 6 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 176 | 0.9504 | 0.6903 | 0.2215 | | No log | 2.0 | 352 | 0.9065 | 0.7159 | 0.4760 | | 0.8448 | 3.0 | 528 | 0.9687 | 0.7045 | 0.4774 | | 0.8448 | 4.0 | 704 | 1.2436 | 0.7045 | 0.4686 | | 0.8448 | 5.0 | 880 | 1.4809 | 0.7273 | 0.4630 | | 0.2074 | 6.0 | 1056 | 1.5866 | 0.7330 | 0.5185 | | 0.2074 | 7.0 | 1232 | 1.7056 | 0.7301 | 0.5210 | | 0.2074 | 8.0 | 1408 | 1.6982 | 0.7415 | 0.5056 | | 0.0514 | 9.0 | 1584 | 1.8088 | 0.7273 | 0.5203 | | 0.0514 | 10.0 | 1760 | 1.9250 | 0.7102 | 0.4879 | ### Framework versions - Transformers 4.11.2 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "sbert_large-finetuned-sent_in_news_sents", "results": []}]}
chrommium/sbert_large-finetuned-sent_in_news_sents
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # sbert_large-finetuned-sent_in_news_sents_3lab This model is a fine-tuned version of [sberbank-ai/sbert_large_nlu_ru](https://huggingface.co/sberbank-ai/sbert_large_nlu_ru) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.9443 - Accuracy: 0.8580 - F1: 0.6199 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 17 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 264 | 0.6137 | 0.8608 | 0.3084 | | 0.524 | 2.0 | 528 | 0.6563 | 0.8722 | 0.4861 | | 0.524 | 3.0 | 792 | 0.7110 | 0.8494 | 0.4687 | | 0.2225 | 4.0 | 1056 | 0.7323 | 0.8608 | 0.6015 | | 0.2225 | 5.0 | 1320 | 0.9604 | 0.8551 | 0.6185 | | 0.1037 | 6.0 | 1584 | 0.8801 | 0.8523 | 0.5535 | | 0.1037 | 7.0 | 1848 | 0.9443 | 0.8580 | 0.6199 | | 0.0479 | 8.0 | 2112 | 1.0048 | 0.8608 | 0.6168 | | 0.0479 | 9.0 | 2376 | 0.9757 | 0.8551 | 0.6097 | | 0.0353 | 10.0 | 2640 | 1.0743 | 0.8580 | 0.6071 | | 0.0353 | 11.0 | 2904 | 1.1216 | 0.8580 | 0.6011 | ### Framework versions - Transformers 4.11.2 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
{"tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "sbert_large-finetuned-sent_in_news_sents_3lab", "results": []}]}
chrommium/sbert_large-finetuned-sent_in_news_sents_3lab
null
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
{}
chrommium/two-step-finetuning-sbert
null
[ "transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-large-finetuned-sent_in_news This model is a fine-tuned version of [xlm-roberta-large](https://huggingface.co/xlm-roberta-large) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.8872 - Accuracy: 0.7273 - F1: 0.5125 ## Model description The model is asymmetric: it reacts to the label X in the news text. Try the following examples: a) АгСнтство X ΠΏΠΎΠ½ΠΈΠ·ΠΈΠ»ΠΎ Ρ€Π΅ΠΉΡ‚ΠΈΠ½Π³ Π±Π°Π½ΠΊΠ° Fitch. b) АгСнтство Fitch ΠΏΠΎΠ½ΠΈΠ·ΠΈΠ»ΠΎ Ρ€Π΅ΠΉΡ‚ΠΈΠ½Π³ Π±Π°Π½ΠΊΠ° X. a) Компания Π€ΠΈΠ½Π°ΠΌ ΠΏΠΎΠΊΠ°Π·Π°Π»Π° Ρ€Π΅ΠΊΠΎΡ€Π΄Π½ΡƒΡŽ ΠΏΡ€ΠΈΠ±Ρ‹Π»ΡŒ, говорят Π°Π½Π°Π»ΠΈΡ‚ΠΈΠΊΠΈ ΠΊΠΎΠΌΠΏΠ°Π½ΠΈΠΈ X. b) Компания X ΠΏΠΎΠΊΠ°Π·Π°Π»Π° Ρ€Π΅ΠΊΠΎΡ€Π΄Π½ΡƒΡŽ ΠΏΡ€ΠΈΠ±Ρ‹Π»ΡŒ, говорят Π°Π½Π°Π»ΠΈΡ‚ΠΈΠΊΠΈ ΠΊΠΎΠΌΠΏΠ°Π½ΠΈΠΈ Π€ΠΈΠ½Π°ΠΌ. In each pair, (a) and (b) describe the same event but place X on a different entity, so the model is expected to assign them different sentiment. A usage sketch is shown at the end of this card. ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 10 - eval_batch_size: 10 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 16 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 106 | 1.2526 | 0.6108 | 0.1508 | | No log | 2.0 | 212 | 1.1553 | 0.6648 | 0.1141 | | No log | 3.0 | 318 | 1.1150 | 0.6591 | 0.1247 | | No log | 4.0 | 424 | 1.0007 | 0.6705 | 0.1383 | | 1.1323 | 5.0 | 530 | 0.9267 | 0.6733 | 0.2027 | | 1.1323 | 6.0 | 636 | 1.0869 | 0.6335 | 0.4084 | | 1.1323 | 7.0 | 742 | 1.1224 | 0.6932 | 0.4586 | | 1.1323 | 8.0 | 848 | 1.2535 | 0.6307 | 0.3424 | | 1.1323 | 9.0 | 954 | 1.4288 | 0.6932 | 0.4881 | | 0.5252 | 10.0 | 1060 | 1.5856 | 0.6932 | 0.4739 | | 0.5252 | 11.0 | 1166 | 1.7101 | 0.6733 | 0.4530 | | 0.5252 | 12.0 | 1272 | 1.7330 | 0.6903 | 0.4750 | | 0.5252 | 13.0 | 1378 | 1.8872 | 0.7273 | 0.5125 | | 0.5252 | 14.0 | 1484 | 1.8797 | 0.7301 | 0.5033 | | 0.1252 | 15.0 | 1590 | 1.9339 | 0.7330 | 0.5024 | | 0.1252 | 16.0 | 1696 | 1.9632 | 0.7301 | 0.4967 | ### Framework versions - Transformers 4.11.2 - Pytorch 1.9.0+cu102 - Datasets 1.12.1 - Tokenizers 0.10.3
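A minimal inference sketch using the examples above; it assumes the checkpoint is published under the repository id below, and the mapping from the raw output labels to sentiment classes is not documented in this card:

```python
from transformers import pipeline

clf = pipeline("text-classification",
               model="chrommium/xlm-roberta-large-finetuned-sent_in_news")

# The same event, with X marking different entities, should receive different labels.
print(clf("АгСнтство X ΠΏΠΎΠ½ΠΈΠ·ΠΈΠ»ΠΎ Ρ€Π΅ΠΉΡ‚ΠΈΠ½Π³ Π±Π°Π½ΠΊΠ° Fitch."))
print(clf("АгСнтство Fitch ΠΏΠΎΠ½ΠΈΠ·ΠΈΠ»ΠΎ Ρ€Π΅ΠΉΡ‚ΠΈΠ½Π³ Π±Π°Π½ΠΊΠ° X."))
```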
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy", "f1"], "model-index": [{"name": "xlm-roberta-large-finetuned-sent_in_news", "results": []}]}
chrommium/xlm-roberta-large-finetuned-sent_in_news
null
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "text-classification", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chu/KCdot
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chuangzhidian/electra-small-spanish
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
text-generation
transformers
[blenderbot-400M-distill](https://huggingface.co/facebook/blenderbot-400M-distill) fine-tuned on the [ESConv dataset](https://github.com/thu-coai/Emotional-Support-Conversation). Usage example: ```python import torch from transformers import AutoTokenizer from transformers.models.blenderbot import BlenderbotTokenizer, BlenderbotForConditionalGeneration def _norm(x): return ' '.join(x.strip().split()) tokenizer = BlenderbotTokenizer.from_pretrained('thu-coai/blenderbot-400M-esconv') model = BlenderbotForConditionalGeneration.from_pretrained('thu-coai/blenderbot-400M-esconv') model.eval() utterances = [ "I am having a lot of anxiety about quitting my current job. It is too stressful but pays well", "What makes your job stressful for you?", "I have to deal with many people in hard financial situations and it is upsetting", "Do you help your clients to make it to a better financial situation?", "I do, but often they are not going to get back to what they want. Many people are going to lose their home when safeguards are lifted", ] input_sequence = ' '.join([' ' + e for e in utterances]) + tokenizer.eos_token # add space prefix and separate utterances with two spaces input_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(input_sequence))[-128:] input_ids = torch.LongTensor([input_ids]) model_output = model.generate(input_ids, num_beams=1, do_sample=True, top_p=0.9, num_return_sequences=5, return_dict=False) generation = tokenizer.batch_decode(model_output, skip_special_tokens=True) generation = [_norm(e) for e in generation] print(generation) utterances.append(generation[0]) # for future loop ``` Please kindly cite the [original paper](https://arxiv.org/abs/2106.01144) if you use this model: ```bib @inproceedings{liu-etal-2021-towards, title={Towards Emotional Support Dialog Systems}, author={Liu, Siyang and Zheng, Chujie and Demasi, Orianna and Sabour, Sahand and Li, Yu and Yu, Zhou and Jiang, Yong and Huang, Minlie}, booktitle={Proceedings of the 59th annual meeting of the Association for Computational Linguistics}, year={2021} } ```
{"language": ["en"], "tags": ["pytorch", "coai"], "pipeline_tag": "conversational"}
thu-coai/blenderbot-400M-esconv
null
[ "transformers", "pytorch", "safetensors", "blenderbot", "text2text-generation", "coai", "conversational", "en", "arxiv:2106.01144", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chulin/distilbert-base-uncased-finetuned-squad
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chymaks/IgboBert-finetuned-ner
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chymaks/bert-base-multilingual-cased-finetuned-ner
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chymaks/distilbert-base-uncased-finetuned-ner
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
chymaks/roberta-base-finetuned-ner
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
{}
cicuq/Nea
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
## EnDR-BERT EnDR-BERT is a multilingual, cased model pretrained on the English collection of consumer comments on drug administration from [2]. Pre-training was based on the [original BERT code](https://github.com/google-research/bert) provided by Google. In particular, Multi-BERT was used for initialization, and all the parameters are the same as in Multi-BERT. Training details are described in our paper. Link: https://yadi.sk/d/-PTn0xhk1PqvgQ A minimal loading example with the `transformers` library is given at the end of this card. ## Citing & Authors If you find this repository helpful, feel free to cite our publication: [1] Tutubalina E, Alimova I, Miftahutdinov Z, et al. The Russian Drug Reaction Corpus and Neural Models for Drug Reactions and Effectiveness Detection in User Reviews. Bioinformatics, 2020. Preprint: https://arxiv.org/abs/2004.03659 ``` @article{10.1093/bioinformatics/btaa675, author = {Tutubalina, Elena and Alimova, Ilseyar and Miftahutdinov, Zulfat and Sakhovskiy, Andrey and Malykh, Valentin and Nikolenko, Sergey}, title = "{The Russian Drug Reaction Corpus and Neural Models for Drug Reactions and Effectiveness Detection in User Reviews}", journal = {Bioinformatics}, year = {2020}, month = {07}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btaa675}, url = {https://doi.org/10.1093/bioinformatics/btaa675}, note = {btaa675}, eprint = {https://academic.oup.com/bioinformatics/advance-article-pdf/doi/10.1093/bioinformatics/btaa675/33539752/btaa675.pdf}, } ``` [2] Tutubalina, EV and Miftahutdinov, Z Sh and Nugmanov, RI and Madzhidov, TI and Nikolenko, SI and Alimova, IS and Tropsha, AE. Using semantic analysis of texts for the identification of drugs with similar therapeutic effects. Russian Chemical Bulletin, 2017, Vol. 66, No. 11, pp. 2180-2189. [link to paper](https://www.researchgate.net/profile/Elena_Tutubalina/publication/323751823_Using_semantic_analysis_of_texts_for_the_identification_of_drugs_with_similar_therapeutic_effects/links/5bf7cfc3299bf1a0202cbc1f/Using-semantic-analysis-of-texts-for-the-identification-of-drugs-with-similar-therapeutic-effects.pdf) ``` @article{tutubalina2017using, title={Using semantic analysis of texts for the identification of drugs with similar therapeutic effects}, author={Tutubalina, EV and Miftahutdinov, Z Sh and Nugmanov, RI and Madzhidov, TI and Nikolenko, SI and Alimova, IS and Tropsha, AE}, journal={Russian Chemical Bulletin}, volume={66}, number={11}, pages={2180--2189}, year={2017}, publisher={Springer} } ```
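A minimal loading sketch with the `transformers` library; this is a sketch only, assuming the repository ships a standard BERT config, vocabulary and PyTorch weights, and the example sentence is illustrative:

```python
from transformers import BertTokenizer, BertModel

tokenizer = BertTokenizer.from_pretrained("cimm-kzn/endr-bert")
model = BertModel.from_pretrained("cimm-kzn/endr-bert")

# Encode a drug-review-style sentence and take the contextual embeddings.
inputs = tokenizer("This medication gave me a terrible headache.", return_tensors="pt")
outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch, tokens, hidden_size)
```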
{"language": ["ru", "en"], "tags": ["bio", "med", "biomedical"]}
cimm-kzn/endr-bert
null
[ "transformers", "pytorch", "bio", "med", "biomedical", "ru", "en", "arxiv:2004.03659", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
## EnRuDR-BERT EnRuDR-BERT is a multilingual, cased model pretrained on the raw part of the RuDReC corpus (1.4M reviews) and the English collection of consumer comments on drug administration from [2]. Pre-training was based on the [original BERT code](https://github.com/google-research/bert) provided by Google. In particular, Multi-BERT was used for initialization; the vocabulary of Russian subtokens and the parameters are the same as in Multi-BERT. Training details are described in our paper. Link: https://yadi.sk/d/-PTn0xhk1PqvgQ ## Citing & Authors If you find this repository helpful, feel free to cite our publication: [1] Tutubalina E, Alimova I, Miftahutdinov Z, et al. The Russian Drug Reaction Corpus and Neural Models for Drug Reactions and Effectiveness Detection in User Reviews. Bioinformatics, 2020. Preprint: https://arxiv.org/abs/2004.03659 ``` @article{10.1093/bioinformatics/btaa675, author = {Tutubalina, Elena and Alimova, Ilseyar and Miftahutdinov, Zulfat and Sakhovskiy, Andrey and Malykh, Valentin and Nikolenko, Sergey}, title = "{The Russian Drug Reaction Corpus and Neural Models for Drug Reactions and Effectiveness Detection in User Reviews}", journal = {Bioinformatics}, year = {2020}, month = {07}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btaa675}, url = {https://doi.org/10.1093/bioinformatics/btaa675}, note = {btaa675}, eprint = {https://academic.oup.com/bioinformatics/advance-article-pdf/doi/10.1093/bioinformatics/btaa675/33539752/btaa675.pdf}, } ``` [2] Tutubalina, EV and Miftahutdinov, Z Sh and Nugmanov, RI and Madzhidov, TI and Nikolenko, SI and Alimova, IS and Tropsha, AE. Using semantic analysis of texts for the identification of drugs with similar therapeutic effects. Russian Chemical Bulletin, 2017, Vol. 66, No. 11, pp. 2180-2189. [link to paper](https://www.researchgate.net/profile/Elena_Tutubalina/publication/323751823_Using_semantic_analysis_of_texts_for_the_identification_of_drugs_with_similar_therapeutic_effects/links/5bf7cfc3299bf1a0202cbc1f/Using-semantic-analysis-of-texts-for-the-identification-of-drugs-with-similar-therapeutic-effects.pdf) ``` @article{tutubalina2017using, title={Using semantic analysis of texts for the identification of drugs with similar therapeutic effects}, author={Tutubalina, EV and Miftahutdinov, Z Sh and Nugmanov, RI and Madzhidov, TI and Nikolenko, SI and Alimova, IS and Tropsha, AE}, journal={Russian Chemical Bulletin}, volume={66}, number={11}, pages={2180--2189}, year={2017}, publisher={Springer} } ```
{"language": ["ru", "en"], "tags": ["bio", "med", "biomedical"]}
cimm-kzn/enrudr-bert
null
[ "transformers", "pytorch", "bio", "med", "biomedical", "ru", "en", "arxiv:2004.03659", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
transformers
## RuDR-BERT RuDR-BERT is a multilingual, cased model pretrained on the raw part of the RuDReC corpus (1.4M reviews). Pre-training was based on the [original BERT code](https://github.com/google-research/bert) provided by Google. In particular, Multi-BERT was used for initialization; the vocabulary of Russian subtokens and the parameters are the same as in Multi-BERT. Training details are described in our paper. Link: https://yadi.sk/d/-PTn0xhk1PqvgQ ## Citing & Authors If you find this repository helpful, feel free to cite our publication: [1] Tutubalina E, Alimova I, Miftahutdinov Z, et al. The Russian Drug Reaction Corpus and Neural Models for Drug Reactions and Effectiveness Detection in User Reviews. Bioinformatics, 2020. Preprint: https://arxiv.org/abs/2004.03659 ``` @article{10.1093/bioinformatics/btaa675, author = {Tutubalina, Elena and Alimova, Ilseyar and Miftahutdinov, Zulfat and Sakhovskiy, Andrey and Malykh, Valentin and Nikolenko, Sergey}, title = "{The Russian Drug Reaction Corpus and Neural Models for Drug Reactions and Effectiveness Detection in User Reviews}", journal = {Bioinformatics}, year = {2020}, month = {07}, issn = {1367-4803}, doi = {10.1093/bioinformatics/btaa675}, url = {https://doi.org/10.1093/bioinformatics/btaa675}, note = {btaa675}, eprint = {https://academic.oup.com/bioinformatics/advance-article-pdf/doi/10.1093/bioinformatics/btaa675/33539752/btaa675.pdf}, } ``` [2] Tutubalina, EV and Miftahutdinov, Z Sh and Nugmanov, RI and Madzhidov, TI and Nikolenko, SI and Alimova, IS and Tropsha, AE. Using semantic analysis of texts for the identification of drugs with similar therapeutic effects. Russian Chemical Bulletin, 2017, Vol. 66, No. 11, pp. 2180-2189. [link to paper](https://www.researchgate.net/profile/Elena_Tutubalina/publication/323751823_Using_semantic_analysis_of_texts_for_the_identification_of_drugs_with_similar_therapeutic_effects/links/5bf7cfc3299bf1a0202cbc1f/Using-semantic-analysis-of-texts-for-the-identification-of-drugs-with-similar-therapeutic-effects.pdf) ``` @article{tutubalina2017using, title={Using semantic analysis of texts for the identification of drugs with similar therapeutic effects}, author={Tutubalina, EV and Miftahutdinov, Z Sh and Nugmanov, RI and Madzhidov, TI and Nikolenko, SI and Alimova, IS and Tropsha, AE}, journal={Russian Chemical Bulletin}, volume={66}, number={11}, pages={2180--2189}, year={2017}, publisher={Springer} } ```
{"language": ["ru"], "tags": ["bio", "med", "biomedical"]}
cimm-kzn/rudr-bert
null
[ "transformers", "pytorch", "bio", "med", "biomedical", "ru", "arxiv:2004.03659", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
null
null
End-to-end ASR for English
{}
cjcu/End2End-asr
null
[ "region:us" ]
null
2022-03-02T23:29:05+00:00
question-answering
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # afriberta_base-finetuned-tydiqa This model is a fine-tuned version of [castorini/afriberta_base](https://huggingface.co/castorini/afriberta_base) on the tydiqa dataset. It achieves the following results on the evaluation set: - Loss: 2.3728 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 192 | 2.1359 | | No log | 2.0 | 384 | 2.3409 | | 0.8353 | 3.0 | 576 | 2.3728 | ### Framework versions - Transformers 4.14.1 - Pytorch 1.10.0+cu111 - Datasets 1.16.1 - Tokenizers 0.10.3
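A minimal question-answering sketch; it assumes the fine-tuned checkpoint is published under the repository id below, and the Swahili question and context are purely illustrative:

```python
from transformers import pipeline

qa = pipeline("question-answering", model="cjrowe/afriberta_base-finetuned-tydiqa")

result = qa(
    question="Mji mkuu wa Tanzania ni upi?",
    context="Dodoma ni mji mkuu wa Tanzania, na Dar es Salaam ni jiji kubwa zaidi nchini.",
)
print(result)  # {'score': ..., 'start': ..., 'end': ..., 'answer': ...}
```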
{"language": ["sw"], "tags": ["generated_from_trainer"], "datasets": ["tydiqa"], "model-index": [{"name": "afriberta_base-finetuned-tydiqa", "results": []}]}
cjrowe/afriberta_base-finetuned-tydiqa
null
[ "transformers", "pytorch", "tensorboard", "xlm-roberta", "question-answering", "generated_from_trainer", "sw", "dataset:tydiqa", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
fill-mask
transformers
{}
ck46/camembert-base
null
[ "transformers", "pytorch", "camembert", "fill-mask", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
{}
ck46/t5-base-hotpot-qa-qg
null
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "has_space", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
{}
ck46/t5-base-qg-prefix
null
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
{}
ck46/t5-base-squad-qa-qg
null
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
{}
ck46/t5-small-hotpot-qa-qg
null
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
text2text-generation
transformers
{}
ck46/t5-small-squad-qa-qg
null
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2022-03-02T23:29:05+00:00
token-classification
transformers
{}
ckauth/ck-ner-disease
null
[ "transformers", "tf", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05+00:00