Dataset schema:

| Column | Dtype | Value stats |
|---|---|---|
| pipeline_tag | stringclasses | 48 values |
| library_name | stringclasses | 198 values |
| text | stringlengths | 1 to 900k |
| metadata | stringlengths | 2 to 438k |
| id | stringlengths | 5 to 122 |
| last_modified | null | always null |
| tags | sequencelengths | 1 to 1.84k |
| sha | null | always null |
| created_at | stringlengths | 25 to 25 |
| arxiv | sequencelengths | 0 to 201 |
| languages | sequencelengths | 0 to 1.83k |
| tags_str | stringlengths | 17 to 9.34k |
| text_str | stringlengths | 0 to 389k |
| text_lists | sequencelengths | 0 to 722 |
| processed_texts | sequencelengths | 1 to 723 |
text-generation
transformers
# Uploaded model - **Developed by:** bharathirajan89 - **License:** apache-2.0 - **Finetuned from model :** unsloth/mistral-7b-instruct-v0.2-bnb-4bit This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
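The merged repo can be loaded like any standard `transformers` causal-LM checkpoint. A minimal sketch, assuming the repo ships full merged weights (the tags below list `transformers` and `pytorch`); the prompt and generation settings are illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "bharathirajan89/bharathi_mistral_7b_pulse_unsloth_v1_merged"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# Mistral-Instruct checkpoints use the [INST] chat format; the tokenizer's
# chat template takes care of it.
messages = [{"role": "user", "content": "Write a one-sentence greeting."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
print(tokenizer.decode(model.generate(input_ids, max_new_tokens=128)[0]))
```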
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "mistral", "trl", "sft"], "base_model": "unsloth/mistral-7b-instruct-v0.2-bnb-4bit"}
bharathirajan89/bharathi_mistral_7b_pulse_unsloth_v1_merged
null
[ "transformers", "pytorch", "mistral", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "base_model:unsloth/mistral-7b-instruct-v0.2-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:42:07+00:00
[]
[ "en" ]
TAGS #transformers #pytorch #mistral #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-unsloth/mistral-7b-instruct-v0.2-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: bharathirajan89 - License: apache-2.0 - Finetuned from model : unsloth/mistral-7b-instruct-v0.2-bnb-4bit This mistral model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: bharathirajan89\n- License: apache-2.0\n- Finetuned from model : unsloth/mistral-7b-instruct-v0.2-bnb-4bit\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #pytorch #mistral #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-unsloth/mistral-7b-instruct-v0.2-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: bharathirajan89\n- License: apache-2.0\n- Finetuned from model : unsloth/mistral-7b-instruct-v0.2-bnb-4bit\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
# Uploaded model - **Developed by:** ntvcie - **License:** apache-2.0 - **Finetuned from model :** unsloth/gemma-2b-bnb-4bit This gemma model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "gemma", "trl"], "base_model": "unsloth/gemma-2b-bnb-4bit"}
ntvcie/Gemma2bVinhntV8_16bit
null
[ "transformers", "pytorch", "gemma", "text-generation", "text-generation-inference", "unsloth", "trl", "en", "base_model:unsloth/gemma-2b-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:43:25+00:00
[]
[ "en" ]
TAGS #transformers #pytorch #gemma #text-generation #text-generation-inference #unsloth #trl #en #base_model-unsloth/gemma-2b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: ntvcie - License: apache-2.0 - Finetuned from model : unsloth/gemma-2b-bnb-4bit This gemma model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: ntvcie\n- License: apache-2.0\n- Finetuned from model : unsloth/gemma-2b-bnb-4bit\n\nThis gemma model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #pytorch #gemma #text-generation #text-generation-inference #unsloth #trl #en #base_model-unsloth/gemma-2b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: ntvcie\n- License: apache-2.0\n- Finetuned from model : unsloth/gemma-2b-bnb-4bit\n\nThis gemma model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
null
--- license: base_model: - timpal0l/Mistral-7B-v0.1-flashback-v2-instruct - Debbevi/Experiment1-7B
{}
Debbevi/Experiment2-7B
null
[ "region:us" ]
null
2024-04-20T09:45:06+00:00
[]
[]
TAGS #region-us
--- license: base_model: - timpal0l/Mistral-7B-v0.1-flashback-v2-instruct - Debbevi/Experiment1-7B
[]
[ "TAGS\n#region-us \n" ]
image-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # idbwtiny This model is a fine-tuned version of [facebook/convnext-tiny-224](https://huggingface.co/facebook/convnext-tiny-224) on the sooks/id2 dataset. It achieves the following results on the evaluation set: - eval_loss: 0.0155 - eval_accuracy: 0.9953 - eval_runtime: 301.1388 - eval_samples_per_second: 178.967 - eval_steps_per_second: 22.372 - epoch: 6.37 - step: 12963 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 150 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
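Inference with the fine-tuned checkpoint follows the standard image-classification pipeline. A minimal sketch (not from the card); the image path is a placeholder:

```python
from transformers import pipeline

# Load the fine-tuned ConvNeXt-tiny classifier and score one image.
classifier = pipeline("image-classification", model="sooks/idbwtiny")
print(classifier("example.jpg"))  # top predicted labels with scores
```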
{"license": "apache-2.0", "tags": ["image-classification", "generated_from_trainer"], "base_model": "facebook/convnext-tiny-224", "model-index": [{"name": "idbwtiny", "results": []}]}
sooks/idbwtiny
null
[ "transformers", "tensorboard", "safetensors", "convnext", "image-classification", "generated_from_trainer", "base_model:facebook/convnext-tiny-224", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:46:25+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #convnext #image-classification #generated_from_trainer #base_model-facebook/convnext-tiny-224 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# idbwtiny This model is a fine-tuned version of facebook/convnext-tiny-224 on the sooks/id2 dataset. It achieves the following results on the evaluation set: - eval_loss: 0.0155 - eval_accuracy: 0.9953 - eval_runtime: 301.1388 - eval_samples_per_second: 178.967 - eval_steps_per_second: 22.372 - epoch: 6.37 - step: 12963 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 150 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
[ "# idbwtiny\n\nThis model is a fine-tuned version of facebook/convnext-tiny-224 on the sooks/id2 dataset.\nIt achieves the following results on the evaluation set:\n- eval_loss: 0.0155\n- eval_accuracy: 0.9953\n- eval_runtime: 301.1388\n- eval_samples_per_second: 178.967\n- eval_steps_per_second: 22.372\n- epoch: 6.37\n- step: 12963", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 150\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10\n- mixed_precision_training: Native AMP", "### Framework versions\n\n- Transformers 4.38.2\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #convnext #image-classification #generated_from_trainer #base_model-facebook/convnext-tiny-224 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# idbwtiny\n\nThis model is a fine-tuned version of facebook/convnext-tiny-224 on the sooks/id2 dataset.\nIt achieves the following results on the evaluation set:\n- eval_loss: 0.0155\n- eval_accuracy: 0.9953\n- eval_runtime: 301.1388\n- eval_samples_per_second: 178.967\n- eval_steps_per_second: 22.372\n- epoch: 6.37\n- step: 12963", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 150\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10\n- mixed_precision_training: Native AMP", "### Framework versions\n\n- Transformers 4.38.2\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # micro_base_help_class_no_pre_seed_0 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.9454 - Accuracy: 0.8456 - F1 Macro: 0.6500 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 0 - optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:| | 0.3239 | 1.0 | 313 | 0.3666 | 0.8572 | 0.5370 | | 0.3208 | 2.0 | 626 | 0.3962 | 0.8536 | 0.4632 | | 0.2688 | 3.0 | 939 | 0.3881 | 0.8622 | 0.5912 | | 0.2105 | 4.0 | 1252 | 0.5269 | 0.8616 | 0.5922 | | 0.1625 | 5.0 | 1565 | 0.6255 | 0.859 | 0.6338 | | 0.1188 | 6.0 | 1878 | 0.8231 | 0.8572 | 0.6169 | | 0.052 | 7.0 | 2191 | 0.8230 | 0.8616 | 0.6189 | | 0.053 | 8.0 | 2504 | 0.9466 | 0.8422 | 0.6496 | | 0.0365 | 9.0 | 2817 | 0.9747 | 0.8556 | 0.6365 | | 0.0452 | 10.0 | 3130 | 0.9923 | 0.8578 | 0.6360 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
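The checkpoint can be queried with the standard text-classification pipeline. A minimal sketch (not from the card); since the fine-tuning dataset is undocumented, treat the returned label names as opaque class ids:

```python
from transformers import pipeline

# Score a single input with the fine-tuned RoBERTa classifier.
clf = pipeline(
    "text-classification",
    model="BigTMiami/micro_base_help_class_no_pre_seed_0",
)
print(clf("This review told me exactly what I needed to know."))
```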
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "roberta-base", "model-index": [{"name": "micro_base_help_class_no_pre_seed_0", "results": []}]}
BigTMiami/micro_base_help_class_no_pre_seed_0
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:46:48+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_no\_pre\_seed\_0 ========================================== This model is a fine-tuned version of roberta-base on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.9454 * Accuracy: 0.8456 * F1 Macro: 0.6500 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 0 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 0\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 0\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
image-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # idbwbase This model is a fine-tuned version of [facebook/convnext-base-224-22k](https://huggingface.co/facebook/convnext-base-224-22k) on the indian_food_images dataset. It achieves the following results on the evaluation set: - Loss: 0.0978 - Accuracy: 0.9741 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 64 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.373 | 1.0 | 4709 | 0.3002 | 0.8705 | | 0.3244 | 2.0 | 9418 | 0.2262 | 0.9044 | | 0.2801 | 3.0 | 14127 | 0.1987 | 0.9196 | | 0.2366 | 4.0 | 18836 | 0.1788 | 0.9345 | | 0.2051 | 5.0 | 23545 | 0.1463 | 0.9484 | | 0.1764 | 6.0 | 28254 | 0.1202 | 0.9593 | | 0.1595 | 7.0 | 32963 | 0.1243 | 0.9655 | | 0.1359 | 8.0 | 37672 | 0.1188 | 0.9659 | | 0.1231 | 9.0 | 42381 | 0.0978 | 0.9741 | | 0.1162 | 10.0 | 47090 | 0.1001 | 0.9761 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "tags": ["image-classification", "generated_from_trainer"], "metrics": ["accuracy"], "base_model": "facebook/convnext-base-224-22k", "model-index": [{"name": "idbwbase", "results": []}]}
sooks/idbwbase
null
[ "transformers", "tensorboard", "safetensors", "convnext", "image-classification", "generated_from_trainer", "base_model:facebook/convnext-base-224-22k", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:46:51+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #convnext #image-classification #generated_from_trainer #base_model-facebook/convnext-base-224-22k #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
idbwbase ======== This model is a fine-tuned version of facebook/convnext-base-224-22k on the indian\_food\_images dataset. It achieves the following results on the evaluation set: * Loss: 0.0978 * Accuracy: 0.9741 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0002 * train\_batch\_size: 64 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 10 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.38.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #convnext #image-classification #generated_from_trainer #base_model-facebook/convnext-base-224-22k #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 0.001_ablation_declr_4iters_iter_1 This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the HuggingFaceH4/ultrafeedback_binarized dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 2 - total_train_batch_size: 128 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
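The batch-size totals in the card follow directly from the per-device settings; a quick sanity check:

```python
# The reported totals are derived from the per-device hyperparameters above.
per_device_batch = 8   # train_batch_size / eval_batch_size
num_devices = 8        # num_devices
grad_accum = 2         # gradient_accumulation_steps

assert per_device_batch * num_devices * grad_accum == 128  # total_train_batch_size
assert per_device_batch * num_devices == 64                # total_eval_batch_size (no accumulation at eval)
```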
{"license": "mit", "tags": ["alignment-handbook", "generated_from_trainer", "trl", "dpo", "generated_from_trainer"], "datasets": ["HuggingFaceH4/ultrafeedback_binarized"], "base_model": "HuggingFaceH4/mistral-7b-sft-beta", "model-index": [{"name": "0.001_ablation_declr_4iters_iter_1", "results": []}]}
ShenaoZ/0.001_ablation_declr_4iters_iter_1
null
[ "transformers", "safetensors", "mistral", "text-generation", "alignment-handbook", "generated_from_trainer", "trl", "dpo", "conversational", "dataset:HuggingFaceH4/ultrafeedback_binarized", "base_model:HuggingFaceH4/mistral-7b-sft-beta", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T09:46:53+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-HuggingFaceH4/mistral-7b-sft-beta #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# 0.001_ablation_declr_4iters_iter_1 This model is a fine-tuned version of HuggingFaceH4/mistral-7b-sft-beta on the HuggingFaceH4/ultrafeedback_binarized dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 2 - total_train_batch_size: 128 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
[ "# 0.001_ablation_declr_4iters_iter_1\n\nThis model is a fine-tuned version of HuggingFaceH4/mistral-7b-sft-beta on the HuggingFaceH4/ultrafeedback_binarized dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-06\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 128\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-HuggingFaceH4/mistral-7b-sft-beta #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# 0.001_ablation_declr_4iters_iter_1\n\nThis model is a fine-tuned version of HuggingFaceH4/mistral-7b-sft-beta on the HuggingFaceH4/ultrafeedback_binarized dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-06\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 128\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
null
transformers
# Uploaded model - **Developed by:** odessy - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
odessy/llama3-8b-oig-unsloth
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:48:09+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: odessy - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: odessy\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: odessy\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
adapter-transformers
# Adapter `BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_0` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_0", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
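After loading, the bundled classification head can be used directly. A minimal end-to-end sketch expanding the snippet in the card; the example sentence is illustrative, and the label meaning depends on the helpfulness dataset, which this card does not document:

```python
import torch
from adapters import AutoAdapterModel
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = AutoAdapterModel.from_pretrained("roberta-base")
model.load_adapter(
    "BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_0", source="hf", set_active=True
)

# Score a review with the adapter's classification head.
inputs = tokenizer("Great review, very detailed.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.softmax(dim=-1))
```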
{"tags": ["adapter-transformers", "roberta"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_0
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T09:49:08+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_0' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_0' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_0' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
null
keras
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | name | Adam | | weight_decay | None | | clipnorm | None | | global_clipnorm | None | | clipvalue | None | | use_ema | False | | ema_momentum | 0.99 | | ema_overwrite_frequency | None | | jit_compile | False | | is_legacy_optimizer | False | | learning_rate | 0.0010000000474974513 | | beta_1 | 0.9 | | beta_2 | 0.999 | | epsilon | 1e-07 | | amsgrad | False | | training_precision | float32 | ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
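For reference, the optimizer row-by-row corresponds to a stock Keras Adam configuration. A reconstruction of the listed values only, not the saved optimizer state:

```python
import tensorflow as tf

# Adam exactly as described in the hyperparameter table above.
optimizer = tf.keras.optimizers.Adam(
    learning_rate=0.0010000000474974513,  # float32 rendering of 1e-3
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-07,
    amsgrad=False,
    use_ema=False,
    ema_momentum=0.99,
)
```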
{"library_name": "keras"}
AndreiUrsu/EmotionClasificationCVRepoUrsuAndrei
null
[ "keras", "safetensors", "vit", "region:us" ]
null
2024-04-20T09:49:25+00:00
[]
[]
TAGS #keras #safetensors #vit #region-us
Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: Model Plot ---------- View Model Plot !Model Image
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n\nModel Plot\n----------\n\n\n\nView Model Plot\n!Model Image" ]
[ "TAGS\n#keras #safetensors #vit #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n\nModel Plot\n----------\n\n\n\nView Model Plot\n!Model Image" ]
text-generation
transformers
# Gemma 2B Translation v0.110 - Eval Loss: `0.59812` - Train Loss: `0.40320` - lr: `6e-05` - optimizer: adamw - lr_scheduler_type: cosine ## Prompt Template ``` <bos>### English Hamsters don't eat cats. ### Korean 햄스터는 고양이를 먹지 않습니다.<eos> ``` ``` <bos>### Korean 햄스터는 고양이를 먹지 않습니다. ### English Hamsters don't eat cats.<eos> ``` ## Model Description - **Developed by:** `lemon-mint` - **Model type:** Gemma - **Language(s) (NLP):** English - **License:** [gemma-terms-of-use](https://ai.google.dev/gemma/terms) - **Finetuned from model:** [beomi/gemma-ko-2b](https://huggingface.co/beomi/gemma-ko-2b)
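A minimal generation sketch using the English-to-Korean direction of the template above; the device placement and generation settings are illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "lemon-mint/gemma-2b-translation-v0.110"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# English -> Korean, following the prompt template; the tokenizer adds <bos>.
prompt = "### English\nHamsters don't eat cats.\n\n### Korean\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```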
{"language": ["ko"], "license": "gemma", "library_name": "transformers", "tags": ["gemma", "pytorch", "instruct", "finetune", "translation"], "datasets": ["traintogpb/aihub-flores-koen-integrated-sparta-30k", "lemon-mint/korean_high_quality_translation_426k"], "widget": [{"messages": [{"role": "user", "content": "Hamsters don't eat cats."}]}], "inference": {"parameters": {"max_new_tokens": 2048}}, "base_model": "beomi/gemma-ko-2b", "pipeline_tag": "text-generation"}
lemon-mint/gemma-2b-translation-v0.110
null
[ "transformers", "safetensors", "gemma", "text-generation", "pytorch", "instruct", "finetune", "translation", "conversational", "ko", "dataset:traintogpb/aihub-flores-koen-integrated-sparta-30k", "dataset:lemon-mint/korean_high_quality_translation_426k", "base_model:beomi/gemma-ko-2b", "license:gemma", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T09:49:39+00:00
[]
[ "ko" ]
TAGS #transformers #safetensors #gemma #text-generation #pytorch #instruct #finetune #translation #conversational #ko #dataset-traintogpb/aihub-flores-koen-integrated-sparta-30k #dataset-lemon-mint/korean_high_quality_translation_426k #base_model-beomi/gemma-ko-2b #license-gemma #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Gemma 2B Translation v0.110 - Eval Loss: '0.59812' - Train Loss: '0.40320' - lr: '6e-05' - optimizer: adamw - lr_scheduler_type: cosine ## Prompt Template ## Model Description - Developed by: 'lemon-mint' - Model type: Gemma - Language(s) (NLP): English - License: gemma-terms-of-use - Finetuned from model: beomi/gemma-ko-2b
[ "# Gemma 2B Translation v0.110\n\n- Eval Loss: '0.59812'\n- Train Loss: '0.40320'\n- lr: '6e-05'\n- optimizer: adamw\n- lr_scheduler_type: cosine", "## Prompt Template", "## Model Description\n\n- Developed by: 'lemon-mint'\n- Model type: Gemma\n- Language(s) (NLP): English\n- License: gemma-terms-of-use\n- Finetuned from model: beomi/gemma-ko-2b" ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #pytorch #instruct #finetune #translation #conversational #ko #dataset-traintogpb/aihub-flores-koen-integrated-sparta-30k #dataset-lemon-mint/korean_high_quality_translation_426k #base_model-beomi/gemma-ko-2b #license-gemma #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Gemma 2B Translation v0.110\n\n- Eval Loss: '0.59812'\n- Train Loss: '0.40320'\n- lr: '6e-05'\n- optimizer: adamw\n- lr_scheduler_type: cosine", "## Prompt Template", "## Model Description\n\n- Developed by: 'lemon-mint'\n- Model type: Gemma\n- Language(s) (NLP): English\n- License: gemma-terms-of-use\n- Finetuned from model: beomi/gemma-ko-2b" ]
null
null
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Mistral-7B-Instruct-v0.2-absa-MT-restaurants This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0072 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 2 - training_steps: 1200 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.8237 | 0.13 | 40 | 0.1383 | | 0.0623 | 0.25 | 80 | 0.0213 | | 0.0199 | 0.38 | 120 | 0.0176 | | 0.0178 | 0.5 | 160 | 0.0153 | | 0.0153 | 0.63 | 200 | 0.0141 | | 0.0136 | 0.75 | 240 | 0.0127 | | 0.0111 | 0.88 | 280 | 0.0121 | | 0.0117 | 1.0 | 320 | 0.0123 | | 0.0091 | 1.13 | 360 | 0.0117 | | 0.0102 | 1.25 | 400 | 0.0107 | | 0.0081 | 1.38 | 440 | 0.0106 | | 0.0097 | 1.5 | 480 | 0.0100 | | 0.0091 | 1.63 | 520 | 0.0092 | | 0.0079 | 1.75 | 560 | 0.0096 | | 0.0074 | 1.88 | 600 | 0.0089 | | 0.0075 | 2.0 | 640 | 0.0092 | | 0.0043 | 2.13 | 680 | 0.0088 | | 0.0053 | 2.26 | 720 | 0.0092 | | 0.0047 | 2.38 | 760 | 0.0084 | | 0.0041 | 2.51 | 800 | 0.0082 | | 0.005 | 2.63 | 840 | 0.0080 | | 0.005 | 2.76 | 880 | 0.0072 | | 0.0045 | 2.88 | 920 | 0.0069 | | 0.0034 | 3.01 | 960 | 0.0071 | | 0.0021 | 3.13 | 1000 | 0.0075 | | 0.0021 | 3.26 | 1040 | 0.0075 | | 0.0018 | 3.38 | 1080 | 0.0077 | | 0.0019 | 3.51 | 1120 | 0.0073 | | 0.0018 | 3.63 | 1160 | 0.0075 | | 0.0021 | 3.76 | 1200 | 0.0072 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
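The tag list below suggests an adapter-only upload rather than full model weights; if so, the fine-tune can be attached to the base model with PEFT. A sketch under that assumption:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "mistralai/Mistral-7B-Instruct-v0.2"
adapter_id = "Shakhovak/Mistral-7B-Instruct-v0.2-absa-MT-restaurants"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attach the fine-tuned adapter
```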
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "base_model": "mistralai/Mistral-7B-Instruct-v0.2", "model-index": [{"name": "Mistral-7B-Instruct-v0.2-absa-MT-restaurants", "results": []}]}
Shakhovak/Mistral-7B-Instruct-v0.2-absa-MT-restaurants
null
[ "generated_from_trainer", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "license:apache-2.0", "region:us" ]
null
2024-04-20T09:50:49+00:00
[]
[]
TAGS #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.2 #license-apache-2.0 #region-us
Mistral-7B-Instruct-v0.2-absa-MT-restaurants ============================================ This model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.2 on the None dataset. It achieves the following results on the evaluation set: * Loss: 0.0072 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 3e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 32 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 2 * training\_steps: 1200 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.38.2 * Pytorch 2.2.1+cu121 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 3e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 2\n* training\\_steps: 1200\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.2 #license-apache-2.0 #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 3e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 2\n* training\\_steps: 1200\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
text-to-image
diffusers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🧨 diffusers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
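The "How to Get Started" section of this card is empty; given the `diffusers:StableDiffusionPipeline` tag below, a minimal loading sketch would look like this (prompt, dtype, and device choices are illustrative):

```python
import torch
from diffusers import StableDiffusionPipeline

# The repo tags identify a StableDiffusionPipeline checkpoint.
pipe = StableDiffusionPipeline.from_pretrained(
    "Niggendar/RealErodeityX_x2", torch_dtype=torch.float16
).to("cuda")
image = pipe("a photo of a mountain lake at dawn").images[0]  # placeholder prompt
image.save("output.png")
```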
{"library_name": "diffusers"}
Niggendar/RealErodeityX_x2
null
[ "diffusers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
null
2024-04-20T09:50:51+00:00
[ "1910.09700" ]
[]
TAGS #diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
transformers
# Uploaded model - **Developed by:** dattaraj - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
dattaraj/model
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:51:46+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: dattaraj - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: dattaraj\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: dattaraj\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
# Uploaded model - **Developed by:** ChiragAI12 - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
ChiragAI12/llama3-8b-oig-unsloth-merged
null
[ "transformers", "pytorch", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:53:05+00:00
[]
[ "en" ]
TAGS #transformers #pytorch #llama #text-generation #text-generation-inference #unsloth #trl #sft #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: ChiragAI12 - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library.
[ "# Uploaded model\n\n- Developed by: ChiragAI12\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library." ]
[ "TAGS\n#transformers #pytorch #llama #text-generation #text-generation-inference #unsloth #trl #sft #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: ChiragAI12\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library." ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 0.0_ablation_declr_5iters5e7_iter_3 This model is a fine-tuned version of [ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2](https://huggingface.co/ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2) on the ZhangShenao/0.0_ablation_declr_5iters5e7_dataset dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 2 - total_train_batch_size: 128 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
{"license": "mit", "tags": ["alignment-handbook", "generated_from_trainer", "trl", "dpo", "generated_from_trainer"], "datasets": ["ZhangShenao/0.0_ablation_declr_5iters5e7_dataset"], "base_model": "ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2", "model-index": [{"name": "0.0_ablation_declr_5iters5e7_iter_3", "results": []}]}
ZhangShenao/0.0_ablation_declr_5iters5e7_iter_3
null
[ "transformers", "safetensors", "mistral", "text-generation", "alignment-handbook", "generated_from_trainer", "trl", "dpo", "conversational", "dataset:ZhangShenao/0.0_ablation_declr_5iters5e7_dataset", "base_model:ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T09:53:53+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-ZhangShenao/0.0_ablation_declr_5iters5e7_dataset #base_model-ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2 #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# 0.0_ablation_declr_5iters5e7_iter_3 This model is a fine-tuned version of ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2 on the ZhangShenao/0.0_ablation_declr_5iters5e7_dataset dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 2 - total_train_batch_size: 128 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
[ "# 0.0_ablation_declr_5iters5e7_iter_3\n\nThis model is a fine-tuned version of ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2 on the ZhangShenao/0.0_ablation_declr_5iters5e7_dataset dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 3e-07\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 128\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-ZhangShenao/0.0_ablation_declr_5iters5e7_dataset #base_model-ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2 #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# 0.0_ablation_declr_5iters5e7_iter_3\n\nThis model is a fine-tuned version of ZhangShenao/0.0_ablation_declr_5iters5e7_iter_2 on the ZhangShenao/0.0_ablation_declr_5iters5e7_dataset dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 3e-07\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 128\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
null
transformers
# Uploaded model - **Developed by:** mychen76 - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
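For context, a checkpoint exported this way can usually be loaded back through Unsloth for inference. A minimal sketch (the sequence length is an illustrative assumption, not a value from this card):

```python
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="mychen76/life_book_llama3_8b_lora_v1",
    max_seq_length=2048,  # assumed; match whatever the adapter was trained with
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch on Unsloth's faster inference path

inputs = tokenizer("Hello!", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0], skip_special_tokens=True))
```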
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
mychen76/life_book_llama3_8b_lora_v1
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:54:07+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: mychen76 - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: mychen76\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: mychen76\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
null
The Genius Wave :- It sounds like you might be referring to "The Genius Wave," a concept or term related to audio. However, it's not something widely recognized as a specific audio track or term in the mainstream. "The Genius Wave" could refer to a few different things depending on context. Are you looking for a specific audio track with that name, or are you interested in information about audio tracks created by geniuses or with a genius theme? Provide a bit more context or clarify what "The Genius Wave" refers to, and I can assist you more accurately! Click Here Main Site - https://www.santacruzsentinel.com/2024/04/15/the-genius-wave-review-2024-updated-does-it-really-work-theta-waves-or-is-it-just-a-scam/ https://www.facebook.com/profile.php?id=61558258543833 https://www.facebook.com/profile.php?id=61558509364259 https://www.facebook.com/people/The-Genius-Wave-Audio-Review-2024/100076646734504/ https://www.facebook.com/people/The-Genius-Wave-Audio-Reviews/61558736814207/ https://the-genius-w-a-v-e-reviews.jimdosite.com/ https://the-genius-wave-2024.jimdosite.com/ https://genius-wave-review.jimdosite.com/ https://the-genius-wave-reviews.jimdosite.com/ https://the-genius-wave-order.jimdosite.com/ https://sites.google.com/view/genius-wave-scam/ https://sites.google.com/view/genius-wave--audio/ https://sites.google.com/view/the-genius-waves-reviews/ https://worldhealthcarewebsite.blogspot.com/2024/04/TheGeniusWaveReviews.html https://e-genius-wave.blogspot.com/2024/04/TheGeniusWaveReviews.html https://the-genius-wave-review.blogspot.com/2024/04/The-Genius-Wave-Audio.html https://medium.com/@trygeniuswave/the-impact-of-the-genius-wave-audio-on-physical-health-a-comprehensive-guide-does-it-work-2024-a8054b073524 https://medium.com/@trygeniuswave/the-genius-wave-audio-and-complaints-alert-fake-website-what-is-it-ebc092335888 https://medium.com/@geniuswavetry/the-genius-wave-reviews-2024-sound-frequency-and-audio-soundtrack-43bf3573e89d https://medium.com/@susangiwallis/the-genius-wave-usa-exclusive-report-boosts-concentration-and-focus-trustworthy-or-not-1609d7361655 https://medium.com/@carlacovert/the-genius-wave-does-this-audio-track-really-work-86224f8404cf https://medium.com/@geniuswavetry/the-truth-the-genius-wave-reviews-scam-theta-waves-expert-opinions-usa-2024-buy-now-7955ed8e43e6 https://medium.com/@geniuswavereviews/8-tips-to-becoming-a-genius-wave-the-genius-wavereal-or-scam-986a3d4b0e44 https://medium.com/@thegeniuswave_65858/the-genius-wave-audio-track-personal-growth-tool-2024-update-official-website-or-fake-results-255d2246813a https://medium.com/@thegeniuswave_65858/big-alert-the-genius-wave-legit-or-scam-updated-2024-beware-fake-ads-theta-waves-scam-96baf34cd4c2 https://medium.com/@stacycartee/the-genius-wave-audio-track-insights-and-consumer-safety-should-i-buy-it-price-scam-exp-fa8c1af83c6d https://medium.com/@josetocco/the-genius-wave-review-enhance-your-cognitive-abilities-f7386fcbde24 https://the-genius-wave-scam.webflow.io/ https://geniuswavereviews.webflow.io/ https://the-genius-wave-official-revie-bcbc43.webflow.io/ https://genius-wave-review.webflow.io/ https://the-genius-wave-bf9275.webflow.io/ https://the-genius-wave-e8a837.webflow.io/ https://genius-wave-audio.company.site/ https://the-genius-wave-program.company.site/ https://the-genius-wave-audio.company.site/ https://the-genius-wave-official.company.site/ https://the-genius-wave-orders.company.site/ https://genius-wave-review.company.site/
https://the-genius-wave-reviews-2024.company.site/ https://thegeniuswaveaudio29.godaddysites.com/ https://thegeniuswavereviews7.godaddysites.com/ https://www.eventbrite.com/e/apply-these-9-secret-techniques-to-improve-the-genius-wave-audio-tickets-886653964877?aff=oddtdtcreator https://www.eventbrite.com/e/the-genius-wave-6-ways-to-use-7-minute-audio-for-become-genius-live-stress-free-life-tickets-886641547737 https://www.eventbrite.com/e/the-genius-wave-audio-2024-improves-in-achieving-objectives-quicker-tickets-886044602257 https://thegeniuswaves.wixsite.com/order-now https://genius-wave.clubeo.com/page/the-genius-wave-review-24-april-live-sale-revolutionary-soundwave-system-to-improve-brain-ability.html https://nycdepartmentoffinance.powerappsportals.us/forums/general-discussion/691ab3f5-defe-ee11-a73d-001dd8305ba3 https://nycdepartmentoffinance.powerappsportals.us/forums/general-discussion/bdd4cf7f-dffe-ee11-a73d-001dd8305ba3 https://middlesexcountynj.powerappsportals.us/forums/general-discussion/013b4533-dffe-ee11-a73d-001dd80b215a https://middlesexcountynj.powerappsportals.us/forums/general-discussion/8e21fe49-dcfe-ee11-a73d-001dd80b215a https://middlesexcountynj.powerappsportals.us/forums/general-discussion/b9432a3b-dbfe-ee11-a73d-001dd80b215a
{}
trygeniuswave/TheGeniusWaveAudio
null
[ "region:us" ]
null
2024-04-20T09:55:00+00:00
[]
[]
TAGS #region-us
The Genius Wave :- It sounds like you might be referring to "The Genius Wave," a concept or term related to audio. However, it's not something widely recognized as a specific audio track or term in the mainstream. Could you provide more context or clarify what "The Genius Wave" refers to? That way, I can assist you more accurately. could refer to a few different things depending on context. Are you looking for a specific audio track with that name, or are you interested in information about audio tracks created by geniuses or with a genius theme? Let me know how I can assist you further! Click Here Main Site - URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL URL
[]
[ "TAGS\n#region-us \n" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # deberta-v2-xxlarge-otat-recommened-hp This model is a fine-tuned version of [microsoft/deberta-v2-xxlarge](https://huggingface.co/microsoft/deberta-v2-xxlarge) on the DandinPower/review_onlytitleandtext dataset. It achieves the following results on the evaluation set: - Loss: 0.7864 - Accuracy: 0.6741 - Macro F1: 0.6719 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-06 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 64 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1 - num_epochs: 5 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Macro F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:| | 0.9641 | 0.46 | 200 | 0.8451 | 0.6327 | 0.6341 | | 0.8263 | 0.91 | 400 | 0.7768 | 0.6651 | 0.6650 | | 0.7605 | 1.37 | 600 | 0.7842 | 0.667 | 0.6667 | | 0.7496 | 1.83 | 800 | 0.7790 | 0.6659 | 0.6650 | | 0.7034 | 2.29 | 1000 | 0.7738 | 0.67 | 0.6639 | | 0.7134 | 2.74 | 1200 | 0.7671 | 0.6694 | 0.6698 | | 0.6839 | 3.2 | 1400 | 0.7754 | 0.6743 | 0.6770 | | 0.6699 | 3.66 | 1600 | 0.7853 | 0.6711 | 0.6666 | | 0.6502 | 4.11 | 1800 | 0.7789 | 0.671 | 0.6692 | | 0.6431 | 4.57 | 2000 | 0.7864 | 0.6741 | 0.6719 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.2.2+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
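A quick way to try the fine-tuned checkpoint is the `text-classification` pipeline; a short sketch (the example review text is made up):

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="DandinPower/deberta-v2-xxlarge-otat-recommened-hp",
)
print(classifier("Great title; the product matched the description perfectly."))
```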
{"language": ["en"], "license": "mit", "tags": ["nycu-112-2-datamining-hw2", "generated_from_trainer"], "datasets": ["DandinPower/review_onlytitleandtext"], "metrics": ["accuracy"], "base_model": "microsoft/deberta-v2-xxlarge", "model-index": [{"name": "deberta-v2-xxlarge-otat-recommened-hp", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "DandinPower/review_onlytitleandtext", "type": "DandinPower/review_onlytitleandtext"}, "metrics": [{"type": "accuracy", "value": 0.6741428571428572, "name": "Accuracy"}]}]}]}
DandinPower/deberta-v2-xxlarge-otat-recommened-hp
null
[ "transformers", "safetensors", "deberta-v2", "text-classification", "nycu-112-2-datamining-hw2", "generated_from_trainer", "en", "dataset:DandinPower/review_onlytitleandtext", "base_model:microsoft/deberta-v2-xxlarge", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:55:52+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #deberta-v2 #text-classification #nycu-112-2-datamining-hw2 #generated_from_trainer #en #dataset-DandinPower/review_onlytitleandtext #base_model-microsoft/deberta-v2-xxlarge #license-mit #model-index #autotrain_compatible #endpoints_compatible #region-us
deberta-v2-xxlarge-otat-recommened-hp ===================================== This model is a fine-tuned version of microsoft/deberta-v2-xxlarge on the DandinPower/review\_onlytitleandtext dataset. It achieves the following results on the evaluation set: * Loss: 0.7864 * Accuracy: 0.6741 * Macro F1: 0.6719 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 3e-06 * train\_batch\_size: 1 * eval\_batch\_size: 1 * seed: 42 * gradient\_accumulation\_steps: 64 * total\_train\_batch\_size: 64 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 1 * num\_epochs: 5 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.39.3 * Pytorch 2.2.2+cu121 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 3e-06\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* gradient\\_accumulation\\_steps: 64\n* total\\_train\\_batch\\_size: 64\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 1\n* num\\_epochs: 5\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.2.2+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #deberta-v2 #text-classification #nycu-112-2-datamining-hw2 #generated_from_trainer #en #dataset-DandinPower/review_onlytitleandtext #base_model-microsoft/deberta-v2-xxlarge #license-mit #model-index #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 3e-06\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* gradient\\_accumulation\\_steps: 64\n* total\\_train\\_batch\\_size: 64\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 1\n* num\\_epochs: 5\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.2.2+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
reinforcement-learning
stable-baselines3
# **A2C** Agent playing **PandaReachDense-v3**
This is a trained model of an **A2C** agent playing **PandaReachDense-v3**
using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).

## Usage (with Stable-baselines3)
A minimal loading sketch (the checkpoint filename follows the usual `huggingface_sb3` naming convention and is assumed):

```python
from stable_baselines3 import A2C
from huggingface_sb3 import load_from_hub

# Filename assumed per the usual huggingface_sb3 convention.
checkpoint = load_from_hub("lacknerm/a2c-PandaReachDense-v3", "a2c-PandaReachDense-v3.zip")
model = A2C.load(checkpoint)
```
{"library_name": "stable-baselines3", "tags": ["PandaReachDense-v3", "deep-reinforcement-learning", "reinforcement-learning", "stable-baselines3"], "model-index": [{"name": "A2C", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "PandaReachDense-v3", "type": "PandaReachDense-v3"}, "metrics": [{"type": "mean_reward", "value": "-0.23 +/- 0.10", "name": "mean_reward", "verified": false}]}]}]}
lacknerm/a2c-PandaReachDense-v3
null
[ "stable-baselines3", "PandaReachDense-v3", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
null
2024-04-20T09:56:01+00:00
[]
[]
TAGS #stable-baselines3 #PandaReachDense-v3 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us
# A2C Agent playing PandaReachDense-v3 This is a trained model of a A2C agent playing PandaReachDense-v3 using the stable-baselines3 library. ## Usage (with Stable-baselines3) TODO: Add your code
[ "# A2C Agent playing PandaReachDense-v3\nThis is a trained model of a A2C agent playing PandaReachDense-v3\nusing the stable-baselines3 library.", "## Usage (with Stable-baselines3)\nTODO: Add your code" ]
[ "TAGS\n#stable-baselines3 #PandaReachDense-v3 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n", "# A2C Agent playing PandaReachDense-v3\nThis is a trained model of a A2C agent playing PandaReachDense-v3\nusing the stable-baselines3 library.", "## Usage (with Stable-baselines3)\nTODO: Add your code" ]
null
peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.10.0
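Since the quick-start section above is empty, one plausible way to load this adapter on its Mistral base is sketched below (this assumes the repo holds a standard PEFT/LoRA adapter, which the `peft` and `safetensors` tags suggest):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
# Attach the fine-tuned adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base, "PussyEater/Enlighten_Instruct")
```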
{"library_name": "peft", "base_model": "mistralai/Mistral-7B-Instruct-v0.2"}
PussyEater/Enlighten_Instruct
null
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "region:us" ]
null
2024-04-20T09:56:48+00:00
[ "1910.09700" ]
[]
TAGS #peft #safetensors #arxiv-1910.09700 #base_model-mistralai/Mistral-7B-Instruct-v0.2 #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact ### Framework versions - PEFT 0.10.0
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
[ "TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-mistralai/Mistral-7B-Instruct-v0.2 #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # micro_base_help_class_no_pre_seed_1 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8288 - Accuracy: 0.8506 - F1 Macro: 0.6496 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 1 - optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:| | 0.3574 | 1.0 | 313 | 0.3915 | 0.8544 | 0.5436 | | 0.3281 | 2.0 | 626 | 0.3468 | 0.8542 | 0.6399 | | 0.2822 | 3.0 | 939 | 0.4199 | 0.8618 | 0.5364 | | 0.2347 | 4.0 | 1252 | 0.4397 | 0.8552 | 0.6567 | | 0.1707 | 5.0 | 1565 | 0.5268 | 0.8584 | 0.6031 | | 0.0992 | 6.0 | 1878 | 0.8439 | 0.85 | 0.6609 | | 0.0891 | 7.0 | 2191 | 0.8919 | 0.8506 | 0.6486 | | 0.0506 | 8.0 | 2504 | 0.9836 | 0.857 | 0.6425 | | 0.0387 | 9.0 | 2817 | 1.0062 | 0.8536 | 0.6354 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
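For completeness, the checkpoint can be loaded directly for inference; a minimal sketch (the example sentence is illustrative, and the label mapping depends on the unnamed dataset):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "BigTMiami/micro_base_help_class_no_pre_seed_1"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)

inputs = tokenizer("This review was very helpful.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1))
```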
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "roberta-base", "model-index": [{"name": "micro_base_help_class_no_pre_seed_1", "results": []}]}
BigTMiami/micro_base_help_class_no_pre_seed_1
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:58:46+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_no\_pre\_seed\_1 ========================================== This model is a fine-tuned version of roberta-base on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.8288 * Accuracy: 0.8506 * F1 Macro: 0.6496 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 1 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 1\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 1\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
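The quick-start section is empty, but given the repo's `llama` and `text-generation` tags, a generic loading sketch would look like this (all usage details below are assumptions, not documented behavior):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("cilantro9246/cvh9yi8")
model = AutoModelForCausalLM.from_pretrained("cilantro9246/cvh9yi8")

inputs = tokenizer("Hello!", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```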
{"library_name": "transformers", "tags": []}
cilantro9246/cvh9yi8
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T09:58:49+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
adapter-transformers
# Adapter `BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_1` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_1", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
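Once the adapter is loaded and active as shown in the usage snippet above, inference works like any sequence-classification model; a short sketch (the example sentence is illustrative):

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
inputs = tokenizer("This review helped me decide what to buy.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # `model` comes from the loading snippet above
print(logits.argmax(dim=-1))
```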
{"tags": ["adapter-transformers", "roberta"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_1
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T09:59:39+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_1' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_1' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_1' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
image-classification
transformers
Finetuned from the [vit model](https://huggingface.co/google/vit-base-patch16-224) on the [cub-200-2011-birds](https://huggingface.co/datasets/pkuHaowei/cub-200-2011-birds) dataset, reaching an accuracy of 85.75%.
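A minimal inference sketch (the image path is a placeholder):

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="HaotianZG/vit-cub-200-2011-bird")
print(classifier("path/to/bird.jpg"))  # placeholder path; any bird photo works
```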
{"license": "apache-2.0"}
HaotianZG/vit-cub-200-2011-bird
null
[ "transformers", "safetensors", "vit", "image-classification", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T09:59:52+00:00
[]
[]
TAGS #transformers #safetensors #vit #image-classification #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
Finetuned from vit model on cub-200-2011-birds dataset, reached accuracy of 85.75%.
[]
[ "TAGS\n#transformers #safetensors #vit #image-classification #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pythia-410m-sft-full This model is a fine-tuned version of [EleutherAI/pythia-410m-deduped-v0](https://huggingface.co/EleutherAI/pythia-410m-deduped-v0) on the HuggingFaceH4/ultrachat_200k dataset. It achieves the following results on the evaluation set: - Loss: 1.6286 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 2 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.5669 | 1.0 | 3982 | 1.6286 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
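As a rough usage sketch (the prompt format is an assumption; check whether the tokenizer ships a chat template before relying on one):

```python
from transformers import pipeline

generator = pipeline("text-generation", model="DatPySci/pythia-410m-sft-full")
print(generator("User: What is supervised fine-tuning?\nAssistant:", max_new_tokens=64))
```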
{"license": "apache-2.0", "tags": ["alignment-handbook", "trl", "sft", "generated_from_trainer", "trl", "sft", "generated_from_trainer"], "datasets": ["HuggingFaceH4/ultrachat_200k"], "base_model": "EleutherAI/pythia-410m-deduped-v0", "model-index": [{"name": "pythia-410m-sft-full", "results": []}]}
DatPySci/pythia-410m-sft-full
null
[ "transformers", "gpt_neox", "text-generation", "alignment-handbook", "trl", "sft", "generated_from_trainer", "conversational", "dataset:HuggingFaceH4/ultrachat_200k", "base_model:EleutherAI/pythia-410m-deduped-v0", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:01:09+00:00
[]
[]
TAGS #transformers #gpt_neox #text-generation #alignment-handbook #trl #sft #generated_from_trainer #conversational #dataset-HuggingFaceH4/ultrachat_200k #base_model-EleutherAI/pythia-410m-deduped-v0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
pythia-410m-sft-full ==================== This model is a fine-tuned version of EleutherAI/pythia-410m-deduped-v0 on the HuggingFaceH4/ultrachat\_200k dataset. It achieves the following results on the evaluation set: * Loss: 1.6286 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * distributed\_type: multi-GPU * num\_devices: 2 * gradient\_accumulation\_steps: 2 * total\_train\_batch\_size: 32 * total\_eval\_batch\_size: 16 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_ratio: 0.1 * num\_epochs: 1 ### Training results ### Framework versions * Transformers 4.39.3 * Pytorch 2.1.2+cu121 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 2\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 32\n* total\\_eval\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.1.2+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #gpt_neox #text-generation #alignment-handbook #trl #sft #generated_from_trainer #conversational #dataset-HuggingFaceH4/ultrachat_200k #base_model-EleutherAI/pythia-410m-deduped-v0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 2\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 32\n* total\\_eval\\_batch\\_size: 16\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.1.2+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
null
transformers
# Uploaded model - **Developed by:** ChiragAI12 - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
ChiragAI12/llama3-8b-oig-unsloth
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:01:48+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: ChiragAI12 - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: ChiragAI12\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: ChiragAI12\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
transformers
# Uploaded model - **Developed by:** indiehackers - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-Instruct-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-Instruct-bnb-4bit"}
indiehackers/llama3-telugu-instruct
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-Instruct-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:03:49+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-Instruct-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: indiehackers - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-Instruct-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: indiehackers\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-Instruct-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-Instruct-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: indiehackers\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-Instruct-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-to-image
diffusers
# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🧨 diffusers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
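The "How to Get Started" section above is empty, so here is a hedged sketch (not from the model author): the repo tags indicate a `StableDiffusionXLPipeline` checkpoint, and loading it with diffusers should look roughly like this. The prompt, dtype, and step count are illustrative assumptions.

```python
# Minimal sketch: load the Niggendar/realAnimagineXL_v10 SDXL checkpoint with diffusers.
# Assumes a CUDA GPU; drop the .to("cuda") and torch_dtype for CPU-only runs.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "Niggendar/realAnimagineXL_v10",
    torch_dtype=torch.float16,
).to("cuda")

# Illustrative anime-style prompt; tune steps and guidance for your use case.
image = pipe(
    "1girl, masterpiece, best quality, detailed background",
    num_inference_steps=30,
    guidance_scale=7.0,
).images[0]
image.save("sample.png")
```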
{"library_name": "diffusers"}
Niggendar/realAnimagineXL_v10
null
[ "diffusers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
null
2024-04-20T10:03:52+00:00
[ "1910.09700" ]
[]
TAGS
#diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionXLPipeline #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionXLPipeline #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
null
What is Power Pulse XXL Capsule? Power Pulse XXL Tablets is a leading natural supplement made from a blend of herbal extracts, vitamins and minerals known for their rejuvenating properties. Designed to support vitality, endurance and strength, Power Pulse XXL Tablets offers a natural and sustainable approach to raising energy levels and improving overall health.

Official website: <a href="https://www.nutritionsee.com/powpulsexslo">www.PowerPulseXXL.com</a>

<p><a href="https://www.nutritionsee.com/powpulsexslo"> <img src="https://www.nutritionsee.com/wp-content/uploads/2024/04/Power-Pulse-XXL-Slovenia.png" alt="enter image description here"> </a></p>

<a href="https://www.nutritionsee.com/powpulsexslo">Buy now!! Click the link below for more information and get a 50% discount now... Hurry</a>

Official website: <a href="https://www.nutritionsee.com/powpulsexslo">www.PowerPulseXXL.com</a>
{"license": "apache-2.0"}
PowerPulseXXLSlovenia/PowerPulseXXLSlovenia
null
[ "license:apache-2.0", "region:us" ]
null
2024-04-20T10:04:47+00:00
[]
[]
TAGS
#license-apache-2.0 #region-us
What is Power Pulse XXL Capsule? Power Pulse XXL Tablets is a leading natural supplement made from a blend of herbal extracts, vitamins and minerals known for their rejuvenating properties. Designed to support vitality, endurance and strength, Power Pulse XXL Tablets offers a natural and sustainable approach to raising energy levels and improving overall health. Official website:<a href="URL <p><a href="URL <img src="URL alt="enter image description here"> </a></p> <a href="URL now!! Click the link below for more information and get a 50% discount now... Hurry</a> Official website:<a href="URL
[]
[ "TAGS\n#license-apache-2.0 #region-us \n" ]
text-to-audio
transformers
# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
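The card above gives no usage snippet; as a hedged sketch, assuming this fine-tune keeps the standard MMS-TTS interface implied by its `vits` architecture tag, inference would look roughly like this. The input sentence is illustrative.

```python
# Minimal sketch: text-to-speech with a VITS/MMS-TTS-style checkpoint.
import torch
import scipy.io.wavfile
from transformers import AutoTokenizer, VitsModel

model = VitsModel.from_pretrained("vbrydik/mms-tts-eng-finetune-v3-train")
tokenizer = AutoTokenizer.from_pretrained("vbrydik/mms-tts-eng-finetune-v3-train")

inputs = tokenizer("Hello from a fine-tuned TTS model.", return_tensors="pt")
with torch.no_grad():
    waveform = model(**inputs).waveform  # (batch, num_samples), float32

scipy.io.wavfile.write(
    "output.wav",
    rate=model.config.sampling_rate,  # 16 kHz for standard MMS-TTS models
    data=waveform.squeeze().numpy(),
)
```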
{"library_name": "transformers", "tags": []}
vbrydik/mms-tts-eng-finetune-v3-train
null
[ "transformers", "safetensors", "vits", "text-to-audio", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:06:29+00:00
[ "1910.09700" ]
[]
TAGS
#transformers #safetensors #vits #text-to-audio #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #vits #text-to-audio #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-to-image
diffusers
# Falkons Hentai API Inference

![generated from modelslab.com](https://cdn2.stablediffusionapi.com/generations/0-31acaef0-1b3b-4091-80c2-9d89ba246d2a.png)

## Get API Key

Get API key from [ModelsLab API](http://modelslab.com), No Payment needed.

Replace Key in below code, change **model_id** to "falkons-hentai"

Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://modelslab.com/docs)

Try model for free: [Generate Images](https://modelslab.com/models/falkons-hentai)

Model link: [View model](https://modelslab.com/models/falkons-hentai)

View all models: [View Models](https://modelslab.com/models)

    import requests
    import json

    url = "https://modelslab.com/api/v6/images/text2img"

    payload = json.dumps({
        "key": "your_api_key",
        "model_id": "falkons-hentai",
        "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K",
        "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
        "width": "512",
        "height": "512",
        "samples": "1",
        "num_inference_steps": "30",
        "safety_checker": "no",
        "enhance_prompt": "yes",
        "seed": None,
        "guidance_scale": 7.5,
        "multi_lingual": "no",
        "panorama": "no",
        "self_attention": "no",
        "upscale": "no",
        "embeddings": "embeddings_model_id",
        "lora": "lora_model_id",
        "webhook": None,
        "track_id": None
    })

    headers = {
        'Content-Type': 'application/json'
    }

    response = requests.request("POST", url, headers=headers, data=payload)

    print(response.text)

> Use this coupon code to get 25% off **DMGG0RBN**
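A possible follow-up to the snippet above (an editor's sketch, not from the official docs): assuming a successful response has the usual ModelsLab shape `{"status": "success", "output": ["<image_url>", ...]}`, the first generated image can be saved like this.

```python
# Hedged helper: save the first image URL from a ModelsLab text2img response.
# The {"status": ..., "output": [...]} shape is an assumption to verify in the docs.
import requests

def save_first_image(response_json: dict, path: str = "output.png") -> bool:
    if response_json.get("status") != "success":
        print("Not ready or failed:", response_json)
        return False
    img = requests.get(response_json["output"][0], timeout=60)
    img.raise_for_status()
    with open(path, "wb") as f:
        f.write(img.content)
    return True

# Usage with the request above: save_first_image(response.json())
```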
{"license": "creativeml-openrail-m", "tags": ["modelslab.com", "stable-diffusion-api", "text-to-image", "ultra-realistic"], "pinned": true}
stablediffusionapi/falkons-hentai
null
[ "diffusers", "modelslab.com", "stable-diffusion-api", "text-to-image", "ultra-realistic", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
null
2024-04-20T10:07:57+00:00
[]
[]
TAGS
#diffusers #modelslab.com #stable-diffusion-api #text-to-image #ultra-realistic #license-creativeml-openrail-m #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us
# Falkons Hentai API Inference !generated from URL ## Get API Key Get API key from ModelsLab API, No Payment needed. Replace Key in below code, change model_id to "falkons-hentai" Coding in PHP/Node/Java etc? Have a look at docs for more code examples: View docs Try model for free: Generate Images Model link: View model View all models: View Models import requests import json url = "URL payload = URL({ "key": "your_api_key", "model_id": "falkons-hentai", "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K", "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime", "width": "512", "height": "512", "samples": "1", "num_inference_steps": "30", "safety_checker": "no", "enhance_prompt": "yes", "seed": None, "guidance_scale": 7.5, "multi_lingual": "no", "panorama": "no", "self_attention": "no", "upscale": "no", "embeddings": "embeddings_model_id", "lora": "lora_model_id", "webhook": None, "track_id": None }) headers = { 'Content-Type': 'application/json' } response = requests.request("POST", url, headers=headers, data=payload) print(URL) > Use this coupon code to get 25% off DMGG0RBN
[ "# Falkons Hentai API Inference\n\n!generated from URL", "## Get API Key\n\nGet API key from ModelsLab API, No Payment needed. \n\nReplace Key in below code, change model_id to \"falkons-hentai\"\n\nCoding in PHP/Node/Java etc? Have a look at docs for more code examples: View docs\n\nTry model for free: Generate Images\n\nModel link: View model\n\nView all models: View Models\n\n import requests \n import json \n \n url = \"URL \n \n payload = URL({ \n \"key\": \"your_api_key\", \n \"model_id\": \"falkons-hentai\", \n \"prompt\": \"ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K\", \n \"negative_prompt\": \"painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime\", \n \"width\": \"512\", \n \"height\": \"512\", \n \"samples\": \"1\", \n \"num_inference_steps\": \"30\", \n \"safety_checker\": \"no\", \n \"enhance_prompt\": \"yes\", \n \"seed\": None, \n \"guidance_scale\": 7.5, \n \"multi_lingual\": \"no\", \n \"panorama\": \"no\", \n \"self_attention\": \"no\", \n \"upscale\": \"no\", \n \"embeddings\": \"embeddings_model_id\", \n \"lora\": \"lora_model_id\", \n \"webhook\": None, \n \"track_id\": None \n }) \n \n headers = { \n 'Content-Type': 'application/json' \n } \n \n response = requests.request(\"POST\", url, headers=headers, data=payload) \n \n print(URL)\n\n> Use this coupon code to get 25% off DMGG0RBN" ]
[ "TAGS\n#diffusers #modelslab.com #stable-diffusion-api #text-to-image #ultra-realistic #license-creativeml-openrail-m #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us \n", "# Falkons Hentai API Inference\n\n!generated from URL", "## Get API Key\n\nGet API key from ModelsLab API, No Payment needed. \n\nReplace Key in below code, change model_id to \"falkons-hentai\"\n\nCoding in PHP/Node/Java etc? Have a look at docs for more code examples: View docs\n\nTry model for free: Generate Images\n\nModel link: View model\n\nView all models: View Models\n\n import requests \n import json \n \n url = \"URL \n \n payload = URL({ \n \"key\": \"your_api_key\", \n \"model_id\": \"falkons-hentai\", \n \"prompt\": \"ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K\", \n \"negative_prompt\": \"painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime\", \n \"width\": \"512\", \n \"height\": \"512\", \n \"samples\": \"1\", \n \"num_inference_steps\": \"30\", \n \"safety_checker\": \"no\", \n \"enhance_prompt\": \"yes\", \n \"seed\": None, \n \"guidance_scale\": 7.5, \n \"multi_lingual\": \"no\", \n \"panorama\": \"no\", \n \"self_attention\": \"no\", \n \"upscale\": \"no\", \n \"embeddings\": \"embeddings_model_id\", \n \"lora\": \"lora_model_id\", \n \"webhook\": None, \n \"track_id\": None \n }) \n \n headers = { \n 'Content-Type': 'application/json' \n } \n \n response = requests.request(\"POST\", url, headers=headers, data=payload) \n \n print(URL)\n\n> Use this coupon code to get 25% off DMGG0RBN" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# micro_base_help_class_tapt_seed_0

This model is a fine-tuned version of [BigTMiami/micro_base_help_tapt_pretrain_model](https://huggingface.co/BigTMiami/micro_base_help_tapt_pretrain_model) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.4322
- Accuracy: 0.8648
- F1 Macro: 0.6284

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 0
- optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|
| 0.3129        | 1.0   | 313  | 0.4102          | 0.8568   | 0.4902   |
| 0.2957        | 2.0   | 626  | 0.3787          | 0.8588   | 0.5080   |
| 0.2515        | 3.0   | 939  | 0.4483          | 0.8614   | 0.6406   |
| 0.1744        | 4.0   | 1252 | 0.5208          | 0.8552   | 0.5978   |
| 0.1379        | 5.0   | 1565 | 0.7543          | 0.861    | 0.5924   |
| 0.1142        | 6.0   | 1878 | 0.8998          | 0.8534   | 0.6123   |

### Framework versions

- Transformers 4.36.2
- Pytorch 2.2.1+cu121
- Datasets 2.19.0
- Tokenizers 0.15.2
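The card leaves usage unspecified; a minimal, hedged inference sketch with the standard transformers pipeline follows (the label names depend on the undocumented training dataset, so treat them as opaque).

```python
# Minimal sketch: score a text with the fine-tuned RoBERTa classifier.
from transformers import pipeline

clf = pipeline("text-classification", model="BigTMiami/micro_base_help_class_tapt_seed_0")
print(clf("This review was genuinely helpful when choosing the product."))
# -> [{'label': ..., 'score': ...}]  (label semantics depend on the training data)
```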
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "BigTMiami/micro_base_help_tapt_pretrain_model", "model-index": [{"name": "micro_base_help_class_tapt_seed_0", "results": []}]}
BigTMiami/micro_base_help_class_tapt_seed_0
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:BigTMiami/micro_base_help_tapt_pretrain_model", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:09:40+00:00
[]
[]
TAGS
#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_tapt\_seed\_0 ======================================= This model is a fine-tuned version of BigTMiami/micro\_base\_help\_tapt\_pretrain\_model on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.4322 * Accuracy: 0.8648 * F1 Macro: 0.6284 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 0 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 0\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 0\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
null
transformers
# NotAiLOL/Knight-Miqu-70B-MoE-Q4_K_M-GGUF
This model was converted to GGUF format from [`NotAiLOL/Knight-Miqu-70B`](https://huggingface.co/NotAiLOL/Knight-Miqu-70B) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/NotAiLOL/Knight-Miqu-70B-MoE) for more details on the model.

## Use with llama.cpp

Install llama.cpp through brew.

```bash
brew install ggerganov/ggerganov/llama.cpp
```

Invoke the llama.cpp server or the CLI.

CLI:

```bash
llama-cli --hf-repo NotAiLOL/Knight-Miqu-70B-MoE-Q4_K_M-GGUF --model knight-miqu-70b-moe.Q4_K_M.gguf -p "The meaning to life and the universe is"
```

Server:

```bash
llama-server --hf-repo NotAiLOL/Knight-Miqu-70B-MoE-Q4_K_M-GGUF --model knight-miqu-70b-moe.Q4_K_M.gguf -c 2048
```

Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo.

```
git clone https://github.com/ggerganov/llama.cpp && cd llama.cpp && make && ./main -m knight-miqu-70b-moe.Q4_K_M.gguf -n 128
```
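Beyond the CLI above, the same file can be used from Python. A hedged sketch with llama-cpp-python (assumes a recent version where `Llama.from_pretrained` is available, plus enough RAM/VRAM for a 70B Q4_K_M file):

```python
# Minimal sketch: run the GGUF file via llama-cpp-python instead of the llama.cpp CLI.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="NotAiLOL/Knight-Miqu-70B-Q4_K_M-GGUF",
    filename="knight-miqu-70b-moe.Q4_K_M.gguf",  # filename taken from the CLI example
    n_ctx=2048,
)
out = llm("The meaning to life and the universe is", max_tokens=64)
print(out["choices"][0]["text"])
```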
{"license": "apache-2.0", "library_name": "transformers", "tags": ["mergekit", "merge", "llama-cpp", "gguf-my-repo"], "base_model": ["152334H/miqu-1-70b-sf"]}
NotAiLOL/Knight-Miqu-70B-Q4_K_M-GGUF
null
[ "transformers", "gguf", "mergekit", "merge", "llama-cpp", "gguf-my-repo", "base_model:152334H/miqu-1-70b-sf", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:09:55+00:00
[]
[]
TAGS
#transformers #gguf #mergekit #merge #llama-cpp #gguf-my-repo #base_model-152334H/miqu-1-70b-sf #license-apache-2.0 #endpoints_compatible #region-us
# NotAiLOL/Knight-Miqu-70B-MoE-Q4_K_M-GGUF This model was converted to GGUF format from 'NotAiLOL/Knight-Miqu-70B' using URL via URL's GGUF-my-repo space. Refer to the original model card for more details on the model. ## Use with URL Install URL through brew. Invoke the URL server or the CLI. CLI: Server: Note: You can also use this checkpoint directly through the usage steps listed in the URL repo as well.
[ "# NotAiLOL/Knight-Miqu-70B-MoE-Q4_K_M-GGUF\nThis model was converted to GGUF format from 'NotAiLOL/Knight-Miqu-70B' using URL via the URL's GGUF-my-repo space.\nRefer to the original model card for more details on the model.", "## Use with URL\n\nInstall URL through brew.\n\n\nInvoke the URL server or the CLI.\n\nCLI:\n\n\n\nServer:\n\n\n\nNote: You can also use this checkpoint directly through the usage steps listed in the URL repo as well." ]
[ "TAGS\n#transformers #gguf #mergekit #merge #llama-cpp #gguf-my-repo #base_model-152334H/miqu-1-70b-sf #license-apache-2.0 #endpoints_compatible #region-us \n", "# NotAiLOL/Knight-Miqu-70B-MoE-Q4_K_M-GGUF\nThis model was converted to GGUF format from 'NotAiLOL/Knight-Miqu-70B' using URL via the URL's GGUF-my-repo space.\nRefer to the original model card for more details on the model.", "## Use with URL\n\nInstall URL through brew.\n\n\nInvoke the URL server or the CLI.\n\nCLI:\n\n\n\nServer:\n\n\n\nNote: You can also use this checkpoint directly through the usage steps listed in the URL repo as well." ]
text-to-image
diffusers
# AstreaPixie Radiance Hentai API Inference

![generated from modelslab.com](https://pub-3626123a908346a7a8be8d9295f44e26.r2.dev/generations/11177105801713607758.png)

## Get API Key

Get API key from [ModelsLab API](http://modelslab.com), No Payment needed.

Replace Key in below code, change **model_id** to "astreapixie-radiance-hent"

Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://modelslab.com/docs)

Try model for free: [Generate Images](https://modelslab.com/models/astreapixie-radiance-hent)

Model link: [View model](https://modelslab.com/models/astreapixie-radiance-hent)

View all models: [View Models](https://modelslab.com/models)

    import requests
    import json

    url = "https://modelslab.com/api/v6/images/text2img"

    payload = json.dumps({
        "key": "your_api_key",
        "model_id": "astreapixie-radiance-hent",
        "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K",
        "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
        "width": "512",
        "height": "512",
        "samples": "1",
        "num_inference_steps": "30",
        "safety_checker": "no",
        "enhance_prompt": "yes",
        "seed": None,
        "guidance_scale": 7.5,
        "multi_lingual": "no",
        "panorama": "no",
        "self_attention": "no",
        "upscale": "no",
        "embeddings": "embeddings_model_id",
        "lora": "lora_model_id",
        "webhook": None,
        "track_id": None
    })

    headers = {
        'Content-Type': 'application/json'
    }

    response = requests.request("POST", url, headers=headers, data=payload)

    print(response.text)

> Use this coupon code to get 25% off **DMGG0RBN**
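One extra hedged sketch that may be useful with this API: polling for a result when the generation is asynchronous. It assumes a `"processing"` response carries a `fetch_result` URL accepting a POST with the same key, which matches typical ModelsLab behaviour but should be checked against the official docs.

```python
# Hedged helper: poll an asynchronous ModelsLab generation until it finishes.
import json
import time

import requests

def wait_for_result(response_json: dict, api_key: str, interval: float = 5.0) -> dict:
    data = response_json
    while data.get("status") == "processing":
        time.sleep(interval)
        poll = requests.post(
            data["fetch_result"],  # assumed field on "processing" responses
            headers={"Content-Type": "application/json"},
            data=json.dumps({"key": api_key}),
            timeout=60,
        )
        data = poll.json()
    return data

# Usage with the request above: result = wait_for_result(response.json(), "your_api_key")
```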
{"license": "creativeml-openrail-m", "tags": ["modelslab.com", "stable-diffusion-api", "text-to-image", "ultra-realistic"], "pinned": true}
stablediffusionapi/astreapixie-radiance-hent
null
[ "diffusers", "modelslab.com", "stable-diffusion-api", "text-to-image", "ultra-realistic", "license:creativeml-openrail-m", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
null
2024-04-20T10:10:11+00:00
[]
[]
TAGS
#diffusers #modelslab.com #stable-diffusion-api #text-to-image #ultra-realistic #license-creativeml-openrail-m #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us
# AstreaPixie Radiance Hentai API Inference !generated from URL ## Get API Key Get API key from ModelsLab API, No Payment needed. Replace Key in below code, change model_id to "astreapixie-radiance-hent" Coding in PHP/Node/Java etc? Have a look at docs for more code examples: View docs Try model for free: Generate Images Model link: View model View all models: View Models import requests import json url = "URL payload = URL({ "key": "your_api_key", "model_id": "astreapixie-radiance-hent", "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K", "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime", "width": "512", "height": "512", "samples": "1", "num_inference_steps": "30", "safety_checker": "no", "enhance_prompt": "yes", "seed": None, "guidance_scale": 7.5, "multi_lingual": "no", "panorama": "no", "self_attention": "no", "upscale": "no", "embeddings": "embeddings_model_id", "lora": "lora_model_id", "webhook": None, "track_id": None }) headers = { 'Content-Type': 'application/json' } response = requests.request("POST", url, headers=headers, data=payload) print(URL) > Use this coupon code to get 25% off DMGG0RBN
[ "# AstreaPixie Radiance Hentai API Inference\n\n!generated from URL", "## Get API Key\n\nGet API key from ModelsLab API, No Payment needed. \n\nReplace Key in below code, change model_id to \"astreapixie-radiance-hent\"\n\nCoding in PHP/Node/Java etc? Have a look at docs for more code examples: View docs\n\nTry model for free: Generate Images\n\nModel link: View model\n\nView all models: View Models\n\n import requests \n import json \n \n url = \"URL \n \n payload = URL({ \n \"key\": \"your_api_key\", \n \"model_id\": \"astreapixie-radiance-hent\", \n \"prompt\": \"ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K\", \n \"negative_prompt\": \"painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime\", \n \"width\": \"512\", \n \"height\": \"512\", \n \"samples\": \"1\", \n \"num_inference_steps\": \"30\", \n \"safety_checker\": \"no\", \n \"enhance_prompt\": \"yes\", \n \"seed\": None, \n \"guidance_scale\": 7.5, \n \"multi_lingual\": \"no\", \n \"panorama\": \"no\", \n \"self_attention\": \"no\", \n \"upscale\": \"no\", \n \"embeddings\": \"embeddings_model_id\", \n \"lora\": \"lora_model_id\", \n \"webhook\": None, \n \"track_id\": None \n }) \n \n headers = { \n 'Content-Type': 'application/json' \n } \n \n response = requests.request(\"POST\", url, headers=headers, data=payload) \n \n print(URL)\n\n> Use this coupon code to get 25% off DMGG0RBN" ]
[ "TAGS\n#diffusers #modelslab.com #stable-diffusion-api #text-to-image #ultra-realistic #license-creativeml-openrail-m #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us \n", "# AstreaPixie Radiance Hentai API Inference\n\n!generated from URL", "## Get API Key\n\nGet API key from ModelsLab API, No Payment needed. \n\nReplace Key in below code, change model_id to \"astreapixie-radiance-hent\"\n\nCoding in PHP/Node/Java etc? Have a look at docs for more code examples: View docs\n\nTry model for free: Generate Images\n\nModel link: View model\n\nView all models: View Models\n\n import requests \n import json \n \n url = \"URL \n \n payload = URL({ \n \"key\": \"your_api_key\", \n \"model_id\": \"astreapixie-radiance-hent\", \n \"prompt\": \"ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K\", \n \"negative_prompt\": \"painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime\", \n \"width\": \"512\", \n \"height\": \"512\", \n \"samples\": \"1\", \n \"num_inference_steps\": \"30\", \n \"safety_checker\": \"no\", \n \"enhance_prompt\": \"yes\", \n \"seed\": None, \n \"guidance_scale\": 7.5, \n \"multi_lingual\": \"no\", \n \"panorama\": \"no\", \n \"self_attention\": \"no\", \n \"upscale\": \"no\", \n \"embeddings\": \"embeddings_model_id\", \n \"lora\": \"lora_model_id\", \n \"webhook\": None, \n \"track_id\": None \n }) \n \n headers = { \n 'Content-Type': 'application/json' \n } \n \n response = requests.request(\"POST\", url, headers=headers, data=payload) \n \n print(URL)\n\n> Use this coupon code to get 25% off DMGG0RBN" ]
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# rubert-finetuned-ner

This model is a fine-tuned version of [DeepPavlov/rubert-base-cased](https://huggingface.co/DeepPavlov/rubert-base-cased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1536
- Precision: 0.8904
- Recall: 0.9077
- F1: 0.8990
- Accuracy: 0.9585

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 16
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- lr_scheduler_warmup_ratio: 0.05
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.0828        | 0.5   | 625  | 0.2171          | 0.8117    | 0.8616 | 0.8359 | 0.9391   |
| 0.1195        | 1.0   | 1250 | 0.1753          | 0.8540    | 0.8842 | 0.8689 | 0.9488   |
| 0.1255        | 1.5   | 1875 | 0.1754          | 0.8860    | 0.9027 | 0.8943 | 0.9577   |
| 0.0546        | 2.0   | 2500 | 0.1536          | 0.8904    | 0.9077 | 0.8990 | 0.9585   |

### Framework versions

- Transformers 4.40.0
- Pytorch 2.2.1+cu121
- Datasets 2.19.0
- Tokenizers 0.19.1
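A minimal, hedged inference sketch (the entity label set comes from the undocumented training data, so inspect the output labels yourself):

```python
# Minimal sketch: Russian NER with the fine-tuned model; aggregation merges word pieces.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="zaalbar/rubert-finetuned-ner",
    aggregation_strategy="simple",
)
print(ner("Александр Пушкин родился в Москве."))
```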
{"tags": ["generated_from_trainer"], "metrics": ["precision", "recall", "f1", "accuracy"], "base_model": "DeepPavlov/rubert-base-cased", "model-index": [{"name": "rubert-finetuned-ner", "results": []}]}
zaalbar/rubert-finetuned-ner
null
[ "transformers", "safetensors", "bert", "token-classification", "generated_from_trainer", "base_model:DeepPavlov/rubert-base-cased", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:10:28+00:00
[]
[]
TAGS
#transformers #safetensors #bert #token-classification #generated_from_trainer #base_model-DeepPavlov/rubert-base-cased #autotrain_compatible #endpoints_compatible #region-us
rubert-finetuned-ner ==================== This model is a fine-tuned version of DeepPavlov/rubert-base-cased on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.1536 * Precision: 0.8904 * Recall: 0.9077 * F1: 0.8990 * Accuracy: 0.9585 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0001 * train\_batch\_size: 16 * eval\_batch\_size: 64 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_ratio: 0.05 * num\_epochs: 2 ### Training results ### Framework versions * Transformers 4.40.0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.05\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #bert #token-classification #generated_from_trainer #base_model-DeepPavlov/rubert-base-cased #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.05\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
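Again the quick-start section is blank; a hedged generic sketch for this `stablelm` checkpoint follows (requires a transformers version with StableLM support; the tags mention conversational use, whose chat template is unspecified, so plain completion is shown).

```python
# Minimal sketch: plain causal-LM generation with the heyllm234/sc47 checkpoint.
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("heyllm234/sc47")
model = AutoModelForCausalLM.from_pretrained("heyllm234/sc47")

inputs = tok("The quick brown fox", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=40, do_sample=False)
print(tok.decode(out[0], skip_special_tokens=True))
```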
{"library_name": "transformers", "tags": []}
heyllm234/sc47
null
[ "transformers", "safetensors", "stablelm", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:10:36+00:00
[ "1910.09700" ]
[]
TAGS
#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
null
My first run, 8192 ctx qlora, trained on AEZAKMI-3_6 dataset. Base seems to not be too slopped but the finetune is not great - lots of slopped GPTisms, "It's important to remember" etc. It does seem uncensored though, so if you're not fine with Llama-3-8B-Instruct, this might be an option until better finetunes come out. ChatML prompt format.
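(For reference, a minimal sketch of the ChatML format mentioned above; the role markers follow the standard ChatML convention, and the system message shown is a placeholder assumption, not something this card specifies.)

```python
# Minimal ChatML prompt builder (sketch). The system message is a
# placeholder assumption; only the <|im_start|>/<|im_end|> framing matters.
def build_chatml_prompt(system: str, user: str) -> str:
    return (
        f"<|im_start|>system\n{system}<|im_end|>\n"
        f"<|im_start|>user\n{user}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

prompt = build_chatml_prompt("You are a helpful assistant.", "Hello!")
```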
{"license": "other", "license_name": "llama3", "license_link": "LICENSE"}
adamo1139/Llama-3-8B-AEZAKMI-run1-LoRA
null
[ "safetensors", "license:other", "region:us" ]
null
2024-04-20T10:11:34+00:00
[]
[]
TAGS #safetensors #license-other #region-us
My first run, 8192 ctx qlora, trained on AEZAKMI-3_6 dataset. Base seems to not be too slopped but the finetune is not great - lots of slopped GPTisms, "It's important to remember" etc. It does seem uncensored though, so if you're not fine with Llama-3-8B-Instruct, this might be an option until better finetunes come out. ChatML prompt format.
[]
[ "TAGS\n#safetensors #license-other #region-us \n" ]
translation
transformers
# WS TCG Card Text Translator
A Japanese-English machine translation model specifically trained for translating card text from the Weiss Schwarz (WS) Trading Card Game, fine-tuned on [Helsinki-NLP/opus-mt-ja-en](https://huggingface.co/Helsinki-NLP/opus-mt-ja-en).

## Hugging Face Space Demo
Check out the demo at [https://huggingface.co/spaces/eepj/wstcg-mt](https://huggingface.co/spaces/eepj/wstcg-mt).

## Dataset
### Official WS Card List
* Japanese-English parallel card text comprising 6000+ card texts retrieved from the official card list.

## Training
### Base Model
* Base model: Helsinki-NLP/opus-mt-ja-en
* Base tokenizer: Helsinki-NLP/opus-mt-ja-en
* Source language: Japanese (ja)
* Target language: English (en)

### Additional Tokens
|Token Type|Additional Tokens|
|----------|-----------------|
|Named Entity Placeholder|\<TRAIT\>, \<NAME\>|
|Trigger Icon Placeholder|\<SOUL\>, \<CHOICE\>, \<TREASURE\>, \<SALVAGE\>, \<STANDBY\>,<br> \<GATE\>, \<BOUNCE\>, \<STOCK\>, \<SHOT\>, \<DRAW\>|
|Keywords|【, 】, AUTO, ACT, CONT, COUNTER, CLOCK, トリガー|

### Hardware
* NVIDIA RTX3060 Ti with CUDA hardware acceleration

### Hyperparameters
* Number of epochs: 5
* Optimizer: Adam
* Initial learning rate: 1e-4
* Learning rate scheduler: StepLR, reduce by factor of 0.5 every epoch
* Batch size: 4
* Loss function: CrossEntropyLoss
* Random seed: 42

## Performance
### Metrics
|Dataset|BLEU|chr-F|
|-------|------|-----|
|WS Official Card List|0.82664|0.96515|

### Example Test Case
|Language|Official Text|
|--------|-------------|
|Japanese|【永】 あなたの\<TRAIT\>のキャラが4枚以上なら、このカードは、色条件を満たさずに手札からプレイでき、あなたの手札のこのカードのレベルを-1。|
|English|【CONT】 If you have 4 or more \<TRAIT\> characters, this card gets -1 level while in your hand, and can be played from your hand without fulfilling color requirements.|

|Model|Translated Text|BLEU|chr-F|
|-----|---------------|------|-----|
|Ours|【CONT】If you have 4 or more \<TRAIT\> characters, this card can be played from your hand without fulfilling color requirements, and this card gets -1 level while in your hand.|0.79134|0.95207|
|Google Translate|[CONT] If you have 4 or more characters in \<TRAIT\>, you can play this card from your hand without meeting the color conditions, and the level of this card in your hand becomes -1.|0.26561|0.53877|
|GPT-3.5|[Permanent] If you have 4 or more cards with the \<TRAIT\> trait, you can play this card from your hand without meeting the color condition, and reduce the level of this card in your hand by 1.|0.24213|0.48925|
|opus-mt-ja-en|If there are more than four characters in your \<TRAIT\> card, this card can be played from your hand with no color conditions, and you can see the level of this card in your hand is -1.|0.22006|0.51034|

## References
**Helsinki-NLP/opus-mt-ja-en** <br>
https://huggingface.co/Helsinki-NLP/opus-mt-ja-en

**WS Official Card List (Japanese)** <br>
https://ws-tcg.com/cardlist

**WS Official Card List (English)** <br>
https://en.ws-tcg.com/cardlist

**WS Comprehensive Rules (English)** <br>
https://en.ws-tcg.com/wp/wp-content/uploads/20220726205726/WSE-Comprehensive-Rules-v2.07.pdf
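A minimal inference sketch for this translator using the standard transformers seq2seq API; the repository id comes from this card, the input is the Japanese example above, and generation settings are left at their defaults:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned WS translator published in this repository.
tokenizer = AutoTokenizer.from_pretrained("eepj/wstcg-mt-ja-en")
model = AutoModelForSeq2SeqLM.from_pretrained("eepj/wstcg-mt-ja-en")

# Japanese card text with named-entity placeholders, as used in training.
text = "【永】 あなたの<TRAIT>のキャラが4枚以上なら、このカードは、色条件を満たさずに手札からプレイでき、あなたの手札のこのカードのレベルを-1。"
inputs = tokenizer(text, return_tensors="pt")
outputs = model.generate(**inputs)
# Whether <TRAIT>-style placeholders survive decoding depends on how they
# were registered; skip_special_tokens=True may strip them if special.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```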
{"language": ["ja", "en"], "metrics": ["bleu"], "base_model": "Helsinki-NLP/opus-mt-ja-en", "pipeline_tag": "translation", "widget": [{"text": "\u3010\u6c38\u3011 \u3042\u306a\u305f\u306e<TRAIT>\u306e\u30ad\u30e3\u30e9\u304c4\u679a\u4ee5\u4e0a\u306a\u3089\u3001\u3053\u306e\u30ab\u30fc\u30c9\u306f\u3001\u8272\u6761\u4ef6\u3092\u6e80\u305f\u3055\u305a\u306b\u624b\u672d\u304b\u3089\u30d7\u30ec\u30a4\u3067\u304d\u3001\u3042\u306a\u305f\u306e\u624b\u672d\u306e\u3053\u306e\u30ab\u30fc\u30c9\u306e\u30ec\u30d9\u30eb\u3092\uff0d1\u3002"}]}
eepj/wstcg-mt-ja-en
null
[ "transformers", "safetensors", "marian", "text2text-generation", "translation", "ja", "en", "base_model:Helsinki-NLP/opus-mt-ja-en", "autotrain_compatible", "endpoints_compatible", "region:us", "has_space" ]
null
2024-04-20T10:13:55+00:00
[]
[ "ja", "en" ]
TAGS #transformers #safetensors #marian #text2text-generation #translation #ja #en #base_model-Helsinki-NLP/opus-mt-ja-en #autotrain_compatible #endpoints_compatible #region-us #has_space
WS TCG Card Text Translator
===========================

A Japanese-English machine translation model specifically trained for translating card text from the Weiss Schwarz (WS) Trading Card Game, fine-tuned on Helsinki-NLP/opus-mt-ja-en.

Hugging Face Space Demo
-----------------------

Check out the demo at URL

Dataset
-------

### Official WS Card List

* Japanese-English parallel card text comprising 6000+ card texts retrieved from the official card list.

Training
--------

### Base Model

* Base model: Helsinki-NLP/opus-mt-ja-en
* Base tokenizer: Helsinki-NLP/opus-mt-ja-en
* Source language: Japanese (ja)
* Target language: English (en)

### Additional Tokens

### Hardware

* NVIDIA RTX3060 Ti with CUDA hardware acceleration

### Hyperparameters

* Number of epochs: 5
* Optimizer: Adam
* Initial learning rate: 1e-4
* Learning rate scheduler: StepLR, reduce by factor of 0.5 every epoch
* Batch size: 4
* Loss function: CrossEntropyLoss
* Random seed: 42

Performance
-----------

### Metrics

Dataset: WS Official Card List, BLEU: 0.82664, chr-F: 0.96515

### Example Test Case

References
----------

Helsinki-NLP/opus-mt-ja-en


URL

WS Official Card List (Japanese)


URL

WS Official Card List (English)


URL

WS Comprehensive Rules (English)


URL
[ "### Official WS Card List\n\n\n* Japanese-English parallel card text comprising 6000+ card text retrieved from the offical card list.\n\n\nTraining\n--------", "### Base Model\n\n\n* Base model: Helsinki-NLP/opus-mt-ja-en\n* Base tokenizer: Helsinki-NLP/opus-mt-ja-en\n* Source language: Japanese (ja)\n* Target language: English (en)", "### Additional Tokens", "### Hardware\n\n\n* NVIDIA RTX3060 Ti with CUDA hardware acceleration", "### Hyperparameters\n\n\n* Number of epochs: 5\n* Optimizer: Adam\n* Initial learning rate: 1e-4\n* Learning rate scheduler: StepLR, reduce by factor of 0.5 every epoch\n* Batch size: 4\n* Loss function: CrossEntropyLoss\n* Random seed: 42\n\n\nPerformance\n-----------", "### Metrics\n\n\nDataset: WS Official Card List, BLEU: 0.82664, chr-F: 0.96515", "### Example Test Case\n\n\n\n\nReferences\n----------\n\n\nHelsinki-NLP/opus-mt-ja-en\n \n\nURL\n\n\nWS Official Card List (Japanese)\n \n\nURL\n\n\nWS Official Card List (English)\n \n\nURL\n\n\nWS Comprehensive Rules (English)\n \n\nURL" ]
[ "TAGS\n#transformers #safetensors #marian #text2text-generation #translation #ja #en #base_model-Helsinki-NLP/opus-mt-ja-en #autotrain_compatible #endpoints_compatible #region-us #has_space \n", "### Official WS Card List\n\n\n* Japanese-English parallel card text comprising 6000+ card text retrieved from the offical card list.\n\n\nTraining\n--------", "### Base Model\n\n\n* Base model: Helsinki-NLP/opus-mt-ja-en\n* Base tokenizer: Helsinki-NLP/opus-mt-ja-en\n* Source language: Japanese (ja)\n* Target language: English (en)", "### Additional Tokens", "### Hardware\n\n\n* NVIDIA RTX3060 Ti with CUDA hardware acceleration", "### Hyperparameters\n\n\n* Number of epochs: 5\n* Optimizer: Adam\n* Initial learning rate: 1e-4\n* Learning rate scheduler: StepLR, reduce by factor of 0.5 every epoch\n* Batch size: 4\n* Loss function: CrossEntropyLoss\n* Random seed: 42\n\n\nPerformance\n-----------", "### Metrics\n\n\nDataset: WS Official Card List, BLEU: 0.82664, chr-F: 0.96515", "### Example Test Case\n\n\n\n\nReferences\n----------\n\n\nHelsinki-NLP/opus-mt-ja-en\n \n\nURL\n\n\nWS Official Card List (Japanese)\n \n\nURL\n\n\nWS Official Card List (English)\n \n\nURL\n\n\nWS Comprehensive Rules (English)\n \n\nURL" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
Grayx/sad_llama_7
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:14:18+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-fir-turkish-colab This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the common_voice_13_0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
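For readers reconstructing the setup, the hyperparameters above map onto transformers `TrainingArguments` roughly as follows (a sketch; the output directory name is an assumption and `fp16=True` stands in for "Native AMP"):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="wav2vec2-large-xls-r-300m-fir-turkish-colab",  # assumed name
    learning_rate=3e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    gradient_accumulation_steps=2,  # 16 x 2 = effective train batch size 32
    lr_scheduler_type="linear",
    warmup_steps=500,
    num_train_epochs=30,
    fp16=True,  # "Native AMP" mixed precision
)
```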
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "datasets": ["common_voice_13_0"], "base_model": "facebook/wav2vec2-xls-r-300m", "model-index": [{"name": "wav2vec2-large-xls-r-300m-fir-turkish-colab", "results": []}]}
f77777/wav2vec2-large-xls-r-300m-fir-turkish-colab
null
[ "transformers", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "dataset:common_voice_13_0", "base_model:facebook/wav2vec2-xls-r-300m", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:15:55+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_13_0 #base_model-facebook/wav2vec2-xls-r-300m #license-apache-2.0 #endpoints_compatible #region-us
# wav2vec2-large-xls-r-300m-fir-turkish-colab This model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the common_voice_13_0 dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
[ "# wav2vec2-large-xls-r-300m-fir-turkish-colab\n\nThis model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the common_voice_13_0 dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0003\n- train_batch_size: 16\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 500\n- num_epochs: 30\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.38.2\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_13_0 #base_model-facebook/wav2vec2-xls-r-300m #license-apache-2.0 #endpoints_compatible #region-us \n", "# wav2vec2-large-xls-r-300m-fir-turkish-colab\n\nThis model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the common_voice_13_0 dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0003\n- train_batch_size: 16\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 500\n- num_epochs: 30\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.38.2\n- Pytorch 2.2.1+cu121\n- Datasets 2.19.0\n- Tokenizers 0.15.2" ]
text-to-image
diffusers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🧨 diffusers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "diffusers"}
Niggendar/wildcardxREALNSFWSFW_nsfwSFW
null
[ "diffusers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
null
2024-04-20T10:16:41+00:00
[ "1910.09700" ]
[]
TAGS #diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # fineturning-without-pretraining-3 This model is a fine-tuned version of [](https://huggingface.co/) on the common_voice_1_0 dataset. It achieves the following results on the evaluation set: - Loss: 3.0417 - Wer: 1.2316 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 1000 - num_epochs: 30 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | |:-------------:|:-----:|:----:|:---------------:|:------:| | 3.4746 | 4.27 | 500 | 2.6868 | 1.0 | | 2.5662 | 8.55 | 1000 | 2.4297 | 1.0371 | | 2.3434 | 12.82 | 1500 | 2.3182 | 1.1941 | | 2.134 | 17.09 | 2000 | 2.3792 | 1.1749 | | 1.8502 | 21.37 | 2500 | 2.6371 | 1.1072 | | 1.5697 | 25.64 | 3000 | 2.9421 | 1.1907 | | 1.3814 | 29.91 | 3500 | 3.0417 | 1.2316 | ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
{"tags": ["generated_from_trainer"], "datasets": ["common_voice_1_0"], "metrics": ["wer"], "model-index": [{"name": "fineturning-without-pretraining-3", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Automatic Speech Recognition"}, "dataset": {"name": "common_voice_1_0", "type": "common_voice_1_0", "config": "en", "split": "validation", "args": "en"}, "metrics": [{"type": "wer", "value": 1.231604810552179, "name": "Wer"}]}]}]}
Aviral2412/fineturning-without-pretraining-3
null
[ "transformers", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "dataset:common_voice_1_0", "model-index", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:18:07+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_1_0 #model-index #endpoints_compatible #region-us
fineturning-without-pretraining-3 ================================= This model is a fine-tuned version of [](URL on the common\_voice\_1\_0 dataset. It achieves the following results on the evaluation set: * Loss: 3.0417 * Wer: 1.2316 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0001 * train\_batch\_size: 32 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 1000 * num\_epochs: 30 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.39.3 * Pytorch 2.1.2 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 1000\n* num\\_epochs: 30\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.1.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_1_0 #model-index #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0001\n* train\\_batch\\_size: 32\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 1000\n* num\\_epochs: 30\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.39.3\n* Pytorch 2.1.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
null
peft
## Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 ### Framework versions - PEFT 0.4.0
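For reference, the quantization settings listed above correspond to a transformers `BitsAndBytesConfig` along these lines (a sketch, not the exact training script):

```python
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
# Passed to the base model as, e.g.:
# AutoModelForCausalLM.from_pretrained(..., quantization_config=bnb_config)
```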
{"library_name": "peft"}
finellm/fntinyLlama
null
[ "peft", "safetensors", "llama", "region:us" ]
null
2024-04-20T10:19:09+00:00
[]
[]
TAGS #peft #safetensors #llama #region-us
## Training procedure The following 'bitsandbytes' quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 ### Framework versions - PEFT 0.4.0
[ "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16", "### Framework versions\n\n\n- PEFT 0.4.0" ]
[ "TAGS\n#peft #safetensors #llama #region-us \n", "## Training procedure\n\n\nThe following 'bitsandbytes' quantization config was used during training:\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16", "### Framework versions\n\n\n- PEFT 0.4.0" ]
text-generation
transformers
# Uploaded model - **Developed by:** LeroyDyer - **License:** apache-2.0 - **Finetuned from model :** LeroyDyer/Mixtral_AI_Minitron_2b_1.0 This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
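A minimal loading sketch using Unsloth's `FastLanguageModel` API, assuming its usual `from_pretrained` signature; the `max_seq_length` and 4-bit settings are illustrative choices, not values taken from this card:

```python
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="LeroyDyer/Mini_Merge_RolePlay",
    max_seq_length=2048,  # illustrative choice
    load_in_4bit=True,    # illustrative; full precision also works
)
FastLanguageModel.for_inference(model)  # enable Unsloth's fast inference path
```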
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "mistral", "trl"], "base_model": "LeroyDyer/Mixtral_AI_Minitron_2b_1.0"}
LeroyDyer/Mini_Merge_RolePlay
null
[ "transformers", "safetensors", "mistral", "text-generation", "text-generation-inference", "unsloth", "trl", "conversational", "en", "base_model:LeroyDyer/Mixtral_AI_Minitron_2b_1.0", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:19:19+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #mistral #text-generation #text-generation-inference #unsloth #trl #conversational #en #base_model-LeroyDyer/Mixtral_AI_Minitron_2b_1.0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: LeroyDyer - License: apache-2.0 - Finetuned from model : LeroyDyer/Mixtral_AI_Minitron_2b_1.0 This mistral model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: LeroyDyer\n- License: apache-2.0\n- Finetuned from model : LeroyDyer/Mixtral_AI_Minitron_2b_1.0\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #text-generation-inference #unsloth #trl #conversational #en #base_model-LeroyDyer/Mixtral_AI_Minitron_2b_1.0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: LeroyDyer\n- License: apache-2.0\n- Finetuned from model : LeroyDyer/Mixtral_AI_Minitron_2b_1.0\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
adapter-transformers
# Adapter `BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_2` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_2", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
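Continuing the snippet above, a hedged sketch of classifying a sample review with the loaded adapter head; the example sentence and the assumption that the head output exposes `.logits` follow the usual adapters/transformers conventions:

```python
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
inputs = tokenizer("This review was very helpful to me!", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)  # 'model' from the loading snippet above
predicted_class = outputs.logits.argmax(dim=-1).item()  # index into head labels
```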
{"tags": ["adapter-transformers", "roberta"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_2
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T10:20:06+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_2' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_2' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_2' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
null
transformers
# Uploaded model - **Developed by:** LeroyDyer - **License:** apache-2.0 - **Finetuned from model :** LeroyDyer/Mixtral_AI_Minitron_2b_1.0 This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "mistral", "trl"], "base_model": "LeroyDyer/Mixtral_AI_Minitron_2b_1.0"}
LeroyDyer/MINI_2b_roles_LORA
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "mistral", "trl", "en", "base_model:LeroyDyer/Mixtral_AI_Minitron_2b_1.0", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:21:29+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #mistral #trl #en #base_model-LeroyDyer/Mixtral_AI_Minitron_2b_1.0 #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: LeroyDyer - License: apache-2.0 - Finetuned from model : LeroyDyer/Mixtral_AI_Minitron_2b_1.0 This mistral model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: LeroyDyer\n- License: apache-2.0\n- Finetuned from model : LeroyDyer/Mixtral_AI_Minitron_2b_1.0\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #mistral #trl #en #base_model-LeroyDyer/Mixtral_AI_Minitron_2b_1.0 #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: LeroyDyer\n- License: apache-2.0\n- Finetuned from model : LeroyDyer/Mixtral_AI_Minitron_2b_1.0\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-classification
null
# Model Card for SentimentTensor

This model card provides details about the SentimentTensor model, developed by Saish Shinde, for sentiment analysis using LSTM architecture.

## Model Details

### Model Description

The SentimentTensor model is a deep learning model based on LSTM architecture, developed by Saish Shinde, for sentiment analysis tasks. It achieves an accuracy of 81% on standard evaluation datasets. The model is designed to classify text data into three categories: negative, neutral, and positive sentiments.

- **Developed by:** Saish Shinde
- **Model type:** LSTM-based Sequence Classification
- **Language(s) (NLP):** English
- **License:** No specific license

# Dataset Used

Yelp dataset with 4.04 GB compressed, 8.65 GB uncompressed data

## Uses

### Direct Use

The SentimentTensor model can be directly used for sentiment analysis tasks without fine-tuning.

### Downstream Use

This model can be fine-tuned for specific domains or integrated into larger NLP applications.

### Out-of-Scope Use

The model may not perform well on highly specialized or domain-specific text data.

## Bias, Risks, and Limitations

The SentimentTensor model, like any LSTM-based model, may have biases and limitations inherent in its training data and architecture. It might sometimes struggle with capturing long-range dependencies or understanding context in complex sentences, and it places less emphasis on the neutral sentiment class.

### Recommendations

Users should be aware of potential biases and limitations and evaluate results accordingly.

## How to Get Started with the Model

### Loading the Model

You can load the SentimentTensor model using the Hugging Face library:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the model and tokenizer
model = AutoModelForSequenceClassification.from_pretrained("your-model-name")
tokenizer = AutoTokenizer.from_pretrained("your-tokenizer-name")

# Tokenization
text = "Your text data here"
tokenized_input = tokenizer(text, return_tensors="pt")

# Sentiment analysis: forward pass through the model
outputs = model(**tokenized_input)

# Get predicted sentiment label
predicted_label = outputs.logits.argmax().item()
```

# Example Usage
```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the model and tokenizer
model = AutoModelForSequenceClassification.from_pretrained("saishshinde15/SentimentTensor")
tokenizer = AutoTokenizer.from_pretrained("saishshinde15/SentimentTensor")

# Tokenize text data
text = "This is a great movie!"
tokenized_input = tokenizer(text, return_tensors="pt")

# Perform sentiment analysis
outputs = model(**tokenized_input)
predicted_label = outputs.logits.argmax().item()

# Print predicted sentiment
sentiment_labels = ["negative", "neutral", "positive"]
print(f"Predicted Sentiment: {sentiment_labels[predicted_label]}")
```

# Model Architecture and Objective

The SentimentTensor model is based on LSTM architecture, which is well-suited for sequence classification tasks like sentiment analysis. It uses long short-term memory cells to capture dependencies in sequential data.

# Model Card Authors
Saish Shinde
{"language": ["en"], "datasets": ["yelp_review_full"], "metrics": ["accuracy", "code_eval"], "pipeline_tag": "text-classification"}
saishshinde15/SentimentTensor
null
[ "text-classification", "en", "dataset:yelp_review_full", "region:us" ]
null
2024-04-20T10:21:31+00:00
[]
[ "en" ]
TAGS #text-classification #en #dataset-yelp_review_full #region-us
# Model Card for SentimentTensor

This model card provides details about the SentimentTensor model, developed by Saish Shinde, for sentiment analysis using LSTM architecture.

## Model Details

### Model Description

The SentimentTensor model is a deep learning model based on LSTM architecture, developed by Saish Shinde, for sentiment analysis tasks. It achieves an accuracy of 81% on standard evaluation datasets. The model is designed to classify text data into three categories: negative, neutral, and positive sentiments.

- Developed by: Saish Shinde
- Model type: LSTM-based Sequence Classification
- Language(s) (NLP): English
- License: No specific license

# Dataset Used

Yelp dataset with 4.04 GB compressed, 8.65 GB uncompressed data

## Uses

### Direct Use

The SentimentTensor model can be directly used for sentiment analysis tasks without fine-tuning.

### Downstream Use

This model can be fine-tuned for specific domains or integrated into larger NLP applications.

### Out-of-Scope Use

The model may not perform well on highly specialized or domain-specific text data.

## Bias, Risks, and Limitations

The SentimentTensor model, like any LSTM-based model, may have biases and limitations inherent in its training data and architecture. It might sometimes struggle with capturing long-range dependencies or understanding context in complex sentences, and it places less emphasis on the neutral sentiment class.

### Recommendations

Users should be aware of potential biases and limitations and evaluate results accordingly.

## How to Get Started with the Model

### Loading the Model

You can load the SentimentTensor model using the Hugging Face library:

# python Code:
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the model and tokenizer
model = AutoModelForSequenceClassification.from_pretrained("your-model-name")
tokenizer = AutoTokenizer.from_pretrained("your-tokenizer-name")

# Tokenization
text = "Your text data here"
tokenized_input = tokenizer(text, return_tensors="pt")

# Sentiment Analysis

#Forward pass through the model
outputs = model(tokenized_input)

#Get predicted sentiment label
predicted_label = URL().item()

# Example Usage

# Model Architecture and Objective

The SentimentTensor model is based on LSTM architecture, which is well-suited for sequence classification tasks like sentiment analysis. It uses long short-term memory cells to capture dependencies in sequential data.

# Model Card Authors
Saish Shinde
[ "# Model Card for SentimentTensor\n\nThis modelcard provides details about the SentimentTensor model, developed by Saish Shinde, for sentiment analysis using LSTM architecture.", "## Model Details", "### Model Description\n\nThe SentimentTensor model is a deep learning model based on LSTM architecture, developed by Saish Shinde, for sentiment analysis tasks. It achieves an accuracy of 81% on standard evaluation datasets. The model is designed to classify text data into three categories: negative, neutral, and positive sentiments.\n\n- Developed by: Saish Shinde\n- Model type: LSTM-based Sequence Classification\n- Language(s) (NLP): English\n- License: No specific license", "# Dataset Used\n\nyelp dataset with 4.04GB compressed,8.65GB uncompressed data", "## Uses", "### Direct Use\n\nThe SentimentTensor model can be directly used for sentiment analysis tasks without fine-tuning.", "### Downstream Use\n\nThis model can be fine-tuned for specific domains or integrated into larger NLP applications.", "### Out-of-Scope Use\n\nThe model may not perform well on highly specialized or domain-specific text data.", "## Bias, Risks, and Limitations\n\nThe SentimentTensor model, like any LSTM-based model, may have biases and limitations inherent in its training data and architecture. It might sometimes struggle with capturing long-range dependencies or understanding context in complex sentences, also it emphasizes less on neutral sentiment", "### Recommendations\n\nUsers should be aware of potential biases and limitations and evaluate results accordingly.", "## How to Get Started with the Model", "### Loading the Model\n\nYou can load the SentimentTensor model using the Hugging Face library:", "# python Code:\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer", "# Load the model and tokenizer\nmodel = AutoModelForSequenceClassification.from_pretrained(\"your-model-name\")\ntokenizer = AutoTokenizer.from_pretrained(\"your-tokenizer-name\")", "# Tokenization\ntext = \"Your text data here\"\ntokenized_input = tokenizer(text, return_tensors=\"pt\")", "# Sentiment Analysis", "# Example Usage", "# Model Architecture and Objective\n\nThe SentimentTensor model is based on LSTM architecture, which is well-suited for sequence classification tasks like sentiment analysis. It uses long short-term memory cells to capture dependencies in sequential data.", "# Model Card Authors\nSaish Shinde" ]
[ "TAGS\n#text-classification #en #dataset-yelp_review_full #region-us \n", "# Model Card for SentimentTensor\n\nThis modelcard provides details about the SentimentTensor model, developed by Saish Shinde, for sentiment analysis using LSTM architecture.", "## Model Details", "### Model Description\n\nThe SentimentTensor model is a deep learning model based on LSTM architecture, developed by Saish Shinde, for sentiment analysis tasks. It achieves an accuracy of 81% on standard evaluation datasets. The model is designed to classify text data into three categories: negative, neutral, and positive sentiments.\n\n- Developed by: Saish Shinde\n- Model type: LSTM-based Sequence Classification\n- Language(s) (NLP): English\n- License: No specific license", "# Dataset Used\n\nyelp dataset with 4.04GB compressed,8.65GB uncompressed data", "## Uses", "### Direct Use\n\nThe SentimentTensor model can be directly used for sentiment analysis tasks without fine-tuning.", "### Downstream Use\n\nThis model can be fine-tuned for specific domains or integrated into larger NLP applications.", "### Out-of-Scope Use\n\nThe model may not perform well on highly specialized or domain-specific text data.", "## Bias, Risks, and Limitations\n\nThe SentimentTensor model, like any LSTM-based model, may have biases and limitations inherent in its training data and architecture. It might sometimes struggle with capturing long-range dependencies or understanding context in complex sentences, also it emphasizes less on neutral sentiment", "### Recommendations\n\nUsers should be aware of potential biases and limitations and evaluate results accordingly.", "## How to Get Started with the Model", "### Loading the Model\n\nYou can load the SentimentTensor model using the Hugging Face library:", "# python Code:\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer", "# Load the model and tokenizer\nmodel = AutoModelForSequenceClassification.from_pretrained(\"your-model-name\")\ntokenizer = AutoTokenizer.from_pretrained(\"your-tokenizer-name\")", "# Tokenization\ntext = \"Your text data here\"\ntokenized_input = tokenizer(text, return_tensors=\"pt\")", "# Sentiment Analysis", "# Example Usage", "# Model Architecture and Objective\n\nThe SentimentTensor model is based on LSTM architecture, which is well-suited for sequence classification tasks like sentiment analysis. It uses long short-term memory cells to capture dependencies in sequential data.", "# Model Card Authors\nSaish Shinde" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
bdsaglam/llama-2-7b-chat-jerx-peft-t8wqahmu
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:21:46+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": ["trl", "sft"]}
mjm4dl/llama3_8B_slot_filling_ignore_prompt_input
null
[ "transformers", "safetensors", "llama", "text-generation", "trl", "sft", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:24:07+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #trl #sft #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #trl #sft #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert2bert-model99-last This model is a fine-tuned version of [](https://huggingface.co/) on the id_liputan6 dataset. It achieves the following results on the evaluation set: - Loss: 2.8701 - R1 Precision: 0.3001 - R1 Recall: 0.34 - R1 Fmeasure: 0.3156 - R2 Precision: 0.121 - R2 Recall: 0.1366 - R2 Fmeasure: 0.1269 - Rl Precision: 0.239 - Rl Recall: 0.2707 - Rl Fmeasure: 0.2513 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 18 - eval_batch_size: 18 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | R1 Precision | R1 Recall | R1 Fmeasure | R2 Precision | R2 Recall | R2 Fmeasure | Rl Precision | Rl Recall | Rl Fmeasure | |:-------------:|:-----:|:-----:|:---------------:|:------------:|:---------:|:-----------:|:------------:|:---------:|:-----------:|:------------:|:---------:|:-----------:| | 2.3429 | 1.0 | 10772 | 2.7616 | 0.29 | 0.3334 | 0.3069 | 0.1175 | 0.1351 | 0.1243 | 0.2329 | 0.2678 | 0.2464 | | 1.5227 | 2.0 | 21544 | 2.6637 | 0.287 | 0.3356 | 0.3062 | 0.1148 | 0.1338 | 0.1222 | 0.2304 | 0.2693 | 0.2457 | | 1.3203 | 3.0 | 32316 | 2.6384 | 0.2934 | 0.3387 | 0.3111 | 0.1195 | 0.1377 | 0.1265 | 0.2355 | 0.272 | 0.2498 | | 1.169 | 4.0 | 43088 | 2.6579 | 0.3004 | 0.3403 | 0.3158 | 0.1228 | 0.139 | 0.129 | 0.2407 | 0.2726 | 0.253 | | 1.0416 | 5.0 | 53860 | 2.6894 | 0.2963 | 0.3367 | 0.3121 | 0.1202 | 0.1362 | 0.1264 | 0.2367 | 0.2691 | 0.2494 | | 0.9303 | 6.0 | 64632 | 2.7418 | 0.2986 | 0.3417 | 0.3155 | 0.1213 | 0.1384 | 0.1279 | 0.2385 | 0.2727 | 0.2519 | | 0.8375 | 7.0 | 75404 | 2.8060 | 0.3009 | 0.3417 | 0.3168 | 0.1223 | 0.1384 | 0.1285 | 0.2402 | 0.2727 | 0.2528 | | 0.7675 | 8.0 | 86176 | 2.8701 | 0.3001 | 0.34 | 0.3156 | 0.121 | 0.1366 | 0.1269 | 0.239 | 0.2707 | 0.2513 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.1 - Datasets 2.19.0 - Tokenizers 0.19.1
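The hyperparameters reported above map one-to-one onto the Hugging Face `Trainer` API. A minimal sketch of how they might be expressed in code — the `output_dir` name and the use of `Seq2SeqTrainingArguments` (rather than plain `TrainingArguments`) are assumptions, since the training script itself is not published:

```python
from transformers import Seq2SeqTrainingArguments

# Mirrors the hyperparameters reported in this card; output_dir is an assumed name.
training_args = Seq2SeqTrainingArguments(
    output_dir="bert2bert-model99-last",
    learning_rate=5e-5,
    per_device_train_batch_size=18,
    per_device_eval_batch_size=18,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=8,
    fp16=True,  # "mixed_precision_training: Native AMP"
)
```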
{"tags": ["generated_from_trainer"], "datasets": ["id_liputan6"], "model-index": [{"name": "bert2bert-model99-last", "results": []}]}
Alfahluzi/bert2bert-model99-last
null
[ "transformers", "tensorboard", "safetensors", "encoder-decoder", "text2text-generation", "generated_from_trainer", "dataset:id_liputan6", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:24:13+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #encoder-decoder #text2text-generation #generated_from_trainer #dataset-id_liputan6 #autotrain_compatible #endpoints_compatible #region-us
bert2bert-model99-last
======================

This model is a fine-tuned version of [](URL) on the id\_liputan6 dataset.
It achieves the following results on the evaluation set:

* Loss: 2.8701
* R1 Precision: 0.3001
* R1 Recall: 0.34
* R1 Fmeasure: 0.3156
* R2 Precision: 0.121
* R2 Recall: 0.1366
* R2 Fmeasure: 0.1269
* Rl Precision: 0.239
* Rl Recall: 0.2707
* Rl Fmeasure: 0.2513

Model description
-----------------

More information needed

Intended uses & limitations
---------------------------

More information needed

Training and evaluation data
----------------------------

More information needed

Training procedure
------------------

### Training hyperparameters

The following hyperparameters were used during training:

* learning\_rate: 5e-05
* train\_batch\_size: 18
* eval\_batch\_size: 18
* seed: 42
* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
* lr\_scheduler\_type: linear
* num\_epochs: 8
* mixed\_precision\_training: Native AMP

### Training results

### Framework versions

* Transformers 4.40.0
* Pytorch 2.2.1
* Datasets 2.19.0
* Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 18\n* eval\\_batch\\_size: 18\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 8\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #encoder-decoder #text2text-generation #generated_from_trainer #dataset-id_liputan6 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 18\n* eval\\_batch\\_size: 18\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 8\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
Hapiri/OrpoLlama-3-8B
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:25:09+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
codemaster-ab/Llama-3-8B-HealthCounsellingConversation
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:25:21+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # micro_base_help_class_tapt_seed_1 This model is a fine-tuned version of [BigTMiami/micro_base_help_tapt_pretrain_model](https://huggingface.co/BigTMiami/micro_base_help_tapt_pretrain_model) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3254 - Accuracy: 0.8584 - F1 Macro: 0.6599 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 1 - optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:| | 0.347 | 1.0 | 313 | 0.3434 | 0.8536 | 0.4659 | | 0.3196 | 2.0 | 626 | 0.3397 | 0.8578 | 0.6761 | | 0.2627 | 3.0 | 939 | 0.3875 | 0.8636 | 0.5878 | | 0.2091 | 4.0 | 1252 | 0.5313 | 0.8604 | 0.6143 | | 0.1574 | 5.0 | 1565 | 0.7826 | 0.8552 | 0.6218 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
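For inference, the published checkpoint can be loaded with the standard `transformers` sequence-classification API. A minimal sketch, assuming the repository id shown in this card; the label names are not documented, so only class probabilities are printed:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

repo = "BigTMiami/micro_base_help_class_tapt_seed_1"  # repository id from this card

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)
model.eval()

inputs = tokenizer("This review was really helpful to me.", return_tensors="pt")
with torch.no_grad():
    probs = model(**inputs).logits.softmax(dim=-1)

print(probs)  # per-class probabilities; the label mapping is not documented here
```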
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "BigTMiami/micro_base_help_tapt_pretrain_model", "model-index": [{"name": "micro_base_help_class_tapt_seed_1", "results": []}]}
BigTMiami/micro_base_help_class_tapt_seed_1
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:BigTMiami/micro_base_help_tapt_pretrain_model", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:25:32+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_tapt\_seed\_1 ======================================= This model is a fine-tuned version of BigTMiami/micro\_base\_help\_tapt\_pretrain\_model on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.3254 * Accuracy: 0.8584 * F1 Macro: 0.6599 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 1 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 1\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 1\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mistralv1_lora_r4_1e-4_e5 This model is a fine-tuned version of [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - PEFT 0.9.0 - Transformers 4.39.3 - Pytorch 2.2.2 - Datasets 2.18.0 - Tokenizers 0.15.2
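Because this repository holds a PEFT (LoRA) adapter rather than a full checkpoint, it is applied on top of the base model at load time. A minimal sketch, assuming the adapter is published under the repository id shown in this card and that `accelerate` is installed for `device_map="auto"`:

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "mistralai/Mistral-7B-v0.1"               # base model named in this card
adapter_id = "fangzhaoz/mistralv1_lora_r4_1e-4_e5"  # repository id from this card

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(base_model, adapter_id)  # attach the LoRA adapter

inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```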
{"library_name": "peft", "tags": ["generated_from_trainer"], "base_model": "mistralai/Mistral-7B-v0.1", "model-index": [{"name": "mistralv1_lora_r4_1e-4_e5", "results": []}]}
fangzhaoz/mistralv1_lora_r4_1e-4_e5
null
[ "peft", "safetensors", "generated_from_trainer", "base_model:mistralai/Mistral-7B-v0.1", "region:us" ]
null
2024-04-20T10:26:55+00:00
[]
[]
TAGS #peft #safetensors #generated_from_trainer #base_model-mistralai/Mistral-7B-v0.1 #region-us
# mistralv1_lora_r4_1e-4_e5 This model is a fine-tuned version of mistralai/Mistral-7B-v0.1 on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 4 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - PEFT 0.9.0 - Transformers 4.39.3 - Pytorch 2.2.2 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# mistralv1_lora_r4_1e-4_e5\n\nThis model is a fine-tuned version of mistralai/Mistral-7B-v0.1 on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0001\n- train_batch_size: 4\n- eval_batch_size: 8\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5", "### Training results", "### Framework versions\n\n- PEFT 0.9.0\n- Transformers 4.39.3\n- Pytorch 2.2.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #generated_from_trainer #base_model-mistralai/Mistral-7B-v0.1 #region-us \n", "# mistralv1_lora_r4_1e-4_e5\n\nThis model is a fine-tuned version of mistralai/Mistral-7B-v0.1 on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0001\n- train_batch_size: 4\n- eval_batch_size: 8\n- seed: 0\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5", "### Training results", "### Framework versions\n\n- PEFT 0.9.0\n- Transformers 4.39.3\n- Pytorch 2.2.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
fangzhaoz/mistralv1_lora_r4_1e-4_e5_merged
null
[ "transformers", "safetensors", "mistral", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:27:05+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #mistral #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
tony-k/code-search-net-tokenizer
null
[ "transformers", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:27:36+00:00
[ "1910.09700" ]
[]
TAGS #transformers #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
adapter-transformers
# Adapter `BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_3` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_3", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
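Once the adapter is active, inference can be sketched as below. The example assumes the `model` object from the snippet above, uses the base `roberta-base` tokenizer (a reasonable but unstated choice), and treats the classification head as exposing standard Transformers-style logits; the meaning of the predicted class index is not documented in the card.

```python
import torch
from transformers import AutoTokenizer

# Assumes `model` was created and the adapter activated as shown above.
tokenizer = AutoTokenizer.from_pretrained("roberta-base")

batch = tokenizer("Great product, arrived on time.", return_tensors="pt")
with torch.no_grad():
    logits = model(**batch).logits  # classification head loaded with the adapter
print(logits.argmax(dim=-1))        # predicted class index; label semantics undocumented
```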
{"tags": ["adapter-transformers", "roberta"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_3
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T10:32:45+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_3' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_3' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_3' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
text-to-image
diffusers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🧨 diffusers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "diffusers"}
Niggendar/AnimeRealPantheon_k1ssBakedvae
null
[ "diffusers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
null
2024-04-20T10:34:05+00:00
[ "1910.09700" ]
[]
TAGS #diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionPipeline #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
automatic-speech-recognition
transformers.js
Fine-tuned Serbian Whisper small for use with Transformers.js: an ONNX conversion of [Sagicc/whisper-small-sr-yodas-v2](https://huggingface.co/Sagicc/whisper-small-sr-yodas-v2), with ONNX weights compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
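For reference, a hedged Python sketch of the Optimum conversion step the note describes, assuming current `optimum.onnxruntime` APIs; the source checkpoint id comes from the card, and the output paths are placeholders mirroring the recommended `onnx` subfolder layout.

```python
from optimum.onnxruntime import ORTModelForSpeechSeq2Seq
from transformers import AutoProcessor

src = "Sagicc/whisper-small-sr-yodas-v2"  # fine-tuned checkpoint named above

# export=True converts the PyTorch weights to ONNX during loading
model = ORTModelForSpeechSeq2Seq.from_pretrained(src, export=True)
processor = AutoProcessor.from_pretrained(src)

model.save_pretrained("whisper-small-sr-onnx/onnx")  # ONNX weights in an onnx/ subfolder
processor.save_pretrained("whisper-small-sr-onnx")   # tokenizer/feature extractor at the root
```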
{"language": ["sr"], "license": "mit", "library_name": "transformers.js"}
Sagicc/whisper-small-sr-onnx
null
[ "transformers.js", "onnx", "whisper", "automatic-speech-recognition", "sr", "license:mit", "region:us" ]
null
2024-04-20T10:37:37+00:00
[]
[ "sr" ]
TAGS #transformers.js #onnx #whisper #automatic-speech-recognition #sr #license-mit #region-us
Fine-tuned Serbian Whisper small for use with URL: an ONNX conversion of Sagicc/whisper-small-sr-yodas-v2, with ONNX weights compatible with URL. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using Optimum and structuring your repo like this one (with ONNX weights located in a subfolder named 'onnx').
[]
[ "TAGS\n#transformers.js #onnx #whisper #automatic-speech-recognition #sr #license-mit #region-us \n" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # micro_base_help_class_tapt_seed_2 This model is a fine-tuned version of [BigTMiami/micro_base_help_tapt_pretrain_model](https://huggingface.co/BigTMiami/micro_base_help_tapt_pretrain_model) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3416 - Accuracy: 0.8534 - F1 Macro: 0.6723 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 2 - optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:| | 0.3574 | 1.0 | 313 | 0.3770 | 0.8534 | 0.4605 | | 0.3055 | 2.0 | 626 | 0.3547 | 0.847 | 0.6722 | | 0.2465 | 3.0 | 939 | 0.4434 | 0.8644 | 0.6004 | | 0.1874 | 4.0 | 1252 | 0.5127 | 0.8588 | 0.6173 | | 0.1542 | 5.0 | 1565 | 0.7644 | 0.8424 | 0.6561 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
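A sketch of `TrainingArguments` mirroring the hyperparameter block above, in case the run needs to be reproduced; `output_dir` is a placeholder, and the dataset/model wiring is omitted.

```python
from transformers import TrainingArguments

# Mirrors the hyperparameters listed in the card; output_dir is a placeholder.
args = TrainingArguments(
    output_dir="micro_base_help_class_tapt_seed_2",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=2,
    adam_beta1=0.9,
    adam_beta2=0.98,
    adam_epsilon=1e-6,
    lr_scheduler_type="linear",
    num_train_epochs=10,
)
```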
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "BigTMiami/micro_base_help_tapt_pretrain_model", "model-index": [{"name": "micro_base_help_class_tapt_seed_2", "results": []}]}
BigTMiami/micro_base_help_class_tapt_seed_2
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:BigTMiami/micro_base_help_tapt_pretrain_model", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:38:52+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_tapt\_seed\_2 ======================================= This model is a fine-tuned version of BigTMiami/micro\_base\_help\_tapt\_pretrain\_model on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.3416 * Accuracy: 0.8534 * F1 Macro: 0.6723 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 2 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 2\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 2\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
null
null
Using ToolsBaer OST to HTML Conversion Software is a risk-free method of converting Outlook OST files to HTML format. One or more OST database files can be converted to HTML with this quick and simple procedure, preserving all required features and information. Users can operate this easy-to-use application in just three or four simple steps, so any non-technical user can convert Outlook OST files into HTML data files. The email attributes exported by the application include name, CC, BCC, to, from, hyperlinks, photos, and attachments. This version of the program guarantees 100% conversion accuracy, and no additional software is needed to use the application. The program is compatible with Windows 11, 10, 8.1, 8, 7, XP, and Vista. Download the application's free demo version to try it. Read More:- http://www.toolsbaer.com/ost-to-html-conversion/
{}
madelineoliver/ToolsBaer-OST-to-HTML-Conversion
null
[ "region:us" ]
null
2024-04-20T10:45:26+00:00
[]
[]
TAGS #region-us
Using ToolsBaer OST to HTML Conversion Software is a risk-free method of converting Outlook OST files to HTML format. One or more OST database files can be converted to HTML with this quick and simple procedure, preserving all required features and information. Users can operate this easy-to-use application in just three or four simple steps, so any non-technical user can convert Outlook OST files into HTML data files. The email attributes exported by the application include name, CC, BCC, to, from, hyperlinks, photos, and attachments. This version of the program guarantees 100% conversion accuracy, and no additional software is needed to use the application. The program is compatible with Windows 11, 10, 8.1, 8, 7, XP, and Vista. Download the application's free demo version to try it. Read More:- URL
[]
[ "TAGS\n#region-us \n" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-reviews-online-courses-sentiment-analysis-sravni.ru-corp-ru This model is a fine-tuned version of [distilbert/distilbert-base-multilingual-cased](https://huggingface.co/distilbert/distilbert-base-multilingual-cased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.1533 - Accuracy: 0.9526 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 491 | 0.1322 | 0.9475 | | 0.2248 | 2.0 | 982 | 0.1533 | 0.9526 | ### Framework versions - Transformers 4.30.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.13.3
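A minimal inference sketch for this sentiment checkpoint; the repository id is taken from the card, while the Russian sample review and the label vocabulary are illustrative assumptions.

```python
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="C0uchP0tat0/bert-reviews-online-courses-sentiment-analysis-sravni.ru-corp-ru",
)

# "Great course, everything is explained clearly." -- sample input is illustrative;
# the card does not document the label names.
print(clf("Отличный курс, всё объясняют понятно."))
```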
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "bert-reviews-online-courses-sentiment-analysis-sravni.ru-corp-ru", "results": []}]}
C0uchP0tat0/bert-reviews-online-courses-sentiment-analysis-sravni.ru-corp-ru
null
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:46:37+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #distilbert #text-classification #generated_from_trainer #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
URL-corp-ru =========== This model is a fine-tuned version of distilbert/distilbert-base-multilingual-cased on the None dataset. It achieves the following results on the evaluation set: * Loss: 0.1533 * Accuracy: 0.9526 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 2 ### Training results ### Framework versions * Transformers 4.30.0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.13.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.30.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.13.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #distilbert #text-classification #generated_from_trainer #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.30.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.13.3" ]
text-to-image
diffusers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🧨 diffusers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "diffusers"}
Niggendar/XXXXXX
null
[ "diffusers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
null
2024-04-20T10:50:16+00:00
[ "1910.09700" ]
[]
TAGS #diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionXLPipeline #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#diffusers #safetensors #arxiv-1910.09700 #endpoints_compatible #diffusers-StableDiffusionXLPipeline #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a diffusers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text2text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
ikeno-ada/nllb-200-distilled-1.3B-bitsandbytes-4bit-cpu
null
[ "transformers", "safetensors", "m2m_100", "text2text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "4-bit", "region:us" ]
null
2024-04-20T10:50:59+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #m2m_100 #text2text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #4-bit #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #m2m_100 #text2text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #4-bit #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # micro_base_help_class_tapt_seed_3 This model is a fine-tuned version of [BigTMiami/micro_base_help_tapt_pretrain_model](https://huggingface.co/BigTMiami/micro_base_help_tapt_pretrain_model) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3588 - Accuracy: 0.8398 - F1 Macro: 0.6881 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 3 - optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:| | 0.3463 | 1.0 | 313 | 0.3531 | 0.8586 | 0.6005 | | 0.303 | 2.0 | 626 | 0.3318 | 0.8616 | 0.5976 | | 0.2288 | 3.0 | 939 | 0.3732 | 0.8288 | 0.6841 | | 0.1718 | 4.0 | 1252 | 0.6695 | 0.8586 | 0.5697 | | 0.1466 | 5.0 | 1565 | 0.7902 | 0.855 | 0.6347 | | 0.0853 | 6.0 | 1878 | 0.9930 | 0.847 | 0.6702 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
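For reference, the hyperparameters listed above map directly onto `TrainingArguments`. Below is a minimal, hedged sketch of that setup; the dataset is a tiny stand-in (the card does not identify the real one), and the output directory name is an assumption.

```python
from datasets import Dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

ckpt = "BigTMiami/micro_base_help_tapt_pretrain_model"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = AutoModelForSequenceClassification.from_pretrained(ckpt, num_labels=2)

# Tiny stand-in dataset; the card trains on an unidentified dataset.
raw = Dataset.from_dict({"text": ["great product", "did not help"], "label": [1, 0]})
ds = raw.map(lambda b: tokenizer(b["text"], truncation=True, padding="max_length",
                                 max_length=64), batched=True)

args = TrainingArguments(
    output_dir="micro_base_help_class_tapt_seed_3",  # assumed name
    learning_rate=2e-05,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=3,
    adam_beta1=0.9, adam_beta2=0.98, adam_epsilon=1e-06,  # Adam betas/epsilon above
    lr_scheduler_type="linear",
    num_train_epochs=10,
)
Trainer(model=model, args=args, train_dataset=ds, eval_dataset=ds).train()
```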
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "BigTMiami/micro_base_help_tapt_pretrain_model", "model-index": [{"name": "micro_base_help_class_tapt_seed_3", "results": []}]}
BigTMiami/micro_base_help_class_tapt_seed_3
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:BigTMiami/micro_base_help_tapt_pretrain_model", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:52:18+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_tapt\_seed\_3 ======================================= This model is a fine-tuned version of BigTMiami/micro\_base\_help\_tapt\_pretrain\_model on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.3588 * Accuracy: 0.8398 * F1 Macro: 0.6881 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 3 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 3\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 3\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
null
adapter-transformers
# Adapter `BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_4` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_4", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
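Once loaded with `set_active=True`, the adapter's classification head runs through the normal forward pass. A short usage sketch is below; the input sentence and the meaning of the predicted class index are illustrative assumptions, not taken from the adapter config.

```python
import torch
from transformers import AutoTokenizer
from adapters import AutoAdapterModel

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = AutoAdapterModel.from_pretrained("roberta-base")
model.load_adapter("BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_4",
                   source="hf", set_active=True)

# Classify one example review with the adapter's prediction head.
inputs = tokenizer("This review was genuinely helpful.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # class index; label semantics are an assumption
```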
{"tags": ["adapter-transformers", "roberta"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_4
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T10:53:10+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_4' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_4' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/amz_10k_seq_bn_helpf_class_adp_seed_4' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
translation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": ["text-generation-inference"], "pipeline_tag": "translation"}
ikeno-ada/nllb-200-distilled-600M-bitsandbytes-4bit-cpu
null
[ "transformers", "safetensors", "m2m_100", "text2text-generation", "text-generation-inference", "translation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "4-bit", "region:us" ]
null
2024-04-20T10:54:59+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #m2m_100 #text2text-generation #text-generation-inference #translation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #4-bit #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #m2m_100 #text2text-generation #text-generation-inference #translation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #4-bit #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
BohdanPetryshyn/codellama-7b-clone
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:55:02+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
transformers
## About <!-- ### quantize_version: 1 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: --> <!-- ### vocab_type: --> static quants of https://huggingface.co/deepseek-ai/deepseek-llm-67b-base <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/deepseek-llm-67b-base-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q2_K.gguf) | Q2_K | 25.2 | | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.IQ3_XS.gguf) | IQ3_XS | 28.0 | | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q3_K_S.gguf) | Q3_K_S | 29.4 | | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.IQ3_S.gguf) | IQ3_S | 29.5 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.IQ3_M.gguf) | IQ3_M | 30.6 | | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q3_K_M.gguf) | Q3_K_M | 32.8 | lower quality | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q3_K_L.gguf) | Q3_K_L | 35.7 | | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.IQ4_XS.gguf) | IQ4_XS | 36.6 | | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q4_K_S.gguf) | Q4_K_S | 38.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q4_K_M.gguf) | Q4_K_M | 40.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q5_K_S.gguf) | Q5_K_S | 46.6 | | | [GGUF](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q5_K_M.gguf) | Q5_K_M | 47.8 | | | [PART 1](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q6_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q6_K.gguf.part2of2) | Q6_K | 55.4 | very good quality | | [PART 1](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q8_0.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/deepseek-llm-67b-base-GGUF/resolve/main/deepseek-llm-67b-base.Q8_0.gguf.part2of2) | Q8_0 | 71.7 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. 
## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
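To make the multi-part note above concrete, here is a hedged Python sketch that fetches the two Q6_K parts with `huggingface_hub` and joins them into one GGUF file; the file names come from the table above, and the local output path is an assumption.

```python
import shutil
from huggingface_hub import hf_hub_download

repo = "mradermacher/deepseek-llm-67b-base-GGUF"
parts = ["deepseek-llm-67b-base.Q6_K.gguf.part1of2",
         "deepseek-llm-67b-base.Q6_K.gguf.part2of2"]

# Concatenate the parts in order into a single usable GGUF file.
with open("deepseek-llm-67b-base.Q6_K.gguf", "wb") as out:
    for name in parts:
        path = hf_hub_download(repo_id=repo, filename=name)
        with open(path, "rb") as chunk:
            shutil.copyfileobj(chunk, out)
```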
{"language": ["en"], "license": "other", "library_name": "transformers", "base_model": "deepseek-ai/deepseek-llm-67b-base", "license_link": "LICENSE", "license_name": "deepseek", "quantized_by": "mradermacher"}
mradermacher/deepseek-llm-67b-base-GGUF
null
[ "transformers", "gguf", "en", "base_model:deepseek-ai/deepseek-llm-67b-base", "license:other", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:55:03+00:00
[]
[ "en" ]
TAGS #transformers #gguf #en #base_model-deepseek-ai/deepseek-llm-67b-base #license-other #endpoints_compatible #region-us
About ----- static quants of URL weighted/imatrix quants are available at URL Usage ----- If you are unsure how to use GGUF files, refer to one of TheBloke's READMEs for more details, including on how to concatenate multi-part files. Provided Quants --------------- (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): !URL And here are Artefact2's thoughts on the matter: URL FAQ / Model Request ------------------- See URL for some answers to questions you might have and/or if you want some other model quantized. Thanks ------ I thank my company, nethype GmbH, for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.
[]
[ "TAGS\n#transformers #gguf #en #base_model-deepseek-ai/deepseek-llm-67b-base #license-other #endpoints_compatible #region-us \n" ]
text-generation
transformers
Checkpoint weights seem to be malfunctioning with the Hugging Face transformers library. Please bear with me as I work on debugging and resolving the issue. Reference code: https://github.com/Jha-Pranav/OpenTransformer
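For context, the failing path is the standard loading call; a hedged sketch of what that looks like follows (the presence of a tokenizer in the repo is an assumption):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Standard load path; per the note above, this is currently where the
# checkpoint misbehaves with the transformers library.
tok = AutoTokenizer.from_pretrained("Jha-Pranav/blm-18.6M")
model = AutoModelForCausalLM.from_pretrained("Jha-Pranav/blm-18.6M")
out = model.generate(**tok("Hello", return_tensors="pt"), max_new_tokens=20)
print(tok.decode(out[0]))
```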
{"license": "apache-2.0"}
Jha-Pranav/blm-18.6M
null
[ "transformers", "pytorch", "llama", "text-generation", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:55:38+00:00
[]
[]
TAGS #transformers #pytorch #llama #text-generation #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
Checkpoint weights seem to be malfunctioning with the Hugging Face transformers library. Please bear with me as I work on debugging and resolving the issue. Reference code: URL
[]
[ "TAGS\n#transformers #pytorch #llama #text-generation #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # micro_base_help_class_no_pre_seed_2 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.3300 - Accuracy: 0.862 - F1 Macro: 0.6407 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 2 - optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:| | 0.3698 | 1.0 | 313 | 0.3706 | 0.8534 | 0.4605 | | 0.3166 | 2.0 | 626 | 0.3448 | 0.8562 | 0.6411 | | 0.2727 | 3.0 | 939 | 0.4133 | 0.858 | 0.5708 | | 0.2239 | 4.0 | 1252 | 0.4942 | 0.8562 | 0.5869 | | 0.174 | 5.0 | 1565 | 0.6633 | 0.8542 | 0.6338 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "roberta-base", "model-index": [{"name": "micro_base_help_class_no_pre_seed_2", "results": []}]}
BigTMiami/micro_base_help_class_no_pre_seed_2
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T10:56:42+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_no\_pre\_seed\_2 ========================================== This model is a fine-tuned version of roberta-base on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.3300 * Accuracy: 0.862 * F1 Macro: 0.6407 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 2 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 2\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 2\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
text-generation
transformers
My first run, 8192 ctx qlora, trained on AEZAKMI-3_6 dataset. The base seems not too slopped, but the finetune is not great - lots of slopped GPTisms, "It's important to remember" etc. It does seem uncensored though, so if you're not fine with Llama-3-8B-Instruct, this might be an option until better finetunes come out. ChatML prompt format. Training script below. Took around 8 hours on a 3090 Ti via unsloth. Benchmark prompt results can be found in [my misc repo](https://huggingface.co/datasets/adamo1139/misc/blob/main/benchmarks/llama-3-8b-aezakmi-1904/benchmark_prompts.txt) ``` from unsloth import FastLanguageModel from datasets import Dataset, load_dataset from dataclasses import dataclass, field from typing import Dict, Optional import torch max_seq_length = 8192 # Choose any! We auto support RoPE Scaling internally! dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+ load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False. model, tokenizer = FastLanguageModel.from_pretrained( model_name = "model-path-llama-3-8b", # Choose ANY! eg mistralai/Mistral-7B-Instruct-v0.2 max_seq_length = max_seq_length, attn_implementation="flash_attention_2", dtype = dtype, load_in_4bit = load_in_4bit, # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf ) #@title Alignment Handbook utils import os import re from typing import List, Literal, Optional from datasets import DatasetDict, concatenate_datasets, load_dataset, load_from_disk from datasets.builder import DatasetGenerationError #DEFAULT_CHAT_TEMPLATE = "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}" tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}" from datasets import load_dataset EOS_TOKEN = tokenizer.eos_token # Must add EOS_TOKEN dataset = load_dataset("adamo1139/AEZAKMI_v3-6", split = "train") def formatting_prompts_func(examples): convos = examples["conversations"] texts = [] mapper = {"system" : "<|im_start|>system\n", "human" : "<|im_start|>user\n", "gpt" : "<|im_start|>assistant\n"} end_mapper = {"system" : "<|im_end|>\n", "human" : "<|im_end|>\n", "gpt" : "<|im_end|>\n"} for convo in convos: text = "".join(f"{mapper[(turn := x['from'])]} {x['value']}{end_mapper[turn]}" for x in convo) texts.append(f"{text}{EOS_TOKEN}") # Since there are multi-turn # conversations, I append the EOS_TOKEN at the end of the whole # conversation. These conversations always end with a gpt message.
return { "text" : texts, } pass import pprint pprint.pprint("""NOT a formatted dataset""") pprint.pprint(dataset[250]) pprint.pprint(dataset[260]) pprint.pprint(dataset[270]) pprint.pprint(dataset[280]) pprint.pprint(dataset[290]) dataset = dataset.map(formatting_prompts_func, batched = True,) # Print sample pprint.pprint("""formatted dataset""") pprint.pprint(dataset[250]) pprint.pprint(dataset[260]) pprint.pprint(dataset[270]) pprint.pprint(dataset[280]) pprint.pprint(dataset[290]) model = FastLanguageModel.get_peft_model( model, r = 32, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128 target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj",], lora_alpha = 32, lora_dropout = 0, # Currently only supports dropout = 0 bias = "none", # Currently only supports bias = "none" use_gradient_checkpointing = "unsloth", random_state = 3407, use_rslora = False, # We support rank stabilized LoRA loftq_config = None, # And LoftQ ) model.print_trainable_parameters() from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, TrainingArguments from transformers.utils import logging from trl import SFTTrainer sft_trainer = SFTTrainer( model = model, tokenizer = tokenizer, train_dataset = dataset, dataset_text_field = "text", max_seq_length = 8192, packing=True, args = TrainingArguments( evaluation_strategy = "no", per_device_train_batch_size = 2, gradient_accumulation_steps = 4, num_train_epochs = 1.5, warmup_steps = 10, learning_rate = 0.000095, fp16 = not torch.cuda.is_bf16_supported(), bf16 = torch.cuda.is_bf16_supported(), logging_steps = 1, output_dir = "1904-llama-3-8b-aezakmi-intermediate", optim = "adamw_8bit", weight_decay = 0.0, lr_scheduler_type = "cosine", seed = 42, save_strategy = "steps", save_steps = 150, save_total_limit = 5, ), ) sft_trainer.train() model.save_pretrained("1904-llama-3-8b-aezakmi-final") # Local saving ```
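As a follow-up to the script, here is a hedged sketch of loading the saved LoRA for ChatML inference with unsloth; the path matches the `save_pretrained` call above, while the prompt and generation settings are assumptions (and the ChatML `chat_template` may need to be set again as in the training script, since only the model was saved).

```python
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "1904-llama-3-8b-aezakmi-final",  # LoRA saved above
    max_seq_length = 8192,
    load_in_4bit = True,
)
FastLanguageModel.for_inference(model)  # enable fast generation mode

# Re-apply the ChatML template from the training script here if needed.
messages = [{"role": "user", "content": "Say hi in one sentence."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True,
                                       return_tensors="pt").to(model.device)
out = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```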
{"license": "other", "license_name": "llama3", "license_link": "LICENSE"}
adamo1139/Llama-3-8B-AEZAKMI-run1
null
[ "transformers", "safetensors", "llama", "text-generation", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T10:58:35+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #license-other #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
My first run: 8192 ctx QLoRA, trained on the AEZAKMI-3_6 dataset. The base doesn't seem too slopped, but the finetune is not great - lots of slopped GPTisms, "It's important to remember" etc. It does seem uncensored though, so if you're not fine with Llama-3-8B-Instruct, this might be an option until better finetunes come out. ChatML prompt format. Training script below. Took around 8 hours on a 3090 Ti via unsloth. Benchmark prompt results can be found in my misc repo
[]
[ "TAGS\n#transformers #safetensors #llama #text-generation #license-other #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
null
null
BPE Tokenizer Model trained on the AOFrench dataset with a vocab size of 8,192.
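
For reference, training a comparable tokenizer with the Hugging Face `tokenizers` library looks roughly like this. This is a minimal sketch, not the actual AOFrench training setup; the corpus file, special tokens, and output path are placeholders.

```python
from tokenizers import Tokenizer, models, pre_tokenizers, trainers

# Byte-level BPE with the vocab size stated above
tokenizer = Tokenizer(models.BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()

trainer = trainers.BpeTrainer(
    vocab_size=8192,
    special_tokens=["[UNK]", "[PAD]", "[BOS]", "[EOS]"],  # placeholder special tokens
)
tokenizer.train(files=["ao_french.txt"], trainer=trainer)  # placeholder corpus path
tokenizer.save("fr-cambaby-tokenizer.json")
```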
{"language": ["fr"], "license": "mit"}
cambridge-climb/FR-CamBabyTokenizer
null
[ "fr", "license:mit", "region:us" ]
null
2024-04-20T11:00:33+00:00
[]
[ "fr" ]
TAGS #fr #license-mit #region-us
BPE Tokenizer Model trained on the AOFrench dataset with a vocab size of 8,192.
[]
[ "TAGS\n#fr #license-mit #region-us \n" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
fombus/histllama
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:02:12+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Ghost 7B Alpha

<img src="https://cdn-uploads.huggingface.co/production/uploads/600ae38cc92b79f54efd4556/QPrQZMQX_jzyYngmreP0_.jpeg" alt="Ghost 7B Alpha Logo" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/>

This generation of large language models focuses on optimized reasoning, multi-task knowledge, and tool support.

# Introduction

**Ghost 7B Alpha** is a large language model fine-tuned from Mistral 7B, with a size of 7 billion parameters. The model was developed with the goal of optimizing reasoning ability, multi-task knowledge, and tool usage. The model works best in its main trained and optimized languages, English and Vietnamese.

Overall, the model is well suited as a pretrained base that you can continue to develop for your desired tasks: building virtual assistants, or handling tasks such as coding, translation, question answering, and document creation. It is an efficient, fast, and extremely cheap open model.

## Specifications

- Name: **Ghost 7B Alpha**.
- Model size: 7 billion parameters.
- Context length: 8K, 8192.
- Languages: English and Vietnamese.
- Main tasks: reasoning, multi-tasking knowledge and function tools.
- License: [Ghost 7B LICENSE AGREEMENT](https://ghost-x.org/ghost-7b-license).
- Based on: Mistral 7B.
- Distributions: Standard (BF16), GGUF, AWQ.
- Developed by: **Ghost X**, [Hieu Lam](https://huggingface.co/lamhieu).

### Links

- Card model: [🤗 HuggingFace](https://huggingface.co/ghost-x/ghost-7b-alpha).
- Official website: [Ghost 7B Alpha](https://ghost-x.org/docs/models/ghost-7b-alpha/).
- Demo: [Playground with Ghost 7B Alpha](https://ghost-x.org/docs/notebooks/playground-with-ghost-7b-alpha/).

## Distributions

We provide multiple distributions so you can pick the access option that suits your needs. Always make sure you know which version you need and which will serve you best.

| Version | Model card |
| ------- | -------------------------------------------------------------------- |
| BF16 | [🤗 HuggingFace](https://huggingface.co/ghost-x/ghost-7b-alpha) |
| GGUF | [🤗 HuggingFace](https://huggingface.co/ghost-x/ghost-7b-alpha-gguf) |
| AWQ | [🤗 HuggingFace](https://huggingface.co/ghost-x/ghost-7b-alpha-awq) |

## Note

For all official information and updates about the model, see here:

- Card model: [🤗 HuggingFace](https://huggingface.co/ghost-x/ghost-7b-alpha).
- Official website: [Ghost 7B Alpha](https://ghost-x.org/docs/models/ghost-7b-alpha/).
- Demo: [Playground with Ghost 7B Alpha](https://ghost-x.org/docs/notebooks/playground-with-ghost-7b-alpha/).
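
Since this repository ships the GGUF distribution, loading it with `llama-cpp-python` looks roughly like this. This is a minimal sketch; the quantization filename pattern is a guess (pick whichever file the repo actually contains), and llama.cpp will read the chat template from the GGUF metadata when available.

```python
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="ghost-x/ghost-7b-alpha-gguf",
    filename="*Q4_K_M.gguf",  # placeholder pattern - match a file that exists in the repo
    n_ctx=8192,               # the card lists an 8K context length
)
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Why is the sky blue?"}],
    max_tokens=256,
)
print(out["choices"][0]["message"]["content"])
```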
{"language": ["en", "vi"], "license": "other", "library_name": "transformers", "tags": ["ghost", "tools", "chat"], "license_name": "ghost-7b", "license_link": "https://ghost-x.org/ghost-7b-license", "pipeline_tag": "text-generation", "widget": [{"text": "Why is the sky blue ?", "output": {"text": "The sky appears blue because of a phenomenon called Rayleigh scattering. Sunlight is composed of all colors of the visible spectrum, but blue light has a shorter wavelength than other colors. When sunlight enters the Earth's atmosphere, it interacts with molecules of nitrogen and oxygen, which are the main components of our atmosphere. These molecules scatter the shorter wavelengths of light, such as blue and violet, more than the longer wavelengths, such as red and orange. As a result, when we look up at the sky, we see more blue light than other colors because the blue light has been scattered more by the molecules in the atmosphere."}}]}
ghost-x/ghost-7b-alpha-gguf
null
[ "transformers", "gguf", "mistral", "ghost", "tools", "chat", "text-generation", "conversational", "en", "vi", "license:other", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T11:04:09+00:00
[]
[ "en", "vi" ]
TAGS #transformers #gguf #mistral #ghost #tools #chat #text-generation #conversational #en #vi #license-other #endpoints_compatible #text-generation-inference #region-us
Ghost 7B Alpha ============== <img src="URL alt="Ghost 7B Alpha Logo" width="400" style="margin-left:'auto' margin-right:'auto' display:'block'"/> The large generation of language models focuses on optimizing excellent reasoning, multi-task knowledge, and tools support. Introduction ============ Ghost 7B Alpha is a large language model fine-tuned from Mistral 7B, with a size of 7 billion parameters. The model was developed with the goal of optimizing reasoning ability, multi-task knowledge and supporting tool usage. The model works well with the main trained and optimized languages being English and Vietnamese. Overall, the model is suitable when making a pretrained version so you can continue to develop the desired tasks, develop virtual assistants, perform features on tasks such as coding, translation, answering questions, creating documents, etc. It is truly an efficient, fast and extremely cheap open model. Specifications -------------- * Name: Ghost 7B Alpha. * Model size: 7 billion parameters. * Context length: 8K, 8192. * Languages: English and Vietnamese. * Main tasks: reasoning, multi-tasking knowledge and function tools. * License: Ghost 7B LICENSE AGREEMENT. * Based on: Mistral 7B. * Distributions: Standard (BF16), GGUF, AWQ. * Developed by: Ghost X, Hieu Lam. ### Links * Card model: HuggingFace. * Official website: Ghost 7B Alpha. * Demo: Playground with Ghost 7B Alpha. Distributions ------------- We create many distributions to give you the best access options that best suit your needs. Always make sure you know which version you need and what will help you operate better. Note ---- For all official information and updates about the model, see here: * Card model: HuggingFace. * Official website: Ghost 7B Alpha. * Demo: Playground with Ghost 7B Alpha.
[ "### Links\n\n\n* Card model: HuggingFace.\n* Official website: Ghost 7B Alpha.\n* Demo: Playground with Ghost 7B Alpha.\n\n\nDistributions\n-------------\n\n\nWe create many distributions to give you the best access options that best suit your needs. Always make sure you know which version you need and what will help you operate better.\n\n\n\nNote\n----\n\n\nFor all official information and updates about the model, see here:\n\n\n* Card model: HuggingFace.\n* Official website: Ghost 7B Alpha.\n* Demo: Playground with Ghost 7B Alpha." ]
[ "TAGS\n#transformers #gguf #mistral #ghost #tools #chat #text-generation #conversational #en #vi #license-other #endpoints_compatible #text-generation-inference #region-us \n", "### Links\n\n\n* Card model: HuggingFace.\n* Official website: Ghost 7B Alpha.\n* Demo: Playground with Ghost 7B Alpha.\n\n\nDistributions\n-------------\n\n\nWe create many distributions to give you the best access options that best suit your needs. Always make sure you know which version you need and what will help you operate better.\n\n\n\nNote\n----\n\n\nFor all official information and updates about the model, see here:\n\n\n* Card model: HuggingFace.\n* Official website: Ghost 7B Alpha.\n* Demo: Playground with Ghost 7B Alpha." ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# micro_base_help_class_tapt_seed_4

This model is a fine-tuned version of [BigTMiami/micro_base_help_tapt_pretrain_model](https://huggingface.co/BigTMiami/micro_base_help_tapt_pretrain_model) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.9585
- Accuracy: 0.847
- F1 Macro: 0.6633

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 4
- optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|
| 0.3721        | 1.0   | 313  | 0.3594          | 0.8574   | 0.5072   |
| 0.3008        | 2.0   | 626  | 0.3596          | 0.8662   | 0.6247   |
| 0.248         | 3.0   | 939  | 0.3616          | 0.8516   | 0.6425   |
| 0.1696        | 4.0   | 1252 | 0.5631          | 0.8306   | 0.6510   |
| 0.1631        | 5.0   | 1565 | 0.7292          | 0.8436   | 0.6440   |
| 0.1096        | 6.0   | 1878 | 1.0237          | 0.8378   | 0.6614   |
| 0.0839        | 7.0   | 2191 | 0.8822          | 0.8532   | 0.6140   |
| 0.0652        | 8.0   | 2504 | 1.0052          | 0.8532   | 0.6344   |
| 0.0329        | 9.0   | 2817 | 1.1225          | 0.848    | 0.6417   |

### Framework versions

- Transformers 4.36.2
- Pytorch 2.2.1+cu121
- Datasets 2.19.0
- Tokenizers 0.15.2
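
For reproduction, the hyperparameters above map onto `transformers` `TrainingArguments` roughly as follows. This is a minimal sketch; the output directory is a placeholder and the model, data, and `Trainer` wiring are omitted.

```python
from transformers import TrainingArguments

# Mirrors the hyperparameter list above
args = TrainingArguments(
    output_dir="micro_base_help_class_tapt_seed_4",  # placeholder
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=4,
    adam_beta1=0.9,
    adam_beta2=0.98,
    adam_epsilon=1e-6,
    lr_scheduler_type="linear",
    num_train_epochs=10,
)
```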
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "BigTMiami/micro_base_help_tapt_pretrain_model", "model-index": [{"name": "micro_base_help_class_tapt_seed_4", "results": []}]}
BigTMiami/micro_base_help_class_tapt_seed_4
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:BigTMiami/micro_base_help_tapt_pretrain_model", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:08:25+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_tapt\_seed\_4 ======================================= This model is a fine-tuned version of BigTMiami/micro\_base\_help\_tapt\_pretrain\_model on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.9585 * Accuracy: 0.847 * F1 Macro: 0.6633 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 4 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 4\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-BigTMiami/micro_base_help_tapt_pretrain_model #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 4\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
baconnier/CIB_BANKER_r32_Llama-3-8B
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T11:08:56+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
null
**This is a Llama-3 land now, cowboys!**

GGUF-IQ-Imatrix quants for [NLPark/Test0_SLIDE](https://huggingface.co/NLPark/Test0_SLIDE).

Presets [here](https://huggingface.co/Lewdiculous/Model-Requests/tree/main/data/presets/cope-llama-3-0.1).

> [!WARNING]
> Use the latest version of KoboldCpp. **Use the provided presets.** <br>
> This is all still highly experimental, let the authors know how it performs for you, feedback is more important than ever now.

**Original model information:**

![image/png](https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/Sno5yh_SkG74F0uXTx23o.png)

# Shi-Ci Language Identify & Decode Expositor

**8B**, Ruozhiba...

* [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B)

**Chinese**

Released as an early preview of our v3 LLMs. The v3 series covers the "Shi-Ci", "AnFeng" and "Cecilia" LLM products. The sizes are labelled from small to large: "Nano", "Leap", "Pattern", "Avocet", "Robin", "Kestrel".
{"license": "cc-by-nc-4.0"}
Lewdiculous/Test0_SLIDE-GGUF-IQ-Imatrix
null
[ "gguf", "license:cc-by-nc-4.0", "region:us" ]
null
2024-04-20T11:09:01+00:00
[]
[]
TAGS #gguf #license-cc-by-nc-4.0 #region-us
This is a Llama-3 land now, cowboys! GGUF-IQ-Imatrix quants for NLPark/Test0_SLIDE. Presets here. > [!WARNING] > Use the latest version of KoboldCpp. Use the provided presets. <br> > This is all still highly experimental, let the authors know how it performs for you, feedback is more important than ever now. Original model information: !image/png # Shi-Ci Language Identify & Decode Expositor 8B, Ruozhiba... * meta-llama/Meta-Llama-3-8B Chinese Released as an early preview of our v3 LLMs. The v3 series covers the "Shi-Ci", "AnFeng" and "Cecilia" LLM products. The sizes are labelled from small to large: "Nano", "Leap", "Pattern", "Avocet", "Robin", "Kestrel".
[ "# Shi-Ci Language Identify & Decode Expositor\n8B, Ruozhiba...\n\n* meta-llama/Meta-Llama-3-8B\n\nChinese\nReleased as an early preview of our v3 LLMs.\nThe v3 series covers the \"Shi-Ci\", \"AnFeng\" and \"Cecilia\" LLM products.\nThe sizes are labelled from small to large \"Nano\" \"Leap\" \"Pattern\" \"Avocet \"Robin\" \"Kestrel\"" ]
[ "TAGS\n#gguf #license-cc-by-nc-4.0 #region-us \n", "# Shi-Ci Language Identify & Decode Expositor\n8B, Ruozhiba...\n\n* meta-llama/Meta-Llama-3-8B\n\nChinese\nReleased as an early preview of our v3 LLMs.\nThe v3 series covers the \"Shi-Ci\", \"AnFeng\" and \"Cecilia\" LLM products.\nThe sizes are labelled from small to large \"Nano\" \"Leap\" \"Pattern\" \"Avocet \"Robin\" \"Kestrel\"" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# micro_base_help_class_no_pre_seed_3

This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3318
- Accuracy: 0.8454
- F1 Macro: 0.6580

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 3
- optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:|
| 0.3515        | 1.0   | 313  | 0.3439          | 0.8536   | 0.6099   |
| 0.3166        | 2.0   | 626  | 0.3352          | 0.8598   | 0.5733   |
| 0.2622        | 3.0   | 939  | 0.3453          | 0.8458   | 0.6676   |
| 0.2173        | 4.0   | 1252 | 0.5821          | 0.8618   | 0.5924   |
| 0.1634        | 5.0   | 1565 | 0.7321          | 0.8542   | 0.6490   |
| 0.126         | 6.0   | 1878 | 0.8761          | 0.8264   | 0.6615   |

### Framework versions

- Transformers 4.36.2
- Pytorch 2.2.1+cu121
- Datasets 2.19.0
- Tokenizers 0.15.2
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "roberta-base", "model-index": [{"name": "micro_base_help_class_no_pre_seed_3", "results": []}]}
BigTMiami/micro_base_help_class_no_pre_seed_3
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:10:13+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_no\_pre\_seed\_3 ========================================== This model is a fine-tuned version of roberta-base on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.3318 * Accuracy: 0.8454 * F1 Macro: 0.6580 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 3 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 3\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 3\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
reinforcement-learning
null
# **Q-Learning** Agent playing **FrozenLake-v1**

This is a trained model of a **Q-Learning** agent playing **FrozenLake-v1**.

## Usage

```python
import gymnasium as gym

# load_from_hub is the helper defined in the Hugging Face Deep RL course notebooks
model = load_from_hub(repo_id="Edgar404/q-FrozenLake-v1-4x4-noSlippery", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
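
Once loaded, the checkpoint is just a Q-table plus metadata in the Deep RL course format, so acting greedily looks roughly like this. This is a minimal sketch; the `"qtable"` key follows the course convention and may differ in other checkpoints.

```python
import numpy as np

# 'model' and 'env' come from the usage snippet above
state, _ = env.reset()
done = False
while not done:
    action = int(np.argmax(model["qtable"][state]))  # greedy action from the Q-table
    state, reward, terminated, truncated, _ = env.step(action)
    done = terminated or truncated
```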
{"tags": ["FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation"], "model-index": [{"name": "q-FrozenLake-v1-4x4-noSlippery", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "FrozenLake-v1-4x4-no_slippery", "type": "FrozenLake-v1-4x4-no_slippery"}, "metrics": [{"type": "mean_reward", "value": "1.00 +/- 0.00", "name": "mean_reward", "verified": false}]}]}]}
Edgar404/q-FrozenLake-v1-4x4-noSlippery
null
[ "FrozenLake-v1-4x4-no_slippery", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
null
2024-04-20T11:11:08+00:00
[]
[]
TAGS #FrozenLake-v1-4x4-no_slippery #q-learning #reinforcement-learning #custom-implementation #model-index #region-us
# Q-Learning Agent playing FrozenLake-v1 This is a trained model of a Q-Learning agent playing FrozenLake-v1. ## Usage
[ "# Q-Learning Agent playing1 FrozenLake-v1\n This is a trained model of a Q-Learning agent playing FrozenLake-v1 .\n\n ## Usage" ]
[ "TAGS\n#FrozenLake-v1-4x4-no_slippery #q-learning #reinforcement-learning #custom-implementation #model-index #region-us \n", "# Q-Learning Agent playing1 FrozenLake-v1\n This is a trained model of a Q-Learning agent playing FrozenLake-v1 .\n\n ## Usage" ]
null
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vsft-llava_builder_zephyr-7b-beta

This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 1.4e-05
- train_batch_size: 2
- eval_batch_size: 4
- seed: 42
- distributed_type: multi-GPU
- num_devices: 8
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- total_eval_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1.0
- mixed_precision_training: Native AMP

### Training results

### Framework versions

- Transformers 4.40.0.dev0
- Pytorch 2.2.2+cu121
- Datasets 2.18.0
- Tokenizers 0.15.2
{"tags": ["trl", "sft", "generated_from_trainer"], "model-index": [{"name": "vsft-llava_builder_zephyr-7b-beta", "results": []}]}
edbeeching/vsft-llava_builder_zephyr-7b-beta
null
[ "transformers", "safetensors", "llava", "pretraining", "trl", "sft", "generated_from_trainer", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:11:41+00:00
[]
[]
TAGS #transformers #safetensors #llava #pretraining #trl #sft #generated_from_trainer #endpoints_compatible #region-us
# vsft-llava_builder_zephyr-7b-beta This model is a fine-tuned version of [](URL on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.4e-05 - train_batch_size: 2 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.0.dev0 - Pytorch 2.2.2+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# vsft-llava_builder_zephyr-7b-beta\n\nThis model is a fine-tuned version of [](URL on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1.4e-05\n- train_batch_size: 2\n- eval_batch_size: 4\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 64\n- total_eval_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1.0\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.40.0.dev0\n- Pytorch 2.2.2+cu121\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #llava #pretraining #trl #sft #generated_from_trainer #endpoints_compatible #region-us \n", "# vsft-llava_builder_zephyr-7b-beta\n\nThis model is a fine-tuned version of [](URL on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1.4e-05\n- train_batch_size: 2\n- eval_batch_size: 4\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 64\n- total_eval_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1.0\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.40.0.dev0\n- Pytorch 2.2.2+cu121\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# clasificador-rotten-tomatoes

This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 0.8027
- Accuracy: 0.8621

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 3.0

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.4029        | 1.0   | 1067 | 0.3717          | 0.8565   |
| 0.2381        | 2.0   | 2134 | 0.6918          | 0.8480   |
| 0.0812        | 3.0   | 3201 | 0.8027          | 0.8621   |

### Framework versions

- Transformers 4.40.0
- Pytorch 2.2.1+cu121
- Datasets 2.19.0
- Tokenizers 0.19.1
{"license": "apache-2.0", "tags": ["classification", "generated_from_trainer"], "metrics": ["accuracy"], "base_model": "bert-base-uncased", "model-index": [{"name": "clasificador-rotten-tomatoes", "results": []}]}
jonruida/clasificador-rotten-tomatoes
null
[ "transformers", "safetensors", "bert", "text-classification", "classification", "generated_from_trainer", "base_model:bert-base-uncased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:11:44+00:00
[]
[]
TAGS #transformers #safetensors #bert #text-classification #classification #generated_from_trainer #base_model-bert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
clasificador-rotten-tomatoes ============================ This model is a fine-tuned version of bert-base-uncased on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.8027 * Accuracy: 0.8621 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 3.0 ### Training results ### Framework versions * Transformers 4.40.0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 3.0", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #bert #text-classification #classification #generated_from_trainer #base_model-bert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 3.0", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
reinforcement-learning
stable-baselines3
# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4**

This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo).

The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included.

## Usage (with SB3 RL Zoo)

RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/>
SB3: https://github.com/DLR-RM/stable-baselines3<br/>
SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib

Install the RL Zoo (with SB3 and SB3-Contrib):

```bash
pip install rl_zoo3
```

```
# Download model and save it into the logs/ folder
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga DaniElAbrazos -f logs/
python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do:

```
python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga DaniElAbrazos -f logs/
python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
```

## Training (with the RL Zoo)

```
python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/
# Upload the model and generate video (when possible)
python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga DaniElAbrazos
```

## Hyperparameters

```python
OrderedDict([('batch_size', 32),
             ('buffer_size', 100000),
             ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']),
             ('exploration_final_eps', 0.01),
             ('exploration_fraction', 0.1),
             ('frame_stack', 4),
             ('gradient_steps', 1),
             ('learning_rate', 0.0001),
             ('learning_starts', 100000),
             ('n_timesteps', 1000000.0),
             ('optimize_memory_usage', False),
             ('policy', 'CnnPolicy'),
             ('target_update_interval', 1000),
             ('train_freq', 4),
             ('normalize', False)])
```

# Environment Arguments

```python
{'render_mode': 'rgb_array'}
```
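
Besides the RL Zoo CLI above, a downloaded checkpoint can also be loaded directly with stable-baselines3. This is a minimal sketch; the local zip path is a placeholder for wherever `rl_zoo3.load_from_hub` saved it.

```python
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack

# Recreate the evaluation env with the same Atari wrappers and frame stack
env = VecFrameStack(make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1), n_stack=4)

# Placeholder path to the downloaded checkpoint
model = DQN.load("logs/dqn/SpaceInvadersNoFrameskip-v4_1/SpaceInvadersNoFrameskip-v4.zip")

obs = env.reset()
for _ in range(1_000):
    action, _ = model.predict(obs, deterministic=True)
    obs, rewards, dones, infos = env.step(action)
```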
{"library_name": "stable-baselines3", "tags": ["SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "stable-baselines3"], "model-index": [{"name": "DQN", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "SpaceInvadersNoFrameskip-v4", "type": "SpaceInvadersNoFrameskip-v4"}, "metrics": [{"type": "mean_reward", "value": "511.00 +/- 128.60", "name": "mean_reward", "verified": false}]}]}]}
DaniElAbrazos/dqn-SpaceInvadersNoFrameskip-v4
null
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
null
2024-04-20T11:15:01+00:00
[]
[]
TAGS #stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us
# DQN Agent playing SpaceInvadersNoFrameskip-v4 This is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4 using the stable-baselines3 library and the RL Zoo. The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: URL SB3: URL SB3 Contrib: URL Install the RL Zoo (with SB3 and SB3-Contrib): If you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do: ## Training (with the RL Zoo) ## Hyperparameters # Environment Arguments
[ "# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo.\n\nThe RL Zoo is a training framework for Stable Baselines3\nreinforcement learning agents,\nwith hyperparameter optimization and pre-trained agents included.", "## Usage (with SB3 RL Zoo)\n\nRL Zoo: URL\nSB3: URL\nSB3 Contrib: URL\n\nInstall the RL Zoo (with SB3 and SB3-Contrib):\n\n\n\n\nIf you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do:", "## Training (with the RL Zoo)", "## Hyperparameters", "# Environment Arguments" ]
[ "TAGS\n#stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n", "# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo.\n\nThe RL Zoo is a training framework for Stable Baselines3\nreinforcement learning agents,\nwith hyperparameter optimization and pre-trained agents included.", "## Usage (with SB3 RL Zoo)\n\nRL Zoo: URL\nSB3: URL\nSB3 Contrib: URL\n\nInstall the RL Zoo (with SB3 and SB3-Contrib):\n\n\n\n\nIf you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do:", "## Training (with the RL Zoo)", "## Hyperparameters", "# Environment Arguments" ]
reinforcement-learning
null
# **Q-Learning** Agent playing **Taxi-v3**

This is a trained model of a **Q-Learning** agent playing **Taxi-v3**.

## Usage

```python
import gymnasium as gym

# load_from_hub is the helper defined in the Hugging Face Deep RL course notebooks
model = load_from_hub(repo_id="Edgar404/q-Taxi-v3", filename="q-learning.pkl")

# Don't forget to check if you need to add additional attributes (is_slippery=False etc)
env = gym.make(model["env_id"])
```
{"tags": ["Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation"], "model-index": [{"name": "q-Taxi-v3", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "Taxi-v3", "type": "Taxi-v3"}, "metrics": [{"type": "mean_reward", "value": "7.56 +/- 2.71", "name": "mean_reward", "verified": false}]}]}]}
Edgar404/q-Taxi-v3
null
[ "Taxi-v3", "q-learning", "reinforcement-learning", "custom-implementation", "model-index", "region:us" ]
null
2024-04-20T11:15:43+00:00
[]
[]
TAGS #Taxi-v3 #q-learning #reinforcement-learning #custom-implementation #model-index #region-us
# Q-Learning Agent playing Taxi-v3 This is a trained model of a Q-Learning agent playing Taxi-v3. ## Usage
[ "# Q-Learning Agent playing1 Taxi-v3\n This is a trained model of a Q-Learning agent playing Taxi-v3 .\n\n ## Usage" ]
[ "TAGS\n#Taxi-v3 #q-learning #reinforcement-learning #custom-implementation #model-index #region-us \n", "# Q-Learning Agent playing1 Taxi-v3\n This is a trained model of a Q-Learning agent playing Taxi-v3 .\n\n ## Usage" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
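Since this card's "How to Get Started" section is still a placeholder, here is a generic, hedged starting point. The card never states the model type, so `AutoModel` and `AutoTokenizer` are neutral guesses; swap in the task-specific head class once the architecture is known.

```python
# Hedged, generic starting point for the placeholder section above. AutoModel is
# a neutral guess because the card leaves the model type blank; replace it with
# the task-specific class (e.g. a causal-LM head) once the architecture is known.
from transformers import AutoModel, AutoTokenizer

repo = "bdsaglam/llama-2-7b-chat-jerx-debug"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModel.from_pretrained(repo)
```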
{"library_name": "transformers", "tags": []}
bdsaglam/llama-2-7b-chat-jerx-debug
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:15:43+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
feature-extraction
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
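This card's usage section is likewise a placeholder, but the repo tags identify an XLM-RoBERTa feature-extraction checkpoint, so a hedged embedding sketch follows. Masked mean pooling is an illustrative choice of mine, not a strategy the card documents.

```python
# Hedged sketch for the placeholder "How to Get Started" section. The tags mark
# this as an XLM-RoBERTa feature-extraction model; masked mean pooling is an
# illustrative choice here, not a pooling strategy documented by the card.
import torch
from transformers import AutoModel, AutoTokenizer

repo = "stvhuang/rcr-run-5pqr6lwp-90396-master-0_20240402T105012-ep25"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModel.from_pretrained(repo)

inputs = tokenizer(["a sentence to embed"], return_tensors="pt", padding=True)
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state      # (batch, seq_len, hidden_dim)
mask = inputs["attention_mask"].unsqueeze(-1)       # (batch, seq_len, 1)
embedding = (hidden * mask).sum(dim=1) / mask.sum(dim=1)
print(embedding.shape)
```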
{"library_name": "transformers", "tags": []}
stvhuang/rcr-run-5pqr6lwp-90396-master-0_20240402T105012-ep25
null
[ "transformers", "safetensors", "xlm-roberta", "feature-extraction", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:15:50+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #xlm-roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #xlm-roberta #feature-extraction #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
null
# cleatherbury/Phigments12-Q6_K-GGUF This model was converted to GGUF format from [`liminerity/Phigments12`](https://huggingface.co/liminerity/Phigments12) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/liminerity/Phigments12) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew. ```bash brew install ggerganov/ggerganov/llama.cpp ``` Invoke the llama.cpp server or the CLI. CLI: ```bash llama-cli --hf-repo cleatherbury/Phigments12-Q6_K-GGUF --model phigments12.Q6_K.gguf -p "The meaning to life and the universe is" ``` Server: ```bash llama-server --hf-repo cleatherbury/Phigments12-Q6_K-GGUF --model phigments12.Q6_K.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo. ``` git clone https://github.com/ggerganov/llama.cpp && cd llama.cpp && make && ./main -m phigments12.Q6_K.gguf -n 128 ```
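Beyond the CLI and server routes above, the same file can also be driven from Python through the llama-cpp-python bindings, an alternative the card does not mention. The sketch assumes the `.gguf` file has already been downloaded locally.

```python
# Hedged alternative (not from the card): run the same GGUF checkpoint through
# the llama-cpp-python bindings instead of the llama.cpp CLI. Assumes the file
# was already fetched locally, e.g. with `huggingface-cli download`.
from llama_cpp import Llama

llm = Llama(model_path="phigments12.Q6_K.gguf", n_ctx=2048)
out = llm("The meaning to life and the universe is", max_tokens=64)
print(out["choices"][0]["text"])
```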
{"license": "apache-2.0", "tags": ["liminerity/merge6", "liminerity/merge3", "Merge", "llama-cpp", "gguf-my-repo"]}
cleatherbury/Phigments12-Q6_K-GGUF
null
[ "gguf", "liminerity/merge6", "liminerity/merge3", "Merge", "llama-cpp", "gguf-my-repo", "license:apache-2.0", "region:us" ]
null
2024-04-20T11:16:03+00:00
[]
[]
TAGS #gguf #liminerity/merge6 #liminerity/merge3 #Merge #llama-cpp #gguf-my-repo #license-apache-2.0 #region-us
# cleatherbury/Phigments12-Q6_K-GGUF This model was converted to GGUF format from 'liminerity/Phigments12' using URL via the URL's GGUF-my-repo space. Refer to the original model card for more details on the model. ## Use with URL Install URL through brew. Invoke the URL server or the CLI. CLI: Server: Note: You can also use this checkpoint directly through the usage steps listed in the URL repo as well.
[ "# cleatherbury/Phigments12-Q6_K-GGUF\nThis model was converted to GGUF format from 'liminerity/Phigments12' using URL via the URL's GGUF-my-repo space.\nRefer to the original model card for more details on the model.", "## Use with URL\n\nInstall URL through brew.\n\n\nInvoke the URL server or the CLI.\n\nCLI:\n\n\n\nServer:\n\n\n\nNote: You can also use this checkpoint directly through the usage steps listed in the URL repo as well." ]
[ "TAGS\n#gguf #liminerity/merge6 #liminerity/merge3 #Merge #llama-cpp #gguf-my-repo #license-apache-2.0 #region-us \n", "# cleatherbury/Phigments12-Q6_K-GGUF\nThis model was converted to GGUF format from 'liminerity/Phigments12' using URL via the URL's GGUF-my-repo space.\nRefer to the original model card for more details on the model.", "## Use with URL\n\nInstall URL through brew.\n\n\nInvoke the URL server or the CLI.\n\nCLI:\n\n\n\nServer:\n\n\n\nNote: You can also use this checkpoint directly through the usage steps listed in the URL repo as well." ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
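This card's "How to Get Started" section is also empty; the repo tags mark it as a Gemma text-generation model, so the hedged causal-LM sketch below is a reasonable guess rather than documented usage.

```python
# Hedged sketch for the placeholder section above. The repo is tagged as a gemma
# text-generation model, so the causal-LM Auto classes are a reasonable guess.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "Conradlynny/gemma1k_ft"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)

inputs = tokenizer("Hello, how are", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```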
{"library_name": "transformers", "tags": []}
Conradlynny/gemma1k_ft
null
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T11:19:32+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
image-to-text
transformers
# LongCap: Finetuned [BLIP](https://huggingface.co/Salesforce/blip-image-captioning-large) for generating long captions of images, suitable for prompts for text-to-image generation and captioning text-to-image datasets ## Usage You can use this model for conditional and un-conditional image captioning ### Using the Pytorch model #### Running the model on CPU <details> <summary> Click to expand </summary> ```python import requests from PIL import Image from transformers import BlipProcessor, BlipForConditionalGeneration processor = BlipProcessor.from_pretrained("unography/blip-large-long-cap") model = BlipForConditionalGeneration.from_pretrained("unography/blip-large-long-cap") img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB') inputs = processor(raw_image, return_tensors="pt") pixel_values = inputs.pixel_values out = model.generate(pixel_values=pixel_values, max_length=250) print(processor.decode(out[0], skip_special_tokens=True)) >>> a woman sitting on the beach, wearing a checkered shirt and a dog collar. the woman is interacting with the dog, which is positioned towards the left side of the image. the setting is a beachfront with a calm sea and a golden hue. ``` </details> #### Running the model on GPU ##### In full precision <details> <summary> Click to expand </summary> ```python import requests from PIL import Image from transformers import BlipProcessor, BlipForConditionalGeneration processor = BlipProcessor.from_pretrained("unography/blip-large-long-cap") model = BlipForConditionalGeneration.from_pretrained("unography/blip-large-long-cap").to("cuda") img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB') inputs = processor(raw_image, return_tensors="pt").to("cuda") pixel_values = inputs.pixel_values out = model.generate(pixel_values=pixel_values, max_length=250) print(processor.decode(out[0], skip_special_tokens=True)) >>> a woman sitting on the beach, wearing a checkered shirt and a dog collar. the woman is interacting with the dog, which is positioned towards the left side of the image. the setting is a beachfront with a calm sea and a golden hue. ``` </details> ##### In half precision (`float16`) <details> <summary> Click to expand </summary> ```python import torch import requests from PIL import Image from transformers import BlipProcessor, BlipForConditionalGeneration processor = BlipProcessor.from_pretrained("unography/blip-large-long-cap") model = BlipForConditionalGeneration.from_pretrained("unography/blip-large-long-cap", torch_dtype=torch.float16).to("cuda") img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB') inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16) pixel_values = inputs.pixel_values out = model.generate(pixel_values=pixel_values, max_length=250) print(processor.decode(out[0], skip_special_tokens=True)) >>> a woman sitting on the beach, wearing a checkered shirt and a dog collar. the woman is interacting with the dog, which is positioned towards the left side of the image. the setting is a beachfront with a calm sea and a golden hue. ``` </details>
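As a shorter, hedged alternative to the processor/model snippets above, the same checkpoint should also work through the transformers `image-to-text` pipeline; this is my suggestion, not a route shown in the card.

```python
# Hedged one-liner alternative (not shown in the card): the image-to-text
# pipeline wraps the processor/model steps above; max_new_tokens mirrors the
# max_length=250 used in the card's generate() calls.
from transformers import pipeline

captioner = pipeline("image-to-text", model="unography/blip-large-long-cap")
result = captioner(
    "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg",
    max_new_tokens=250,
)
print(result[0]["generated_text"])
```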
{"license": "bsd-3-clause", "tags": ["image-captioning"], "datasets": ["unography/laion-14k-GPT4V-LIVIS-Captions"], "pipeline_tag": "image-to-text", "languages": ["en"], "widget": [{"src": "https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg", "example_title": "Savanna"}, {"src": "https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg", "example_title": "Football Match"}, {"src": "https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg", "example_title": "Airport"}], "inference": {"parameters": {"max_length": 300}}}
unography/blip-large-long-cap-sam-llava
null
[ "transformers", "safetensors", "blip", "text2text-generation", "image-captioning", "image-to-text", "dataset:unography/laion-14k-GPT4V-LIVIS-Captions", "license:bsd-3-clause", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:22:12+00:00
[]
[]
TAGS #transformers #safetensors #blip #text2text-generation #image-captioning #image-to-text #dataset-unography/laion-14k-GPT4V-LIVIS-Captions #license-bsd-3-clause #autotrain_compatible #endpoints_compatible #region-us
# LongCap: Finetuned BLIP for generating long captions of images, suitable for prompts for text-to-image generation and captioning text-to-image datasets ## Usage You can use this model for conditional and un-conditional image captioning ### Using the Pytorch model #### Running the model on CPU <details> <summary> Click to expand </summary> </details> #### Running the model on GPU ##### In full precision <details> <summary> Click to expand </summary> </details> ##### In half precision ('float16') <details> <summary> Click to expand </summary> </details>
[ "# LongCap: Finetuned BLIP for generating long captions of images, suitable for prompts for text-to-image generation and captioning text-to-image datasets", "## Usage\n\nYou can use this model for conditional and un-conditional image captioning", "### Using the Pytorch model", "#### Running the model on CPU\n\n<details>\n<summary> Click to expand </summary>\n\n\n</details>", "#### Running the model on GPU", "##### In full precision \n\n<details>\n<summary> Click to expand </summary>\n\n\n</details>", "##### In half precision ('float16')\n\n<details>\n<summary> Click to expand </summary>\n\n\n</details>" ]
[ "TAGS\n#transformers #safetensors #blip #text2text-generation #image-captioning #image-to-text #dataset-unography/laion-14k-GPT4V-LIVIS-Captions #license-bsd-3-clause #autotrain_compatible #endpoints_compatible #region-us \n", "# LongCap: Finetuned BLIP for generating long captions of images, suitable for prompts for text-to-image generation and captioning text-to-image datasets", "## Usage\n\nYou can use this model for conditional and un-conditional image captioning", "### Using the Pytorch model", "#### Running the model on CPU\n\n<details>\n<summary> Click to expand </summary>\n\n\n</details>", "#### Running the model on GPU", "##### In full precision \n\n<details>\n<summary> Click to expand </summary>\n\n\n</details>", "##### In half precision ('float16')\n\n<details>\n<summary> Click to expand </summary>\n\n\n</details>" ]
null
null
# T3qm7xpExperiment27pastiche-7B T3qm7xpExperiment27pastiche-7B is an automated merge created by [Maxime Labonne](https://huggingface.co/mlabonne) using the following configuration. ## 🧩 Configuration ```yaml models: - model: mistralai/Mistral-7B-v0.1 - model: nlpguy/T3QM7XP - model: automerger/Experiment27Pastiche-7B merge_method: model_stock base_model: mistralai/Mistral-7B-v0.1 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "automerger/T3qm7xpExperiment27pastiche-7B" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
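The card shows the merge configuration but not how to rerun it. Below is a hedged sketch of reproducing the merge locally with mergekit's `mergekit-yaml` CLI; this is an assumption on my part, since the card only presents the YAML.

```python
# Hedged reproduction sketch (my assumption; the card only shows the YAML):
# write the config to disk and invoke mergekit's CLI on it.
# Requires `pip install mergekit` and enough disk/RAM for three 7B checkpoints.
import subprocess

config = """\
models:
  - model: mistralai/Mistral-7B-v0.1
  - model: nlpguy/T3QM7XP
  - model: automerger/Experiment27Pastiche-7B
merge_method: model_stock
base_model: mistralai/Mistral-7B-v0.1
dtype: bfloat16
"""

with open("merge.yaml", "w") as f:
    f.write(config)

subprocess.run(["mergekit-yaml", "merge.yaml", "./merged-model"], check=True)
```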
{"license": "apache-2.0", "tags": ["merge", "mergekit", "lazymergekit", "automerger"]}
automerger/T3qm7xpExperiment27pastiche-7B
null
[ "merge", "mergekit", "lazymergekit", "automerger", "license:apache-2.0", "region:us" ]
null
2024-04-20T11:22:53+00:00
[]
[]
TAGS #merge #mergekit #lazymergekit #automerger #license-apache-2.0 #region-us
# T3qm7xpExperiment27pastiche-7B T3qm7xpExperiment27pastiche-7B is an automated merge created by Maxime Labonne using the following configuration. ## Configuration ## Usage
[ "# T3qm7xpExperiment27pastiche-7B\n\nT3qm7xpExperiment27pastiche-7B is an automated merge created by Maxime Labonne using the following configuration.", "## Configuration", "## Usage" ]
[ "TAGS\n#merge #mergekit #lazymergekit #automerger #license-apache-2.0 #region-us \n", "# T3qm7xpExperiment27pastiche-7B\n\nT3qm7xpExperiment27pastiche-7B is an automated merge created by Maxime Labonne using the following configuration.", "## Configuration", "## Usage" ]
text-to-image
diffusers
<!-- This model card has been generated automatically according to the information the training script had access to. You should probably proofread and complete it, then remove this comment. --> # SDXL LoRA DreamBooth - junhua226/corgy_dog_LoRA <Gallery /> ## Model description These are junhua226/corgy_dog_LoRA LoRA adaption weights for stabilityai/stable-diffusion-xl-base-1.0. The weights were trained using [DreamBooth](https://dreambooth.github.io/). LoRA for the text encoder was enabled: False. Special VAE used for training: madebyollin/sdxl-vae-fp16-fix. ## Trigger words You should use a photo of TOK dog to trigger the image generation. ## Download model Weights for this model are available in Safetensors format. [Download](junhua226/corgy_dog_LoRA/tree/main) them in the Files & versions tab. ## Intended uses & limitations #### How to use ```python # TODO: add an example code snippet for running this diffusion pipeline ``` #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training details [TODO: describe the data used to train the model]
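The card's "How to use" snippet is still a TODO; the hedged sketch below fills that gap with the standard diffusers LoRA-loading pattern, using the trigger phrase documented above. Everything in the prompt beyond the trigger words is illustrative.

```python
# Hedged sketch for the card's TODO snippet: load the SDXL base pipeline and
# apply these LoRA weights with diffusers. The trigger phrase "a photo of TOK dog"
# comes from the card; everything after it in the prompt is illustrative.
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("junhua226/corgy_dog_LoRA")

image = pipe("a photo of TOK dog sitting on a beach").images[0]
image.save("corgy_dog.png")
```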
{"license": "openrail++", "library_name": "diffusers", "tags": ["text-to-image", "text-to-image", "diffusers-training", "diffusers", "lora", "template:sd-lora", "stable-diffusion-xl", "stable-diffusion-xl-diffusers"], "base_model": "stabilityai/stable-diffusion-xl-base-1.0", "instance_prompt": "a photo of TOK dog", "widget": []}
junhua226/corgy_dog_LoRA
null
[ "diffusers", "tensorboard", "text-to-image", "diffusers-training", "lora", "template:sd-lora", "stable-diffusion-xl", "stable-diffusion-xl-diffusers", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "license:openrail++", "region:us" ]
null
2024-04-20T11:25:39+00:00
[]
[]
TAGS #diffusers #tensorboard #text-to-image #diffusers-training #lora #template-sd-lora #stable-diffusion-xl #stable-diffusion-xl-diffusers #base_model-stabilityai/stable-diffusion-xl-base-1.0 #license-openrail++ #region-us
# SDXL LoRA DreamBooth - junhua226/corgy_dog_LoRA <Gallery /> ## Model description These are junhua226/corgy_dog_LoRA LoRA adaption weights for stabilityai/stable-diffusion-xl-base-1.0. The weights were trained using DreamBooth. LoRA for the text encoder was enabled: False. Special VAE used for training: madebyollin/sdxl-vae-fp16-fix. ## Trigger words You should use a photo of TOK dog to trigger the image generation. ## Download model Weights for this model are available in Safetensors format. Download them in the Files & versions tab. ## Intended uses & limitations #### How to use #### Limitations and bias [TODO: provide examples of latent issues and potential remediations] ## Training details [TODO: describe the data used to train the model]
[ "# SDXL LoRA DreamBooth - junhua226/corgy_dog_LoRA\n\n<Gallery />", "## Model description\n\nThese are junhua226/corgy_dog_LoRA LoRA adaption weights for stabilityai/stable-diffusion-xl-base-1.0.\n\nThe weights were trained using DreamBooth.\n\nLoRA for the text encoder was enabled: False.\n\nSpecial VAE used for training: madebyollin/sdxl-vae-fp16-fix.", "## Trigger words\n\nYou should use a photo of TOK dog to trigger the image generation.", "## Download model\n\nWeights for this model are available in Safetensors format.\n\nDownload them in the Files & versions tab.", "## Intended uses & limitations", "#### How to use", "#### Limitations and bias\n\n[TODO: provide examples of latent issues and potential remediations]", "## Training details\n\n[TODO: describe the data used to train the model]" ]
[ "TAGS\n#diffusers #tensorboard #text-to-image #diffusers-training #lora #template-sd-lora #stable-diffusion-xl #stable-diffusion-xl-diffusers #base_model-stabilityai/stable-diffusion-xl-base-1.0 #license-openrail++ #region-us \n", "# SDXL LoRA DreamBooth - junhua226/corgy_dog_LoRA\n\n<Gallery />", "## Model description\n\nThese are junhua226/corgy_dog_LoRA LoRA adaption weights for stabilityai/stable-diffusion-xl-base-1.0.\n\nThe weights were trained using DreamBooth.\n\nLoRA for the text encoder was enabled: False.\n\nSpecial VAE used for training: madebyollin/sdxl-vae-fp16-fix.", "## Trigger words\n\nYou should use a photo of TOK dog to trigger the image generation.", "## Download model\n\nWeights for this model are available in Safetensors format.\n\nDownload them in the Files & versions tab.", "## Intended uses & limitations", "#### How to use", "#### Limitations and bias\n\n[TODO: provide examples of latent issues and potential remediations]", "## Training details\n\n[TODO: describe the data used to train the model]" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # micro_base_help_class_no_pre_seed_4 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.8233 - Accuracy: 0.852 - F1 Macro: 0.6640 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 4 - optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 Macro | |:-------------:|:-----:|:----:|:---------------:|:--------:|:--------:| | 0.3665 | 1.0 | 313 | 0.3598 | 0.8536 | 0.4619 | | 0.3132 | 2.0 | 626 | 0.3678 | 0.8634 | 0.5801 | | 0.2547 | 3.0 | 939 | 0.3785 | 0.8456 | 0.6196 | | 0.1904 | 4.0 | 1252 | 0.5449 | 0.8444 | 0.6189 | | 0.1679 | 5.0 | 1565 | 0.7422 | 0.8528 | 0.5954 | | 0.1157 | 6.0 | 1878 | 0.9005 | 0.8404 | 0.6468 | | 0.094 | 7.0 | 2191 | 0.9842 | 0.8466 | 0.6246 | | 0.0494 | 8.0 | 2504 | 1.0898 | 0.8476 | 0.6083 | | 0.015 | 9.0 | 2817 | 1.2454 | 0.844 | 0.6390 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
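To make the hyperparameter list above directly reusable, here is a hedged reconstruction as `TrainingArguments`. The dataset and preprocessing are unknown, so this mirrors only the values reported in the card.

```python
# Hedged reconstruction of the hyperparameters listed above as TrainingArguments.
# The dataset and preprocessing are unknown ("more information needed"), so this
# mirrors only the reported values rather than the full training script.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="micro_base_help_class_no_pre_seed_4",  # hypothetical output dir
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=4,
    adam_beta1=0.9,
    adam_beta2=0.98,
    adam_epsilon=1e-6,
    lr_scheduler_type="linear",
    num_train_epochs=10,
)
```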
{"license": "mit", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "roberta-base", "model-index": [{"name": "micro_base_help_class_no_pre_seed_4", "results": []}]}
BigTMiami/micro_base_help_class_no_pre_seed_4
null
[ "transformers", "safetensors", "roberta", "text-classification", "generated_from_trainer", "base_model:roberta-base", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T11:26:19+00:00
[]
[]
TAGS #transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us
micro\_base\_help\_class\_no\_pre\_seed\_4 ========================================== This model is a fine-tuned version of roberta-base on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.8233 * Accuracy: 0.852 * F1 Macro: 0.6640 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 4 * optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06 * lr\_scheduler\_type: linear * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.36.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 4\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #roberta #text-classification #generated_from_trainer #base_model-roberta-base #license-mit #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 4\n* optimizer: Adam with betas=(0.9,0.98) and epsilon=1e-06\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.36.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
tomaszki/llama-0
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T11:26:56+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
ohsuz/ohsuz-fin
null
[ "transformers", "safetensors", "phi", "text-generation", "conversational", "custom_code", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T11:27:35+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #phi #text-generation #conversational #custom_code #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #phi #text-generation #conversational #custom_code #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
translation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # nllb-600M-mt-french-bambara This model is a fine-tuned version of [facebook/nllb-200-distilled-600M](https://huggingface.co/facebook/nllb-200-distilled-600M) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-06 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"language": ["fr", "bm"], "license": "cc-by-nc-4.0", "library_name": "transformers", "tags": ["generated_from_trainer"], "datasets": ["oza75/mt-fr-bm-texts"], "metrics": ["sacrebleu"], "base_model": "facebook/nllb-200-distilled-600M", "pipeline_tag": "translation", "model-index": [{"name": "nllb-600M-mt-french-bambara", "results": []}]}
oza75/nllb-600M-mt-french-bambara
null
[ "transformers", "tensorboard", "safetensors", "m2m_100", "text2text-generation", "generated_from_trainer", "translation", "fr", "bm", "dataset:oza75/mt-fr-bm-texts", "base_model:facebook/nllb-200-distilled-600M", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "has_space", "region:us" ]
null
2024-04-20T11:27:36+00:00
[]
[ "fr", "bm" ]
TAGS #transformers #tensorboard #safetensors #m2m_100 #text2text-generation #generated_from_trainer #translation #fr #bm #dataset-oza75/mt-fr-bm-texts #base_model-facebook/nllb-200-distilled-600M #license-cc-by-nc-4.0 #autotrain_compatible #endpoints_compatible #has_space #region-us
# nllb-600M-mt-french-bambara This model is a fine-tuned version of facebook/nllb-200-distilled-600M on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 8e-06 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 6 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.0+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
[ "# nllb-600M-mt-french-bambara\n\nThis model is a fine-tuned version of facebook/nllb-200-distilled-600M on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 8e-06\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 6\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.40.0\n- Pytorch 2.2.0+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #m2m_100 #text2text-generation #generated_from_trainer #translation #fr #bm #dataset-oza75/mt-fr-bm-texts #base_model-facebook/nllb-200-distilled-600M #license-cc-by-nc-4.0 #autotrain_compatible #endpoints_compatible #has_space #region-us \n", "# nllb-600M-mt-french-bambara\n\nThis model is a fine-tuned version of facebook/nllb-200-distilled-600M on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 8e-06\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 6\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.40.0\n- Pytorch 2.2.0+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
mohamedhachemi/mohazz_ar
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T11:28:51+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
tomaszki/llama-0-a
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T11:30:33+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Orca-Llama-3-8B-Instruct-DPO Finetuned [Llama 3 8B Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on [Intel/orca_dpo_pairs](https://huggingface.co/datasets/Intel/orca_dpo_pairs) using a single 3090 24GB. Data formatted using the ChatML template. GGUF can be found here [RDson/Orca-Llama-3-8B-Instruct-DPO-GGUF](https://huggingface.co/RDson/Orca-Llama-3-8B-Instruct-DPO-GGUF) ORPOConfig: ``` learning_rate=1e-6, lr_scheduler_type="linear", max_length=1024, max_prompt_length=512, overwrite_output_dir=True, beta=0.1, per_device_train_batch_size=2, per_device_eval_batch_size=2, gradient_accumulation_steps=4, optim="paged_adamw_8bit", num_train_epochs=1, evaluation_strategy="steps", eval_steps=0.2, logging_steps=1, warmup_steps=35, report_to="wandb", output_dir="./results/", fp16=True, save_steps=50 ``` <div style="text-align: center;"> <img src="https://i.imgur.com/vQ4RzSl.png" style="width: 100%; margin: 0 auto; display: inline-block;"/> <img src="https://i.imgur.com/9H75ijW.png" style="width: 100%; margin: 0 auto; display: inline-block;"/> </div>
{"license": "other", "library_name": "transformers", "tags": ["llama 3", "orca ", "dpo "], "datasets": ["Intel/orca_dpo_pairs"], "pipeline_tag": "text-generation", "license_name": "llama-3", "license_link": "https://llama.meta.com/llama3/license"}
RDson/Orca-Llama-3-8B-Instruct-DPO
null
[ "transformers", "safetensors", "llama", "text-generation", "llama 3", "orca ", "dpo ", "conversational", "dataset:Intel/orca_dpo_pairs", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T11:31:22+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #llama 3 #orca #dpo #conversational #dataset-Intel/orca_dpo_pairs #license-other #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Orca-Llama-3-8B-Instruct-DPO Finetuned Llama 3 8B Instruct on Intel/orca_dpo_pairs using a single 3090 24GB. Data formatted using the ChatML template. GGUF can be found here RDson/Orca-Llama-3-8B-Instruct-DPO-GGUF ORPOConfig: <div style="text-align: center;"> <img src="https://i.URL style="width: 100%; margin: 0 auto; display: inline-block;"/> <img src="https://i.URL style="width: 100%; margin: 0 auto; display: inline-block;"/> </div>
[ "# Orca-Llama-3-8B-Instruct-DPO\n\nFinetuned Llama 3 8B Instruct on Intel/orca_dpo_pairs using a single 3090 24GB. Data formated using the ChatML template.\n\nGGUF can be found here RDson/Orca-Llama-3-8B-Instruct-DPO-GGUF\n\nORPOConfig:\n\n\n\n<div style=\"text-align: center;\">\n <img src=\"https://i.URL style=\"width: 100%; margin: 0 auto; display: inline-block;\"/>\n <img src=\"https://i.URL style=\"width: 100%; margin: 0 auto; display: inline-block;\"/>\n</div>" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #llama 3 #orca #dpo #conversational #dataset-Intel/orca_dpo_pairs #license-other #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Orca-Llama-3-8B-Instruct-DPO\n\nFinetuned Llama 3 8B Instruct on Intel/orca_dpo_pairs using a single 3090 24GB. Data formated using the ChatML template.\n\nGGUF can be found here RDson/Orca-Llama-3-8B-Instruct-DPO-GGUF\n\nORPOConfig:\n\n\n\n<div style=\"text-align: center;\">\n <img src=\"https://i.URL style=\"width: 100%; margin: 0 auto; display: inline-block;\"/>\n <img src=\"https://i.URL style=\"width: 100%; margin: 0 auto; display: inline-block;\"/>\n</div>" ]
image-classification
timm
# Model card for hpx_former_ln_s18.westai_in1k_384
{"license": "apache-2.0", "library_name": "timm", "tags": ["image-classification", "timm"]}
Spravil/hpx_former_ln_s18.westai_in1k_384
null
[ "timm", "pytorch", "safetensors", "image-classification", "license:apache-2.0", "region:us" ]
null
2024-04-20T11:31:34+00:00
[]
[]
TAGS #timm #pytorch #safetensors #image-classification #license-apache-2.0 #region-us
# Model card for hpx_former_ln_s18.westai_in1k_384
[ "# Model card for hpx_former_ln_s18.westai_in1k_384" ]
[ "TAGS\n#timm #pytorch #safetensors #image-classification #license-apache-2.0 #region-us \n", "# Model card for hpx_former_ln_s18.westai_in1k_384" ]