| Column | Dtype | Range |
|---|---|---|
| pipeline_tag | stringclasses | 48 values |
| library_name | stringclasses | 198 values |
| text | stringlengths | 1 to 900k |
| metadata | stringlengths | 2 to 438k |
| id | stringlengths | 5 to 122 |
| last_modified | null | |
| tags | sequencelengths | 1 to 1.84k |
| sha | null | |
| created_at | stringlengths | 25 to 25 |
| arxiv | sequencelengths | 0 to 201 |
| languages | sequencelengths | 0 to 1.83k |
| tags_str | stringlengths | 17 to 9.34k |
| text_str | stringlengths | 0 to 389k |
| text_lists | sequencelengths | 0 to 722 |
| processed_texts | sequencelengths | 1 to 723 |
text-generation
transformers
# Uploaded model - **Developed by:** ppopiolek - **License:** apache-2.0 - **Finetuned from model:** TinyLlama/TinyLlama-1.1B-Chat-v1.0 This Llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
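Not shown in the card above: a minimal loading sketch for the merged checkpoint, assuming it behaves like any standard Transformers causal LM (the repo id `ppopiolek/tinyllama_merged_s_500` comes from this row; `device_map="auto"` additionally requires `accelerate`).

```python
# Hedged sketch: load the merged TinyLlama fine-tune as an ordinary causal LM.
# Assumes the repo holds full merged weights, as the card implies.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ppopiolek/tinyllama_merged_s_500"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

prompt = "Explain what a language model is in one sentence."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```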
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft"], "base_model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0"}
ppopiolek/tinyllama_merged_s_500
null
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "base_model:TinyLlama/TinyLlama-1.1B-Chat-v1.0", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T18:51:04+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-TinyLlama/TinyLlama-1.1B-Chat-v1.0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: ppopiolek - License: apache-2.0 - Finetuned from model : TinyLlama/TinyLlama-1.1B-Chat-v1.0 This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: ppopiolek\n- License: apache-2.0\n- Finetuned from model : TinyLlama/TinyLlama-1.1B-Chat-v1.0\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-TinyLlama/TinyLlama-1.1B-Chat-v1.0 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: ppopiolek\n- License: apache-2.0\n- Finetuned from model : TinyLlama/TinyLlama-1.1B-Chat-v1.0\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 0.0_ablation_5e7lr_4iters_256batch_iter_1 This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the HuggingFaceH4/ultrafeedback_binarized dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
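As a quick sanity check on the batch-size figures above, the totals follow directly from the per-device batch size, the device count, and gradient accumulation; the sketch below uses only the numbers reported in the card.

```python
# Effective batch sizes implied by the hyperparameters in the card.
per_device_train_batch_size = 8
per_device_eval_batch_size = 8
num_devices = 8
gradient_accumulation_steps = 4

total_train_batch_size = per_device_train_batch_size * num_devices * gradient_accumulation_steps
total_eval_batch_size = per_device_eval_batch_size * num_devices  # no accumulation at eval time

print(total_train_batch_size)  # 256, matching total_train_batch_size in the card
print(total_eval_batch_size)   # 64, matching total_eval_batch_size in the card
```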
{"license": "mit", "tags": ["alignment-handbook", "generated_from_trainer", "trl", "dpo", "generated_from_trainer"], "datasets": ["HuggingFaceH4/ultrafeedback_binarized"], "base_model": "HuggingFaceH4/mistral-7b-sft-beta", "model-index": [{"name": "0.0_ablation_5e7lr_4iters_256batch_iter_1", "results": []}]}
ZhangShenao/0.0_ablation_5e7lr_4iters_256batch_iter_1
null
[ "transformers", "safetensors", "mistral", "text-generation", "alignment-handbook", "generated_from_trainer", "trl", "dpo", "conversational", "dataset:HuggingFaceH4/ultrafeedback_binarized", "base_model:HuggingFaceH4/mistral-7b-sft-beta", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T18:51:20+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-HuggingFaceH4/mistral-7b-sft-beta #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# 0.0_ablation_5e7lr_4iters_256batch_iter_1 This model is a fine-tuned version of HuggingFaceH4/mistral-7b-sft-beta on the HuggingFaceH4/ultrafeedback_binarized dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
[ "# 0.0_ablation_5e7lr_4iters_256batch_iter_1\n\nThis model is a fine-tuned version of HuggingFaceH4/mistral-7b-sft-beta on the HuggingFaceH4/ultrafeedback_binarized dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-07\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 256\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-HuggingFaceH4/ultrafeedback_binarized #base_model-HuggingFaceH4/mistral-7b-sft-beta #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# 0.0_ablation_5e7lr_4iters_256batch_iter_1\n\nThis model is a fine-tuned version of HuggingFaceH4/mistral-7b-sft-beta on the HuggingFaceH4/ultrafeedback_binarized dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-07\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 256\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
overeducated/test
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "2-bit", "region:us" ]
null
2024-04-20T18:51:50+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #2-bit #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #2-bit #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
adapter-transformers
# Adapter `BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_1` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_1", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
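A hedged continuation of the loading snippet in the card: once the adapter and its classification head are active, inference looks like a normal forward pass. The example sentence and the assumption that the head output exposes `logits` are mine, not the card's; the label set is not documented, so only the predicted class index is printed.

```python
# Illustrative follow-up to the card's loading example (reuses `model` from it).
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
inputs = tokenizer("This review was really helpful to me.", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)  # classification head loaded with the adapter

print(outputs.logits.argmax(dim=-1))  # predicted class index (labels not documented)
```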
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_1
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T18:53:09+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_1' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_1' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_1' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-ar This model is a fine-tuned version of [tner/xlm-roberta-base-panx-dataset-ar](https://huggingface.co/tner/xlm-roberta-base-panx-dataset-ar) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1977 - F1: 0.8803 - Accuracy: 0.9515 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:------:|:--------:| | 0.2179 | 1.0 | 188 | 0.1977 | 0.8803 | 0.9515 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
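For context, a hedged way to try the checkpoint above is the standard token-classification pipeline; the example sentence is illustrative, and the entity labels depend on the checkpoint's config, which the card does not list.

```python
# Sketch: run the fine-tuned PAN-X Arabic NER model through the stock pipeline.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="SanadGhassan/xlm-roberta-base-finetuned-panx-ar",
    aggregation_strategy="simple",  # merge sub-word pieces into whole entities
)
print(ner("ولد محمد في القاهرة."))  # "Mohammed was born in Cairo."
```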
{"tags": ["generated_from_trainer"], "metrics": ["f1", "accuracy"], "base_model": "tner/xlm-roberta-base-panx-dataset-ar", "model-index": [{"name": "xlm-roberta-base-finetuned-panx-ar", "results": []}]}
SanadGhassan/xlm-roberta-base-finetuned-panx-ar
null
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "token-classification", "generated_from_trainer", "base_model:tner/xlm-roberta-base-panx-dataset-ar", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T18:54:04+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #xlm-roberta #token-classification #generated_from_trainer #base_model-tner/xlm-roberta-base-panx-dataset-ar #autotrain_compatible #endpoints_compatible #region-us
xlm-roberta-base-finetuned-panx-ar ================================== This model is a fine-tuned version of tner/xlm-roberta-base-panx-dataset-ar on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.1977 * F1: 0.8803 * Accuracy: 0.9515 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 64 * eval\_batch\_size: 64 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 ### Training results ### Framework versions * Transformers 4.40.0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #xlm-roberta #token-classification #generated_from_trainer #base_model-tner/xlm-roberta-base-panx-dataset-ar #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # finetune_starcoder2 This model is a fine-tuned version of [bigcode/starcoder2-3b](https://huggingface.co/bigcode/starcoder2-3b) on the [wraps/flutter-codegen](https://huggingface.co/datasets/wraps/flutter-codegen) dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 0 - gradient_accumulation_steps: 8 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 20 - training_steps: 1000 ### Training results ### Framework versions - PEFT 0.8.2 - Transformers 4.39.0.dev0 - Pytorch 2.2.1+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
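The card does not show how to use the adapter; below is a minimal sketch, assuming the repo stores a PEFT (LoRA) adapter on top of `bigcode/starcoder2-3b` as its `library_name: peft` metadata suggests. The prompt is illustrative.

```python
# Hedged sketch: load the PEFT adapter; the base model is fetched automatically.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

model = AutoPeftModelForCausalLM.from_pretrained("wraps/finetune_starcoder2", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder2-3b")

prompt = "// A Flutter widget that shows a centered loading spinner\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0]))
```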
{"license": "bigcode-openrail-m", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "datasets": ["wraps/flutter-codegen"], "base_model": "bigcode/starcoder2-3b", "model-index": [{"name": "finetune_starcoder2", "results": []}]}
wraps/finetune_starcoder2
null
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "dataset:wraps/flutter-codegen", "base_model:bigcode/starcoder2-3b", "license:bigcode-openrail-m", "region:us" ]
null
2024-04-20T18:54:36+00:00
[]
[]
TAGS #peft #safetensors #trl #sft #generated_from_trainer #dataset-wraps/flutter-codegen #base_model-bigcode/starcoder2-3b #license-bigcode-openrail-m #region-us
# finetune_starcoder2 This model is a fine-tuned version of bigcode/starcoder2-3b on wrapss/flutter-codegen. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 8 - seed: 0 - gradient_accumulation_steps: 8 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 20 - training_steps: 1000 ### Training results ### Framework versions - PEFT 0.8.2 - Transformers 4.39.0.dev0 - Pytorch 2.2.1+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# finetune_starcoder2\n\nThis model is a fine-tuned version of bigcode/starcoder2-3b on wrapss/flutter-codegen.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 2\n- eval_batch_size: 8\n- seed: 0\n- gradient_accumulation_steps: 8\n- total_train_batch_size: 16\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_steps: 20\n- training_steps: 1000", "### Training results", "### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.39.0.dev0\n- Pytorch 2.2.1+cu121\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #trl #sft #generated_from_trainer #dataset-wraps/flutter-codegen #base_model-bigcode/starcoder2-3b #license-bigcode-openrail-m #region-us \n", "# finetune_starcoder2\n\nThis model is a fine-tuned version of bigcode/starcoder2-3b on wrapss/flutter-codegen.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 2\n- eval_batch_size: 8\n- seed: 0\n- gradient_accumulation_steps: 8\n- total_train_batch_size: 16\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_steps: 20\n- training_steps: 1000", "### Training results", "### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.39.0.dev0\n- Pytorch 2.2.1+cu121\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-generation
transformers
# Uploaded model - **Developed by:** kevin009 - **License:** apache-2.0 - **Finetuned from model:** unsloth/llama-3-8b-bnb-4bit This Llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
kevin009/llama3math-s60
null
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "8-bit", "region:us" ]
null
2024-04-20T18:54:52+00:00
[]
[ "en" ]
TAGS #transformers #pytorch #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #8-bit #region-us
# Uploaded model - Developed by: kevin009 - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: kevin009\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #pytorch #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #8-bit #region-us \n", "# Uploaded model\n\n- Developed by: kevin009\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
weathermanj/Llama-3-8B-orpo
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T18:55:52+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
This is an ExLlamaV2 quantized model in 4bpw of [mpasila/Llama-3-LimaRP-8B](https://huggingface.co/mpasila/Llama-3-LimaRP-8B) using the default calibration dataset. # Original Model card: This is a merge of [mpasila/Llama-3-LimaRP-LoRA-8B](https://huggingface.co/mpasila/Llama-3-LimaRP-LoRA-8B). LoRA trained in 4-bit with 8k context using [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B/) as the base model for 1 epoch. Dataset used is [a modified](https://huggingface.co/datasets/mpasila/LimaRP-augmented-8k-context) version of [grimulkan/LimaRP-augmented](https://huggingface.co/datasets/grimulkan/LimaRP-augmented). ### Prompt format: ChatML # Uploaded model - **Developed by:** mpasila - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
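The card states only "Prompt format: ChatML"; the sketch below shows one common way to assemble such a prompt by hand. Role names follow the usual ChatML convention, and the system text is illustrative rather than taken from the card.

```python
# One conventional ChatML layout; adjust the system text to your use case.
def chatml_prompt(system: str, user: str) -> str:
    return (
        f"<|im_start|>system\n{system}<|im_end|>\n"
        f"<|im_start|>user\n{user}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

print(chatml_prompt("You are a helpful roleplay assistant.", "Introduce yourself."))
```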
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft", "not-for-all-audiences"], "datasets": ["grimulkan/LimaRP-augmented", "mpasila/LimaRP-augmented-8k-context"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
mpasila/Llama-3-LimaRP-8B-exl2-4bpw
null
[ "transformers", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "not-for-all-audiences", "conversational", "en", "dataset:grimulkan/LimaRP-augmented", "dataset:mpasila/LimaRP-augmented-8k-context", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T18:57:51+00:00
[]
[ "en" ]
TAGS #transformers #llama #text-generation #text-generation-inference #unsloth #trl #sft #not-for-all-audiences #conversational #en #dataset-grimulkan/LimaRP-augmented #dataset-mpasila/LimaRP-augmented-8k-context #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
This is an ExLlamaV2 quantized model in 4bpw of mpasila/Llama-3-LimaRP-8B using the default calibration dataset. # Original Model card: This is a merge of mpasila/Llama-3-LimaRP-LoRA-8B. LoRA trained in 4-bit with 8k context using meta-llama/Meta-Llama-3-8B as the base model for 1 epoch. Dataset used is a modified version of grimulkan/LimaRP-augmented. ### Prompt format: ChatML # Uploaded model - Developed by: mpasila - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Original Model card:\n\nThis is a merge of mpasila/Llama-3-LimaRP-LoRA-8B.\n\nLoRA trained in 4-bit with 8k context using meta-llama/Meta-Llama-3-8B as the base model for 1 epoch.\n\nDataset used is a modified version of grimulkan/LimaRP-augmented.", "### Prompt format: ChatML", "# Uploaded model\n\n- Developed by: mpasila\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #llama #text-generation #text-generation-inference #unsloth #trl #sft #not-for-all-audiences #conversational #en #dataset-grimulkan/LimaRP-augmented #dataset-mpasila/LimaRP-augmented-8k-context #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Original Model card:\n\nThis is a merge of mpasila/Llama-3-LimaRP-LoRA-8B.\n\nLoRA trained in 4-bit with 8k context using meta-llama/Meta-Llama-3-8B as the base model for 1 epoch.\n\nDataset used is a modified version of grimulkan/LimaRP-augmented.", "### Prompt format: ChatML", "# Uploaded model\n\n- Developed by: mpasila\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
null
# NeuralsynthesisNeuralsynthesis-7B NeuralsynthesisNeuralsynthesis-7B is an automated merge created by [Maxime Labonne](https://huggingface.co/mlabonne) using the following configuration. ## 🧩 Configuration ```yaml models: - model: mistralai/Mistral-7B-v0.1 - model: Kukedlc/NeuralSynthesis-7b-v0.4-slerp - model: Kukedlc/NeuralSynthesis-7B-v0.3 merge_method: model_stock base_model: mistralai/Mistral-7B-v0.1 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "automerger/NeuralsynthesisNeuralsynthesis-7B" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
{"license": "apache-2.0", "tags": ["merge", "mergekit", "lazymergekit", "automerger"]}
automerger/NeuralsynthesisNeuralsynthesis-7B
null
[ "merge", "mergekit", "lazymergekit", "automerger", "license:apache-2.0", "region:us" ]
null
2024-04-20T18:59:40+00:00
[]
[]
TAGS #merge #mergekit #lazymergekit #automerger #license-apache-2.0 #region-us
# NeuralsynthesisNeuralsynthesis-7B NeuralsynthesisNeuralsynthesis-7B is an automated merge created by Maxime Labonne using the following configuration. ## Configuration ## Usage
[ "# NeuralsynthesisNeuralsynthesis-7B\n\nNeuralsynthesisNeuralsynthesis-7B is an automated merge created by Maxime Labonne using the following configuration.", "## Configuration", "## Usage" ]
[ "TAGS\n#merge #mergekit #lazymergekit #automerger #license-apache-2.0 #region-us \n", "# NeuralsynthesisNeuralsynthesis-7B\n\nNeuralsynthesisNeuralsynthesis-7B is an automated merge created by Maxime Labonne using the following configuration.", "## Configuration", "## Usage" ]
null
null
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tiny-llama-lora-new This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-Chat-v1.0](https://huggingface.co/TinyLlama/TinyLlama-1.1B-Chat-v1.0) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.2252 - Accuracy: 0.8203 - Precision: 0.8184 - Recall: 0.8203 - Precision Macro: 0.7732 - Recall Macro: 0.7380 - Macro Fpr: 0.0162 - Weighted Fpr: 0.0154 - Weighted Specificity: 0.9743 - Macro Specificity: 0.9863 - Weighted Sensitivity: 0.8203 - Macro Sensitivity: 0.7380 - F1 Micro: 0.8203 - F1 Macro: 0.7435 - F1 Weighted: 0.8173 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 15 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | Precision Macro | Recall Macro | Macro Fpr | Weighted Fpr | Weighted Specificity | Macro Specificity | Weighted Sensitivity | Macro Sensitivity | F1 Micro | F1 Macro | F1 Weighted | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:---------------:|:------------:|:---------:|:------------:|:--------------------:|:-----------------:|:--------------------:|:-----------------:|:--------:|:--------:|:-----------:| | No log | 1.0 | 160 | 0.6615 | 0.8002 | 0.8040 | 0.8002 | 0.7266 | 0.6678 | 0.0182 | 0.0175 | 0.9726 | 0.9848 | 0.8002 | 0.6678 | 0.8002 | 0.6790 | 0.7959 | | No log | 2.0 | 321 | 0.6996 | 0.8064 | 0.8110 | 0.8064 | 0.7448 | 0.7207 | 0.0177 | 0.0169 | 0.9737 | 0.9853 | 0.8064 | 0.7207 | 0.8064 | 0.7235 | 0.8039 | | No log | 3.0 | 482 | 0.8202 | 0.8125 | 0.8119 | 0.8125 | 0.7577 | 0.7080 | 0.0171 | 0.0162 | 0.9711 | 0.9856 | 0.8125 | 0.7080 | 0.8125 | 0.7180 | 0.8085 | | 0.2932 | 4.0 | 643 | 0.9493 | 0.8141 | 0.8204 | 0.8141 | 0.7593 | 0.7327 | 0.0166 | 0.0160 | 0.9744 | 0.9859 | 0.8141 | 0.7327 | 0.8141 | 0.7415 | 0.8154 | | 0.2932 | 5.0 | 803 | 1.0610 | 0.8110 | 0.8110 | 0.8110 | 0.7596 | 0.7427 | 0.0172 | 0.0164 | 0.9738 | 0.9857 | 0.8110 | 0.7427 | 0.8110 | 0.7413 | 0.8087 | | 0.2932 | 6.0 | 964 | 1.1362 | 0.8149 | 0.8160 | 0.8149 | 0.7731 | 0.7380 | 0.0167 | 0.0160 | 0.9741 | 0.9859 | 0.8149 | 0.7380 | 0.8149 | 0.7408 | 0.8128 | | 0.0107 | 7.0 | 1125 | 1.1713 | 0.8102 | 0.8123 | 0.8102 | 0.7734 | 0.7310 | 0.0171 | 0.0165 | 0.9736 | 0.9856 | 0.8102 | 0.7310 | 0.8102 | 0.7343 | 0.8085 | | 0.0107 | 8.0 | 1286 | 1.1786 | 0.8156 | 0.8141 | 0.8156 | 0.7656 | 0.7349 | 0.0166 | 0.0159 | 0.9740 | 0.9860 | 0.8156 | 0.7349 | 0.8156 | 0.7374 | 0.8128 | | 0.0107 | 9.0 | 1446 | 1.1960 | 0.8187 | 0.8170 | 0.8187 | 0.7693 | 0.7368 | 0.0163 | 0.0156 | 0.9743 | 0.9862 | 0.8187 | 0.7368 | 0.8187 | 0.7400 | 0.8157 | | 0.0016 | 10.0 | 1607 | 1.2049 | 0.8156 | 0.8150 | 0.8156 | 0.7659 | 0.7353 | 0.0166 | 0.0159 | 0.9741 | 0.9860 | 0.8156 | 0.7353 | 0.8156 | 0.7376 | 0.8131 | | 0.0016 | 11.0 | 1768 | 1.2137 | 0.8156 | 0.8147 | 0.8156 | 0.7661 | 0.7353 | 0.0166 | 0.0159 | 0.9741 | 0.9860 | 0.8156 | 0.7353 | 0.8156 | 
0.7377 | 0.8130 | | 0.0016 | 12.0 | 1929 | 1.2158 | 0.8156 | 0.8145 | 0.8156 | 0.7664 | 0.7353 | 0.0166 | 0.0159 | 0.9739 | 0.9860 | 0.8156 | 0.7353 | 0.8156 | 0.7379 | 0.8129 | | 0.0011 | 13.0 | 2089 | 1.2202 | 0.8187 | 0.8169 | 0.8187 | 0.7720 | 0.7372 | 0.0163 | 0.0156 | 0.9741 | 0.9862 | 0.8187 | 0.7372 | 0.8187 | 0.7425 | 0.8158 | | 0.0011 | 14.0 | 2250 | 1.2229 | 0.8187 | 0.8169 | 0.8187 | 0.7720 | 0.7372 | 0.0163 | 0.0156 | 0.9741 | 0.9862 | 0.8187 | 0.7372 | 0.8187 | 0.7425 | 0.8158 | | 0.0011 | 14.93 | 2400 | 1.2252 | 0.8203 | 0.8184 | 0.8203 | 0.7732 | 0.7380 | 0.0162 | 0.0154 | 0.9743 | 0.9863 | 0.8203 | 0.7380 | 0.8203 | 0.7435 | 0.8173 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.0+cu121 - Datasets 2.19.0 - Tokenizers 0.15.1
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy", "precision", "recall"], "base_model": "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "model-index": [{"name": "tiny-llama-lora-new", "results": []}]}
xshubhamx/tiny-llama-lora-new
null
[ "tensorboard", "safetensors", "generated_from_trainer", "base_model:TinyLlama/TinyLlama-1.1B-Chat-v1.0", "license:apache-2.0", "region:us" ]
null
2024-04-20T18:59:44+00:00
[]
[]
TAGS #tensorboard #safetensors #generated_from_trainer #base_model-TinyLlama/TinyLlama-1.1B-Chat-v1.0 #license-apache-2.0 #region-us
tiny-llama-lora-new =================== This model is a fine-tuned version of TinyLlama/TinyLlama-1.1B-Chat-v1.0 on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.2252 * Accuracy: 0.8203 * Precision: 0.8184 * Recall: 0.8203 * Precision Macro: 0.7732 * Recall Macro: 0.7380 * Macro Fpr: 0.0162 * Weighted Fpr: 0.0154 * Weighted Specificity: 0.9743 * Macro Specificity: 0.9863 * Weighted Sensitivity: 0.8203 * Macro Sensitivity: 0.7380 * F1 Micro: 0.8203 * F1 Macro: 0.7435 * F1 Weighted: 0.8173 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 32 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 15 ### Training results ### Framework versions * Transformers 4.35.2 * Pytorch 2.1.0+cu121 * Datasets 2.19.0 * Tokenizers 0.15.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 15", "### Training results", "### Framework versions\n\n\n* Transformers 4.35.2\n* Pytorch 2.1.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.1" ]
[ "TAGS\n#tensorboard #safetensors #generated_from_trainer #base_model-TinyLlama/TinyLlama-1.1B-Chat-v1.0 #license-apache-2.0 #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 15", "### Training results", "### Framework versions\n\n\n* Transformers 4.35.2\n* Pytorch 2.1.0+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.1" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
KaggleMasterX/llama3_8b_testflight
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:00:32+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
adapter-transformers
# Adapter `BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_1` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_1", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
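The usage section above stops after loading the adapter. A hedged follow-up shows one way inference might look with the Adapters library; the example sentence is illustrative, and the meaning of the predicted class index is not documented in the card:

```python
# Hedged follow-up to the snippet above: run one prediction through the activated
# adapter and its classification head. The example sentence is illustrative and the
# meaning of the class indices is not documented in the card.
import torch
from transformers import AutoTokenizer
from adapters import AutoAdapterModel

model = AutoAdapterModel.from_pretrained("roberta-base")
model.load_adapter(
    "BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_1",
    source="hf",
    set_active=True,
)
model.eval()

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
inputs = tokenizer("This review was very helpful to me.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(int(logits.argmax(dim=-1)))  # predicted class index
```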
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_1
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T19:00:50+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_1' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_1' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_1' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
OwOOwO/dumbo-stable8
null
[ "transformers", "safetensors", "stablelm", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:01:14+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
deepnet/SN6-67L2
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:01:28+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
image-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ResNet_AI_image_detector This model is a fine-tuned version of [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1330 - Accuracy: 0.9507 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.441 | 1.0 | 1093 | 0.3588 | 0.8458 | | 0.2977 | 2.0 | 2187 | 0.2798 | 0.8806 | | 0.3006 | 3.0 | 3281 | 0.1957 | 0.9222 | | 0.2916 | 4.0 | 4375 | 0.1800 | 0.9323 | | 0.2835 | 5.0 | 5468 | 0.1784 | 0.9307 | | 0.2623 | 6.0 | 6562 | 0.1505 | 0.9425 | | 0.2791 | 7.0 | 7656 | 0.1408 | 0.9460 | | 0.2686 | 8.0 | 8750 | 0.1490 | 0.9433 | | 0.2219 | 9.0 | 9843 | 0.1479 | 0.9445 | | 0.2552 | 9.99 | 10930 | 0.1330 | 0.9507 | ### Framework versions - Transformers 4.30.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.13.3
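The card above lists only training details for the fine-tuned ResNet-50 detector. A minimal inference sketch with the standard `transformers` image-classification pipeline follows; it assumes the `mmanikanta/ResNet_AI_image_detector` repo ships its image-processor config and that the checkpoint's label names are meaningful, neither of which is documented in the card:

```python
# Minimal sketch (not from the card): classify an image with the fine-tuned checkpoint.
# "example.jpg" is a hypothetical local file; if the repo lacks a preprocessor config,
# pass image_processor="microsoft/resnet-50" to the pipeline call.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="mmanikanta/ResNet_AI_image_detector",
)

for prediction in classifier("example.jpg"):
    print(prediction["label"], round(prediction["score"], 3))
```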
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "model-index": [{"name": "ResNet_AI_image_detector", "results": []}]}
mmanikanta/ResNet_AI_image_detector
null
[ "transformers", "pytorch", "tensorboard", "resnet", "image-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:02:55+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #resnet #image-classification #generated_from_trainer #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
ResNet\_AI\_image\_detector =========================== This model is a fine-tuned version of microsoft/resnet-50 on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.1330 * Accuracy: 0.9507 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 42 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 64 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_ratio: 0.1 * num\_epochs: 10 ### Training results ### Framework versions * Transformers 4.30.0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.13.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 64\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.30.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.13.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #resnet #image-classification #generated_from_trainer #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 64\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 10", "### Training results", "### Framework versions\n\n\n* Transformers 4.30.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.13.3" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
tomaszki/llama-2
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:04:54+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # byt5_3k This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0786 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 800 - eval_batch_size: 800 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 1.0 | 4 | 0.1739 | | No log | 2.0 | 8 | 0.1598 | | 0.3456 | 3.0 | 12 | 0.1841 | | 0.3456 | 4.0 | 16 | 0.1430 | | 0.3252 | 5.0 | 20 | 0.1628 | | 0.3252 | 6.0 | 24 | 0.1406 | | 0.3252 | 7.0 | 28 | 0.1349 | | 0.3058 | 8.0 | 32 | 0.1498 | | 0.3058 | 9.0 | 36 | 0.1371 | | 0.2953 | 10.0 | 40 | 0.1417 | | 0.2953 | 11.0 | 44 | 0.1371 | | 0.2953 | 12.0 | 48 | 0.1222 | | 0.2827 | 13.0 | 52 | 0.1317 | | 0.2827 | 14.0 | 56 | 0.1227 | | 0.2719 | 15.0 | 60 | 0.1128 | | 0.2719 | 16.0 | 64 | 0.1209 | | 0.2719 | 17.0 | 68 | 0.1200 | | 0.2698 | 18.0 | 72 | 0.1149 | | 0.2698 | 19.0 | 76 | 0.1090 | | 0.2473 | 20.0 | 80 | 0.1045 | | 0.2473 | 21.0 | 84 | 0.1174 | | 0.2473 | 22.0 | 88 | 0.1056 | | 0.2492 | 23.0 | 92 | 0.0976 | | 0.2492 | 24.0 | 96 | 0.1164 | | 0.2377 | 25.0 | 100 | 0.0974 | | 0.2377 | 26.0 | 104 | 0.0922 | | 0.2377 | 27.0 | 108 | 0.1022 | | 0.2312 | 28.0 | 112 | 0.0908 | | 0.2312 | 29.0 | 116 | 0.0891 | | 0.2254 | 30.0 | 120 | 0.0967 | | 0.2254 | 31.0 | 124 | 0.0930 | | 0.2254 | 32.0 | 128 | 0.0866 | | 0.224 | 33.0 | 132 | 0.0828 | | 0.224 | 34.0 | 136 | 0.0850 | | 0.2166 | 35.0 | 140 | 0.0883 | | 0.2166 | 36.0 | 144 | 0.0825 | | 0.2166 | 37.0 | 148 | 0.0804 | | 0.2128 | 38.0 | 152 | 0.0855 | | 0.2128 | 39.0 | 156 | 0.0865 | | 0.2048 | 40.0 | 160 | 0.0798 | | 0.2048 | 41.0 | 164 | 0.0787 | | 0.2048 | 42.0 | 168 | 0.0802 | | 0.2063 | 43.0 | 172 | 0.0812 | | 0.2063 | 44.0 | 176 | 0.0803 | | 0.2075 | 45.0 | 180 | 0.0779 | | 0.2075 | 46.0 | 184 | 0.0773 | | 0.2075 | 47.0 | 188 | 0.0772 | | 0.1997 | 48.0 | 192 | 0.0781 | | 0.1997 | 49.0 | 196 | 0.0783 | | 0.2074 | 50.0 | 200 | 0.0786 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.2.1+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
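The byt5_3k card above gives hyperparameters and losses but no task description. A generic seq2seq generation sketch follows; the input string, maximum length, and decoding settings are illustrative assumptions, since the intended input format for `AlexWang99/byt5_3k` is not documented:

```python
# Minimal sketch (not from the card): generic ByT5-style generation with the checkpoint.
# The input string is purely illustrative; the task and expected format are undocumented.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo_id = "AlexWang99/byt5_3k"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForSeq2SeqLM.from_pretrained(repo_id)

inputs = tokenizer("example input text", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```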
{"tags": ["generated_from_trainer"], "model-index": [{"name": "byt5_3k", "results": []}]}
AlexWang99/byt5_3k
null
[ "transformers", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:06:10+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
byt5\_3k ======== This model was trained from scratch on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.0786 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 800 * eval\_batch\_size: 800 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 50 ### Training results ### Framework versions * Transformers 4.35.2 * Pytorch 2.2.1+cu121 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 800\n* eval\\_batch\\_size: 800\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 50", "### Training results", "### Framework versions\n\n\n* Transformers 4.35.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 800\n* eval\\_batch\\_size: 800\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 50", "### Training results", "### Framework versions\n\n\n* Transformers 4.35.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
null
peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.9.0
{"language": ["en"], "license": "apache-2.0", "library_name": "peft", "tags": ["subjectivity detection"], "datasets": ["j03x/CheckThat2023", "j03x/CheckThat2023_Test", "j03x/CheckThat2023_Test_gold_en"], "base_model": "mistralai/Mistral-7B-Instruct-v0.1"}
j03x/Mistral7B_finetune1_checkThat
null
[ "peft", "safetensors", "subjectivity detection", "en", "dataset:j03x/CheckThat2023", "dataset:j03x/CheckThat2023_Test", "dataset:j03x/CheckThat2023_Test_gold_en", "arxiv:1910.09700", "base_model:mistralai/Mistral-7B-Instruct-v0.1", "license:apache-2.0", "region:us" ]
null
2024-04-20T19:08:03+00:00
[ "1910.09700" ]
[ "en" ]
TAGS #peft #safetensors #subjectivity detection #en #dataset-j03x/CheckThat2023 #dataset-j03x/CheckThat2023_Test #dataset-j03x/CheckThat2023_Test_gold_en #arxiv-1910.09700 #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact ### Framework versions - PEFT 0.9.0
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.9.0" ]
[ "TAGS\n#peft #safetensors #subjectivity detection #en #dataset-j03x/CheckThat2023 #dataset-j03x/CheckThat2023_Test #dataset-j03x/CheckThat2023_Test_gold_en #arxiv-1910.09700 #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.9.0" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
GugaKunkel/gemma7b-Breaking-Bad
null
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:08:07+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gemma #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
Juhanss/HearMe-chat-model
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:08:50+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
# Uploaded model - **Developed by:** razhan - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-Instruct-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
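The card above names the training library (Unsloth) and the 4-bit base model but gives no loading snippet. The sketch below is an assumption, not part of the original card: it presumes the repo loads through Unsloth's `FastLanguageModel`, the sequence length is a guess, and the card does not say whether the repo holds merged weights or LoRA adapters.

```python
# Hedged sketch: loading razhan/llama-3-8b-lora-ckb with Unsloth for inference.
# The repo id comes from this entry's metadata; max_seq_length is assumed.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="razhan/llama-3-8b-lora-ckb",
    max_seq_length=2048,      # assumption, not stated in the card
    load_in_4bit=True,        # matches the 4-bit base named in the card
)
FastLanguageModel.for_inference(model)  # switch to inference mode

inputs = tokenizer("Hello!", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=32)[0]))
```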
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft"], "base_model": "unsloth/llama-3-8b-Instruct-bnb-4bit"}
razhan/llama-3-8b-lora-ckb
null
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "base_model:unsloth/llama-3-8b-Instruct-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:09:35+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-unsloth/llama-3-8b-Instruct-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: razhan - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-Instruct-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: razhan\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-Instruct-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #conversational #en #base_model-unsloth/llama-3-8b-Instruct-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: razhan\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-Instruct-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/vijayravichander/huggingface/runs/6seef25a)

# mistral-7b-sft-lora-tom-final

This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.2004

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 1
- eval_batch_size: 1
- seed: 42
- gradient_accumulation_steps: 128
- total_train_batch_size: 128
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- num_epochs: 3

### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:-----:|:----:|:---------------:|
| 1.3136        | 0.96  | 15   | 1.3160          |
| 1.2453        | 1.984 | 31   | 1.2184          |
| 1.1807        | 2.88  | 45   | 1.2004          |

### Framework versions

- PEFT 0.10.1.dev0
- Transformers 4.41.0.dev0
- Pytorch 2.2.1+cu121
- Datasets 2.19.0
- Tokenizers 0.19.1
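The hyperparameters listed above map directly onto standard `transformers`/`peft` configuration objects. The snippet below is only a reconstruction of those listed values; the LoRA settings and output directory are not given in the card and are marked as assumptions.

```python
# Sketch of the listed hyperparameters as config objects (reconstruction only).
from transformers import TrainingArguments
from peft import LoraConfig

training_args = TrainingArguments(
    output_dir="mistral-7b-sft-lora-tom-final",  # assumed name
    learning_rate=2e-5,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=128,   # gives the total train batch size of 128
    lr_scheduler_type="cosine",
    num_train_epochs=3,
    seed=42,
)

# The card does not state the LoRA rank/alpha/target modules; these are placeholders.
lora_config = LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM")
```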
{"license": "apache-2.0", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "mistralai/Mistral-7B-Instruct-v0.1", "model-index": [{"name": "mistral-7b-sft-lora-tom-final", "results": []}]}
vijay-ravichander/mistral-7b-sft-lora-tom-final
null
[ "peft", "tensorboard", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:mistralai/Mistral-7B-Instruct-v0.1", "license:apache-2.0", "region:us" ]
null
2024-04-20T19:10:50+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us
<img src="URL alt="Visualize in Weights & Biases" width="200" height="32"/>

mistral-7b-sft-lora-tom-final
=============================

This model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.1 on an unknown dataset.
It achieves the following results on the evaluation set:

* Loss: 1.2004

Model description
-----------------

More information needed

Intended uses & limitations
---------------------------

More information needed

Training and evaluation data
----------------------------

More information needed

Training procedure
------------------

### Training hyperparameters

The following hyperparameters were used during training:

* learning\_rate: 2e-05
* train\_batch\_size: 1
* eval\_batch\_size: 1
* seed: 42
* gradient\_accumulation\_steps: 128
* total\_train\_batch\_size: 128
* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
* lr\_scheduler\_type: cosine
* num\_epochs: 3

### Training results

### Framework versions

* PEFT 0.10.1.dev0
* Transformers 4.41.0.dev0
* Pytorch 2.2.1+cu121
* Datasets 2.19.0
* Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* gradient\\_accumulation\\_steps: 128\n* total\\_train\\_batch\\_size: 128\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* num\\_epochs: 3", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.1.dev0\n* Transformers 4.41.0.dev0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* gradient\\_accumulation\\_steps: 128\n* total\\_train\\_batch\\_size: 128\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* num\\_epochs: 3", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.1.dev0\n* Transformers 4.41.0.dev0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
adapter-transformers
# Adapter `BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_2` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_2", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
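The card's snippet stops after activating the adapter. As a hedged follow-up sketch only, the code below runs the loaded classification head on one review; it assumes the head returns standard classification logits and that the class indices correspond to helpfulness labels, which the card does not spell out.

```python
# Follow-up sketch (not part of the original card): scoring one review with the
# model/adapter loaded in the snippet above. The label meaning of each class is assumed.
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
inputs = tokenizer("This product worked exactly as described.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits        # `model` comes from the card's snippet
print(logits.softmax(dim=-1))              # probabilities per (assumed) helpfulness class
```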
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_2
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T19:11:34+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_2' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_2' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_2' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
text-generation
transformers
# Uploaded model - **Developed by:** FabioSantos - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
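As with the other Unsloth uploads, this card gives no inference snippet. The sketch below is an assumption based only on the repo name ("merged") and the transformers/pytorch tags: it presumes the repo holds full causal-LM weights that load with the standard `pipeline` API.

```python
# Hedged sketch: generic text generation with the merged checkpoint.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="FabioSantos/llama3-8b-oig-unsloth-merged",  # repo id from this entry
    device_map="auto",
)
print(generator("Explain LoRA merging in one sentence.", max_new_tokens=64)[0]["generated_text"])
```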
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
FabioSantos/llama3-8b-oig-unsloth-merged
null
[ "transformers", "pytorch", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:13:49+00:00
[]
[ "en" ]
TAGS #transformers #pytorch #llama #text-generation #text-generation-inference #unsloth #trl #sft #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: FabioSantos - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: FabioSantos\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #pytorch #llama #text-generation #text-generation-inference #unsloth #trl #sft #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: FabioSantos\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
# NEBULA-23.8B-v1.0

![image/png](https://huggingface.co/TeeZee/NEBULA-23B-v1.0/resolve/main/NEBULA-23B-v1.0.jpg)

## Technical notes
- 108 layers, DUS procedure, mistral(32)->SOLAR(48)->GALAXY(72)->NEBULA(108)
- 23.8B parameters
- model created as an extension of the depth upscaling procedure used for SOLAR by upstage

## Results
- model can and will produce NSFW content
- GSM8k evaluation often seems to be broken; HellaSwag, Winogrande and TQA show that it's a smart model
- RP and ERP work surprisingly well and I didn't encounter any GPTisms yet
- comparable memory footprint to 20B and 23B models based on llama
- follows the character card very well
- NSFW output feels fresh compared to existing models

## Finetuning for RP
- SFT using MinervaAI/Aesir-Preview dataset, 10 epochs
- DPO using athirdpath/DPO_Pairs-Roleplay-Alpaca-NSFW dataset, 1 epoch
- SFT using 1xAda6000, 10h
- DPO using 1x3090, 30h
- jupyter notebooks or mergekit configs for anyone wanting to reproduce/reuse scripts - just drop me a message

## Prompt template
- Alpaca
- chat template is embedded in tokenizer config, should load automatically

## Context size
- 4096

All comments are greatly appreciated; download, test, and if you appreciate my work, consider buying me my fuel:
<a href="https://www.buymeacoffee.com/TeeZee" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a>

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_TeeZee__NEBULA-23B-v1.0)

| Metric                          |Value|
|---------------------------------|----:|
|Avg.                             |59.94|
|AI2 Reasoning Challenge (25-Shot)|66.72|
|HellaSwag (10-Shot)              |86.98|
|MMLU (5-Shot)                    |65.40|
|TruthfulQA (0-shot)              |57.60|
|Winogrande (5-shot)              |82.95|
|GSM8k (5-shot)                   | 0.00|
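The prompt-template note above says the Alpaca-style chat template ships inside the tokenizer config. A minimal sketch of what that implies (the example message is an assumption, not from the card):

```python
# Sketch: the card states the chat template is embedded in the tokenizer config,
# so apply_chat_template should render the Alpaca-style prompt automatically.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("TeeZee/NEBULA-23.8B-v1.0")
messages = [{"role": "user", "content": "Introduce your character in two sentences."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # expanded Alpaca-style prompt, ready for generation (4096-token context)
```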
{"language": ["en"], "license": "apache-2.0", "tags": ["not-for-all-audiences"], "datasets": ["Intel/orca_dpo_pairs", "athirdpath/DPO_Pairs-Roleplay-Alpaca-NSFW", "Open-Orca/SlimOrca", "MinervaAI/Aesir-Preview", "allenai/ultrafeedback_binarized_cleaned"], "model-index": [{"name": "NEBULA-23B-v1.0", "results": [{"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "AI2 Reasoning Challenge (25-Shot)", "type": "ai2_arc", "config": "ARC-Challenge", "split": "test", "args": {"num_few_shot": 25}}, "metrics": [{"type": "acc_norm", "value": 66.72, "name": "normalized accuracy"}], "source": {"url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/NEBULA-23B-v1.0", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "HellaSwag (10-Shot)", "type": "hellaswag", "split": "validation", "args": {"num_few_shot": 10}}, "metrics": [{"type": "acc_norm", "value": 86.98, "name": "normalized accuracy"}], "source": {"url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/NEBULA-23B-v1.0", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "MMLU (5-Shot)", "type": "cais/mmlu", "config": "all", "split": "test", "args": {"num_few_shot": 5}}, "metrics": [{"type": "acc", "value": 65.4, "name": "accuracy"}], "source": {"url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/NEBULA-23B-v1.0", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "TruthfulQA (0-shot)", "type": "truthful_qa", "config": "multiple_choice", "split": "validation", "args": {"num_few_shot": 0}}, "metrics": [{"type": "mc2", "value": 57.6}], "source": {"url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/NEBULA-23B-v1.0", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "Winogrande (5-shot)", "type": "winogrande", "config": "winogrande_xl", "split": "validation", "args": {"num_few_shot": 5}}, "metrics": [{"type": "acc", "value": 82.95, "name": "accuracy"}], "source": {"url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/NEBULA-23B-v1.0", "name": "Open LLM Leaderboard"}}, {"task": {"type": "text-generation", "name": "Text Generation"}, "dataset": {"name": "GSM8k (5-shot)", "type": "gsm8k", "config": "main", "split": "test", "args": {"num_few_shot": 5}}, "metrics": [{"type": "acc", "value": 0.0, "name": "accuracy"}], "source": {"url": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/NEBULA-23B-v1.0", "name": "Open LLM Leaderboard"}}]}]}
TeeZee/NEBULA-23.8B-v1.0
null
[ "transformers", "safetensors", "llama", "text-generation", "not-for-all-audiences", "conversational", "en", "dataset:Intel/orca_dpo_pairs", "dataset:athirdpath/DPO_Pairs-Roleplay-Alpaca-NSFW", "dataset:Open-Orca/SlimOrca", "dataset:MinervaAI/Aesir-Preview", "dataset:allenai/ultrafeedback_binarized_cleaned", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:14:00+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #not-for-all-audiences #conversational #en #dataset-Intel/orca_dpo_pairs #dataset-athirdpath/DPO_Pairs-Roleplay-Alpaca-NSFW #dataset-Open-Orca/SlimOrca #dataset-MinervaAI/Aesir-Preview #dataset-allenai/ultrafeedback_binarized_cleaned #license-apache-2.0 #model-index #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
NEBULA-23.8B-v1.0
=================

!image/png

Technical notes
---------------

* 108 layers, DUS procedure, mistral(32)->SOLAR(48)->GALAXY(72)->NEBULA(108)
* 23.8B parameters
* model created as an extension of the depth upscaling procedure used for SOLAR by upstage

Results
-------

* model can and will produce NSFW content
* GSM8k evaluation often seems to be broken; HellaSwag, Winogrande and TQA show that it's a smart model
* RP and ERP work surprisingly well and I didn't encounter any GPTisms yet
* comparable memory footprint to 20B and 23B models based on llama
* follows the character card very well
* NSFW output feels fresh compared to existing models

Finetuning for RP
-----------------

* SFT using MinervaAI/Aesir-Preview dataset, 10 epochs
* DPO using athirdpath/DPO\_Pairs-Roleplay-Alpaca-NSFW dataset, 1 epoch
* SFT using 1xAda6000, 10h
* DPO using 1x3090, 30h
* jupyter notebooks or mergekit configs for anyone wanting to reproduce/reuse scripts - just drop me a message

Prompt template
---------------

* Alpaca
* chat template is embedded in tokenizer config, should load automatically

Context size
------------

* 4096

All comments are greatly appreciated; download, test, and if you appreciate my work, consider buying me my fuel:
<a href="URL target="\_blank"><img src="URL alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" >

Open LLM Leaderboard Evaluation Results
=======================================

Detailed results can be found here
[]
[ "TAGS\n#transformers #safetensors #llama #text-generation #not-for-all-audiences #conversational #en #dataset-Intel/orca_dpo_pairs #dataset-athirdpath/DPO_Pairs-Roleplay-Alpaca-NSFW #dataset-Open-Orca/SlimOrca #dataset-MinervaAI/Aesir-Preview #dataset-allenai/ultrafeedback_binarized_cleaned #license-apache-2.0 #model-index #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
text-generation
transformers
# mlabonne/Chimera-8B AWQ

- Model creator: [mlabonne](https://huggingface.co/mlabonne)
- Original model: [Chimera-8B](https://huggingface.co/mlabonne/Chimera-8B)

## Model Summary

DARE-TIES merge method.

A list of all models and the merging path is coming soon.

Merging the "thick"est model weights from Mistral models using amazing training methods like direct preference optimization (DPO) and reinforcement learning.

I have spent countless hours studying the latest research papers, attending conferences, and networking with experts in the field. I experimented with different algorithms and tactics, fine-tuned hyperparameters and optimizers, and optimized code until I achieved the best possible results.

Thank you, OpenChat 3.5, for showing me the way.

Here is my contribution.

## Prompt Template

Replace {system} with your system prompt, and {prompt} with your prompt instruction.

```
### System:
{system}

### User:
{prompt}

### Assistant:
```
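A tiny helper (not from the card) that fills the template exactly as shown above:

```python
# Hypothetical helper that renders this card's prompt template.
def build_prompt(system: str, prompt: str) -> str:
    return (
        "### System:\n"
        f"{system}\n\n"
        "### User:\n"
        f"{prompt}\n\n"
        "### Assistant:\n"
    )

print(build_prompt("You are a helpful assistant.", "Summarize DARE-TIES merging in one sentence."))
```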
{"library_name": "transformers", "tags": ["4-bit", "AWQ", "text-generation", "autotrain_compatible", "endpoints_compatible"], "pipeline_tag": "text-generation", "inference": false, "quantized_by": "Suparious"}
solidrust/Chimera-8B-AWQ
null
[ "transformers", "safetensors", "llama", "text-generation", "4-bit", "AWQ", "autotrain_compatible", "endpoints_compatible", "conversational", "text-generation-inference", "region:us" ]
null
2024-04-20T19:14:05+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #4-bit #AWQ #autotrain_compatible #endpoints_compatible #conversational #text-generation-inference #region-us
# mlabonne/Chimera-8B AWQ

- Model creator: mlabonne
- Original model: Chimera-8B

## Model Summary

DARE-TIES merge method.

A list of all models and the merging path is coming soon.

Merging the "thick"est model weights from Mistral models using amazing training methods like direct preference optimization (DPO) and reinforcement learning.

I have spent countless hours studying the latest research papers, attending conferences, and networking with experts in the field. I experimented with different algorithms and tactics, fine-tuned hyperparameters and optimizers, and optimized code until I achieved the best possible results.

Thank you, OpenChat 3.5, for showing me the way.

Here is my contribution.

## Prompt Template

Replace {system} with your system prompt, and {prompt} with your prompt instruction.
[ "# mlabonne/Chimera-8B AWQ\n\n- Model creator: mlabonne\n- Original model: Chimera-8B", "## Model Summary\n\nDare-ties merge method.\n\nList of all models and merging path is coming soon.\n\nMerging the \"thick\"est model weights from mistral models using amazing training methods like direct preference optimization (dpo) and reinforced learning. \n\nI have spent countless hours studying the latest research papers, attending conferences, and networking with experts in the field. I experimented with different algorithms, tactics, fine-tuned hyperparameters, optimizers, \nand optimized code until i achieved the best possible results.\n\nThank you openchat 3.5 for showing me the way.\n\nHere is my contribution.", "## Prompt Template\n\nReplace {system} with your system prompt, and {prompt} with your prompt instruction." ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #4-bit #AWQ #autotrain_compatible #endpoints_compatible #conversational #text-generation-inference #region-us \n", "# mlabonne/Chimera-8B AWQ\n\n- Model creator: mlabonne\n- Original model: Chimera-8B", "## Model Summary\n\nDare-ties merge method.\n\nList of all models and merging path is coming soon.\n\nMerging the \"thick\"est model weights from mistral models using amazing training methods like direct preference optimization (dpo) and reinforced learning. \n\nI have spent countless hours studying the latest research papers, attending conferences, and networking with experts in the field. I experimented with different algorithms, tactics, fine-tuned hyperparameters, optimizers, \nand optimized code until i achieved the best possible results.\n\nThank you openchat 3.5 for showing me the way.\n\nHere is my contribution.", "## Prompt Template\n\nReplace {system} with your system prompt, and {prompt} with your prompt instruction." ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
macadeliccc/Orpo-GutenLlama-3-8B
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:14:18+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
token-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-base-finetuned-panx-ar This model is a fine-tuned version of [tner/xlm-roberta-base-panx-dataset-ar](https://huggingface.co/tner/xlm-roberta-base-panx-dataset-ar) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1977 - F1: 0.8803 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | 0.2179 | 1.0 | 188 | 0.1977 | 0.8803 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
{"tags": ["generated_from_trainer"], "metrics": ["f1"], "base_model": "tner/xlm-roberta-base-panx-dataset-ar", "model-index": [{"name": "xlm-roberta-base-finetuned-panx-ar", "results": []}]}
AbdelrahmanSR/xlm-roberta-base-finetuned-panx-ar
null
[ "transformers", "tensorboard", "safetensors", "xlm-roberta", "token-classification", "generated_from_trainer", "base_model:tner/xlm-roberta-base-panx-dataset-ar", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:15:00+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #xlm-roberta #token-classification #generated_from_trainer #base_model-tner/xlm-roberta-base-panx-dataset-ar #autotrain_compatible #endpoints_compatible #region-us
xlm-roberta-base-finetuned-panx-ar ================================== This model is a fine-tuned version of tner/xlm-roberta-base-panx-dataset-ar on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.1977 * F1: 0.8803 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 5e-05 * train\_batch\_size: 64 * eval\_batch\_size: 64 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 ### Training results ### Framework versions * Transformers 4.40.0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #tensorboard #safetensors #xlm-roberta #token-classification #generated_from_trainer #base_model-tner/xlm-roberta-base-panx-dataset-ar #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 5e-05\n* train\\_batch\\_size: 64\n* eval\\_batch\\_size: 64\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mistral_instruct_generation This model is a fine-tuned version of [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.7326 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_steps: 0.03 - training_steps: 100 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 0.6174 | 0.0078 | 20 | 0.7952 | | 0.7153 | 0.0156 | 40 | 0.7565 | | 0.7903 | 0.0235 | 60 | 0.7447 | | 0.7175 | 0.0313 | 80 | 0.7377 | | 0.7081 | 0.0391 | 100 | 0.7326 | ### Framework versions - PEFT 0.10.0 - Transformers 4.40.0 - Pytorch 2.2.1+cu121 - Tokenizers 0.19.1
{"license": "apache-2.0", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "mistralai/Mixtral-8x7B-v0.1", "model-index": [{"name": "mistral_instruct_generation", "results": []}]}
Cem13/mixtral_generation_0
null
[ "peft", "tensorboard", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:mistralai/Mixtral-8x7B-v0.1", "license:apache-2.0", "region:us" ]
null
2024-04-20T19:17:38+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mixtral-8x7B-v0.1 #license-apache-2.0 #region-us
mistral\_instruct\_generation ============================= This model is a fine-tuned version of mistralai/Mixtral-8x7B-v0.1 on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.7326 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0002 * train\_batch\_size: 1 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: constant * lr\_scheduler\_warmup\_steps: 0.03 * training\_steps: 100 ### Training results ### Framework versions * PEFT 0.10.0 * Transformers 4.40.0 * Pytorch 2.2.1+cu121 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: constant\n* lr\\_scheduler\\_warmup\\_steps: 0.03\n* training\\_steps: 100", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Tokenizers 0.19.1" ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mixtral-8x7B-v0.1 #license-apache-2.0 #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: constant\n* lr\\_scheduler\\_warmup\\_steps: 0.03\n* training\\_steps: 100", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.0\n* Pytorch 2.2.1+cu121\n* Tokenizers 0.19.1" ]
null
keras
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | name | Adam | | weight_decay | None | | clipnorm | None | | global_clipnorm | None | | clipvalue | None | | use_ema | False | | ema_momentum | 0.99 | | ema_overwrite_frequency | None | | jit_compile | True | | is_legacy_optimizer | False | | learning_rate | 0.0010000000474974513 | | beta_1 | 0.9 | | beta_2 | 0.999 | | epsilon | 1e-07 | | amsgrad | False | | training_precision | float32 | ## Model Plot <details> <summary>View Model Plot</summary> ![Model Image](./model.png) </details>
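Since the card only tabulates optimizer settings, here is a hedged sketch of how they map onto a standard Keras Adam configuration and how the checkpoint could be pulled from the Hub. The repo id comes from this record; the loss, metrics, and the assumption that the upload is in a Keras-loadable format are mine, not the card's.

```python
import keras
from huggingface_hub import from_pretrained_keras

# The reported hyperparameters correspond to an essentially default Adam setup.
optimizer = keras.optimizers.Adam(
    learning_rate=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False
)

# Load the saved model from the Hub, then recompile with the optimizer above.
model = from_pretrained_keras("Hitomiblood/blindness_modelKerasGPU")
model.compile(optimizer=optimizer, loss="categorical_crossentropy", metrics=["accuracy"])
```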
{"library_name": "keras"}
Hitomiblood/blindness_modelKerasGPU
null
[ "keras", "region:us" ]
null
2024-04-20T19:18:01+00:00
[]
[]
TAGS #keras #region-us
Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: Model Plot ---------- View Model Plot !Model Image
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n\nModel Plot\n----------\n\n\n\nView Model Plot\n!Model Image" ]
[ "TAGS\n#keras #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n\nModel Plot\n----------\n\n\n\nView Model Plot\n!Model Image" ]
null
adapter-transformers
# Adapter `BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_2` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_2", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_2
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T19:19:11+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_2' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_2' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_2' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": ["unsloth"]}
msmart/llama-3-testing
null
[ "transformers", "safetensors", "unsloth", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:20:15+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #unsloth #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #unsloth #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
transformers
# Uploaded model - **Developed by:** FabioSantos - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
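The card notes the model was trained with Unsloth but gives no loading snippet; the sketch below assumes the repo contains weights (or LoRA adapters) that Unsloth's FastLanguageModel can load directly, and the prompt format is invented since the card does not specify one.

```python
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="FabioSantos/llama3-8b-oig-unsloth",
    max_seq_length=2048,
    load_in_4bit=True,  # assumption: 4-bit loading to fit a single consumer GPU
)
FastLanguageModel.for_inference(model)  # switch Unsloth into its faster inference mode

prompt = "### Instruction:\nSummarize what this model was fine-tuned for.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0], skip_special_tokens=True))
```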
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
FabioSantos/llama3-8b-oig-unsloth
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:21:00+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: FabioSantos - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: FabioSantos\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: FabioSantos\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 0.001_ablation_declr_4iters_iter_2 This model is a fine-tuned version of [ShenaoZ/0.001_ablation_declr_4iters_iter_1](https://huggingface.co/ShenaoZ/0.001_ablation_declr_4iters_iter_1) on the updated and the original datasets. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 2 - total_train_batch_size: 128 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
{"license": "mit", "tags": ["alignment-handbook", "generated_from_trainer", "trl", "dpo", "generated_from_trainer"], "datasets": ["updated", "original"], "base_model": "ShenaoZ/0.001_ablation_declr_4iters_iter_1", "model-index": [{"name": "0.001_ablation_declr_4iters_iter_2", "results": []}]}
ShenaoZ/0.001_ablation_declr_4iters_iter_2
null
[ "transformers", "safetensors", "mistral", "text-generation", "alignment-handbook", "generated_from_trainer", "trl", "dpo", "conversational", "dataset:updated", "dataset:original", "base_model:ShenaoZ/0.001_ablation_declr_4iters_iter_1", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:22:00+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-updated #dataset-original #base_model-ShenaoZ/0.001_ablation_declr_4iters_iter_1 #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# 0.001_ablation_declr_4iters_iter_2 This model is a fine-tuned version of ShenaoZ/0.001_ablation_declr_4iters_iter_1 on the updated and the original datasets. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 2 - total_train_batch_size: 128 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
[ "# 0.001_ablation_declr_4iters_iter_2\n\nThis model is a fine-tuned version of ShenaoZ/0.001_ablation_declr_4iters_iter_1 on the updated and the original datasets.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-07\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 128\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-updated #dataset-original #base_model-ShenaoZ/0.001_ablation_declr_4iters_iter_1 #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# 0.001_ablation_declr_4iters_iter_2\n\nThis model is a fine-tuned version of ShenaoZ/0.001_ablation_declr_4iters_iter_1 on the updated and the original datasets.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-07\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 128\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
null
transformers
# Uploaded model - **Developed by:** prtm - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
prtm/ConstructionAdviserModelDraft1
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:22:08+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: prtm - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: prtm\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: prtm\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
null
GGUF Quantum model to be used in llama.cpp after: https://github.com/ggerganov/llama.cpp/issues/6747
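The card only says the file targets llama.cpp once the linked issue lands; as one hedged way to exercise the GGUF from Python, the sketch below downloads it and loads it with the llama-cpp-python bindings. The filename inside the repo is an assumption (check the repo's file listing), and the native llama.cpp CLI works just as well.

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama  # llama-cpp-python bindings around llama.cpp

gguf_path = hf_hub_download(
    repo_id="phymbert/llama3-8b-instruct-q4_k_m-gguf",
    filename="llama3-8b-instruct-q4_k_m.gguf",  # assumed filename
)
llm = Llama(model_path=gguf_path, n_ctx=4096)
print(llm("Hello, how are you?", max_tokens=64)["choices"][0]["text"])
```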
{"license": "llama2"}
phymbert/llama3-8b-instruct-q4_k_m-gguf
null
[ "gguf", "license:llama2", "region:us" ]
null
2024-04-20T19:26:49+00:00
[]
[]
TAGS #gguf #license-llama2 #region-us
GGUF Quantum model to be used in URL after: URL
[]
[ "TAGS\n#gguf #license-llama2 #region-us \n" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_model This model is a fine-tuned version of [distilbert/distilbert-base-uncased](https://huggingface.co/distilbert/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.5328 - Accuracy: 0.1944 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 20 | 1.5233 | 0.1944 | | No log | 2.0 | 40 | 1.5328 | 0.1944 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "distilbert/distilbert-base-uncased", "model-index": [{"name": "my_awesome_model", "results": []}]}
ivanma1/my_awesome_model
null
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert/distilbert-base-uncased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:27:43+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #distilbert #text-classification #generated_from_trainer #base_model-distilbert/distilbert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
my\_awesome\_model ================== This model is a fine-tuned version of distilbert/distilbert-base-uncased on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.5328 * Accuracy: 0.1944 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 16 * eval\_batch\_size: 16 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 2 ### Training results ### Framework versions * Transformers 4.38.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #distilbert #text-classification #generated_from_trainer #base_model-distilbert/distilbert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 16\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
unconditional-image-generation
diffusers
# Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('Sanyam0605/sd-class-butterflies-32') image = pipeline().images[0] image ```
{"license": "mit", "tags": ["pytorch", "diffusers", "unconditional-image-generation", "diffusion-models-class"]}
Sanyam0605/sd-class-butterflies-32
null
[ "diffusers", "safetensors", "pytorch", "unconditional-image-generation", "diffusion-models-class", "license:mit", "diffusers:DDPMPipeline", "region:us" ]
null
2024-04-20T19:29:31+00:00
[]
[]
TAGS #diffusers #safetensors #pytorch #unconditional-image-generation #diffusion-models-class #license-mit #diffusers-DDPMPipeline #region-us
# Model Card for Unit 1 of the Diffusion Models Class This model is a diffusion model for unconditional image generation of cute butterflies. ## Usage
[ "# Model Card for Unit 1 of the Diffusion Models Class \n\nThis model is a diffusion model for unconditional image generation of cute butterflies.", "## Usage" ]
[ "TAGS\n#diffusers #safetensors #pytorch #unconditional-image-generation #diffusion-models-class #license-mit #diffusers-DDPMPipeline #region-us \n", "# Model Card for Unit 1 of the Diffusion Models Class \n\nThis model is a diffusion model for unconditional image generation of cute butterflies.", "## Usage" ]
null
adapter-transformers
# Adapter `BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_3` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_3", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_3
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T19:29:56+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_3' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_3' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_3' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
null
peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.10.0
{"library_name": "peft", "base_model": "unsloth/llama-3-8b"}
Fredithefish/Llama3RP-chkpt-6000
null
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:unsloth/llama-3-8b", "region:us" ]
null
2024-04-20T19:33:13+00:00
[ "1910.09700" ]
[]
TAGS #peft #safetensors #arxiv-1910.09700 #base_model-unsloth/llama-3-8b #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact ### Framework versions - PEFT 0.10.0
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
[ "TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-unsloth/llama-3-8b #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
null
adapter-transformers
# Adapter `BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_3` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_3", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
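A small follow-up sketch, not part of the original card, showing how the activated adapter and its classification head might then be applied to a helpfulness example. The example sentence and the meaning of the predicted class index are assumptions; the card does not document the label mapping.

```python
# Hypothetical usage of the `model` created in the snippet above (adapter active).
import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
inputs = tokenizer("This product worked exactly as described.", return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # logits from the loaded classification head

predicted_class = logits.argmax(dim=-1).item()
print(predicted_class)
```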
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_3
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T19:37:27+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_3' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_3' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_3' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
null
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # vsft-llava_builder_Meta-Llama-3-8B This model is a fine-tuned version of [](https://huggingface.co/) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.4e-05 - train_batch_size: 2 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.0.dev0 - Pytorch 2.2.2+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
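The hyperparameters listed above can be expressed as `transformers` `TrainingArguments`; a sketch is shown below. The output directory name is assumed, and the dataset, model construction, and TRL `SFTTrainer` wiring are not shown in the card.

```python
# Sketch only: the reported hyperparameters as TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="vsft-llava_builder_Meta-Llama-3-8B",  # assumed name
    learning_rate=1.4e-5,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=4,
    gradient_accumulation_steps=4,
    num_train_epochs=1.0,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,  # "mixed_precision_training: Native AMP"
)
```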
{"tags": ["trl", "sft", "generated_from_trainer"], "model-index": [{"name": "vsft-llava_builder_Meta-Llama-3-8B", "results": []}]}
edbeeching/vsft-llava_builder_Meta-Llama-3-8B
null
[ "transformers", "safetensors", "llava", "pretraining", "trl", "sft", "generated_from_trainer", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:38:31+00:00
[]
[]
TAGS #transformers #safetensors #llava #pretraining #trl #sft #generated_from_trainer #endpoints_compatible #region-us
# vsft-llava_builder_Meta-Llama-3-8B This model is a fine-tuned version of [](URL on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.4e-05 - train_batch_size: 2 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 4 - total_train_batch_size: 64 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.40.0.dev0 - Pytorch 2.2.2+cu121 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# vsft-llava_builder_Meta-Llama-3-8B\n\nThis model is a fine-tuned version of [](URL on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1.4e-05\n- train_batch_size: 2\n- eval_batch_size: 4\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 64\n- total_eval_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1.0\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.40.0.dev0\n- Pytorch 2.2.2+cu121\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #llava #pretraining #trl #sft #generated_from_trainer #endpoints_compatible #region-us \n", "# vsft-llava_builder_Meta-Llama-3-8B\n\nThis model is a fine-tuned version of [](URL on an unknown dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1.4e-05\n- train_batch_size: 2\n- eval_batch_size: 4\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 64\n- total_eval_batch_size: 32\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1.0\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.40.0.dev0\n- Pytorch 2.2.2+cu121\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
null
null
["0102", "0295", "0071", "0174", "0106", "0116", "0065"] num_epochs = 10000 gradient_accumulation_steps = 1 learning_rate = 5e-5 [ transforms.Resize((config.image_size, config.image_size)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] "DownBlock2D", # a regular ResNet downsampling block "DownBlock2D", "DownBlock2D", "DownBlock2D", "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention "DownBlock2D",
{}
Giux22/semana7-figuras_naturales
null
[ "region:us" ]
null
2024-04-20T19:38:37+00:00
[]
[]
TAGS #region-us
["0102", "0295", "0071", "0174", "0106", "0116", "0065"] num_epochs = 10000 gradient_accumulation_steps = 1 learning_rate = 5e-5 [ transforms.Resize((config.image_size, config.image_size)), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] "DownBlock2D", # a regular ResNet downsampling block "DownBlock2D", "DownBlock2D", "DownBlock2D", "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention "DownBlock2D",
[ "# a regular ResNet downsampling block\n \"DownBlock2D\",\n \"DownBlock2D\",\n \"DownBlock2D\",\n \"AttnDownBlock2D\", # a ResNet downsampling block with spatial self-attention\n \"DownBlock2D\"," ]
[ "TAGS\n#region-us \n", "# a regular ResNet downsampling block\n \"DownBlock2D\",\n \"DownBlock2D\",\n \"DownBlock2D\",\n \"AttnDownBlock2D\", # a ResNet downsampling block with spatial self-attention\n \"DownBlock2D\"," ]
text-generation
transformers
<!-- original-model-card start --> # Original model card: WizardLM's WizardLM 70B V1.0 ## WizardLM: Empowering Large Pre-Trained Language Models to Follow Complex Instructions <p align="center"> 🤗 <a href="https://huggingface.co/WizardLM" target="_blank">HF Repo</a> •🐱 <a href="https://github.com/nlpxucan/WizardLM" target="_blank">Github Repo</a> • 🐦 <a href="https://twitter.com/WizardLM_AI" target="_blank">Twitter</a> • 📃 <a href="https://arxiv.org/abs/2304.12244" target="_blank">[WizardLM]</a> • 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> • 📃 <a href="https://arxiv.org/abs/2308.09583" target="_blank">[WizardMath]</a> <br> </p> <p align="center"> 👋 Join our <a href="https://discord.gg/VZjjHtWrKs" target="_blank">Discord</a> </p> ## Unofficial Video Introductions Thanks to the enthusiastic friends, their video introductions are more lively and interesting. 1. [NEW WizardLM 70b 🔥 Giant Model...Insane Performance](https://www.youtube.com/watch?v=WdpiIXrO4_o) 2. [GET WizardLM NOW! 7B LLM KING That Can Beat ChatGPT! I'm IMPRESSED!](https://www.youtube.com/watch?v=SaJ8wyKMBds) 3. [WizardLM: Enhancing Large Language Models to Follow Complex Instructions](https://www.youtube.com/watch?v=I6sER-qivYk) 4. [WizardCoder AI Is The NEW ChatGPT's Coding TWIN!](https://www.youtube.com/watch?v=XjsyHrmd3Xo) ## News - 🔥🔥🔥[2023/08/26] We released **WizardCoder-Python-34B-V1.0** , which achieves the **73.2 pass@1** and surpasses **GPT4 (2023/03/15)**, **ChatGPT-3.5**, and **Claude2** on the [HumanEval Benchmarks](https://github.com/openai/human-eval). For more details, please refer to [WizardCoder](https://github.com/nlpxucan/WizardLM/tree/main/WizardCoder). - [2023/06/16] We released **WizardCoder-15B-V1.0** , which surpasses **Claude-Plus (+6.8)**, **Bard (+15.3)** and **InstructCodeT5+ (+22.3)** on the [HumanEval Benchmarks](https://github.com/openai/human-eval). For more details, please refer to [WizardCoder](https://github.com/nlpxucan/WizardLM/tree/main/WizardCoder). 
| Model | Checkpoint | Paper | HumanEval | MBPP | Demo | License | | ----- |------| ---- |------|-------| ----- | ----- | | WizardCoder-Python-34B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 73.2 | 61.2 | [Demo](http://47.103.63.15:50085/) | <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama2</a> | | WizardCoder-15B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-15B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 59.8 |50.6 | -- | <a href="https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement" target="_blank">OpenRAIL-M</a> | | WizardCoder-Python-13B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-Python-13B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 64.0 | 55.6 | -- | <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama2</a> | | WizardCoder-Python-7B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-Python-7B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 55.5 | 51.6 | [Demo](http://47.103.63.15:50088/) | <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama2</a> | | WizardCoder-3B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-3B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 34.8 |37.4 | -- | <a href="https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement" target="_blank">OpenRAIL-M</a> | | WizardCoder-1B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardCoder-1B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2306.08568" target="_blank">[WizardCoder]</a> | 23.8 |28.6 | -- | <a href="https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement" target="_blank">OpenRAIL-M</a> | - 🔥 [08/11/2023] We release **WizardMath** Models. - 🔥 Our **WizardMath-70B-V1.0** model slightly outperforms some closed-source LLMs on the GSM8K, including **ChatGPT 3.5**, **Claude Instant 1** and **PaLM 2 540B**. - 🔥 Our **WizardMath-70B-V1.0** model achieves **81.6 pass@1** on the [GSM8k Benchmarks](https://github.com/openai/grade-school-math), which is **24.8** points higher than the SOTA open-source LLM. - 🔥 Our **WizardMath-70B-V1.0** model achieves **22.7 pass@1** on the [MATH Benchmarks](https://github.com/hendrycks/math), which is **9.2** points higher than the SOTA open-source LLM. 
| Model | Checkpoint | Paper | GSM8k | MATH |Online Demo| License| | ----- |------| ---- |------|-------| ----- | ----- | | WizardMath-70B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardMath-70B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2308.09583" target="_blank">[WizardMath]</a>| **81.6** | **22.7** |[Demo](http://47.103.63.15:50083/)| <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 </a> | | WizardMath-13B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardMath-13B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2308.09583" target="_blank">[WizardMath]</a>| **63.9** | **14.0** |[Demo](http://47.103.63.15:50082/)| <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 </a> | | WizardMath-7B-V1.0 | 🤗 <a href="https://huggingface.co/WizardLM/WizardMath-7B-V1.0" target="_blank">HF Link</a> | 📃 <a href="https://arxiv.org/abs/2308.09583" target="_blank">[WizardMath]</a>| **54.9** | **10.7** | [Demo](http://47.103.63.15:50080/)| <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 </a>| <font size=4> | <sup>Model</sup> | <sup>Checkpoint</sup> | <sup>Paper</sup> |<sup>MT-Bench</sup> | <sup>AlpacaEval</sup> | <sup>GSM8k</sup> | <sup>HumanEval</sup> | <sup>License</sup>| | ----- |------| ---- |------|-------| ----- | ----- | ----- | | <sup>**WizardLM-70B-V1.0**</sup> | <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-70B-V1.0" target="_blank">HF Link</a> </sup>|<sup>📃**Coming Soon**</sup>| <sup>**7.78**</sup> | <sup>**92.91%**</sup> |<sup>**77.6%**</sup> | <sup> **50.6 pass@1**</sup>|<sup> <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 License </a></sup> | | <sup>WizardLM-13B-V1.2</sup> | <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-13B-V1.2" target="_blank">HF Link</a> </sup>| | <sup>7.06</sup> | <sup>89.17%</sup> |<sup>55.3%</sup> | <sup>36.6 pass@1</sup>|<sup> <a href="https://ai.meta.com/resources/models-and-libraries/llama-downloads/" target="_blank">Llama 2 License </a></sup> | | <sup>WizardLM-13B-V1.1</sup> |<sup> 🤗 <a href="https://huggingface.co/WizardLM/WizardLM-13B-V1.1" target="_blank">HF Link</a> </sup> | | <sup>6.76</sup> |<sup>86.32%</sup> | | <sup>25.0 pass@1</sup>| <sup>Non-commercial</sup>| | <sup>WizardLM-30B-V1.0</sup> | <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-30B-V1.0" target="_blank">HF Link</a></sup> | | <sup>7.01</sup> | | | <sup>37.8 pass@1</sup>| <sup>Non-commercial</sup> | | <sup>WizardLM-13B-V1.0</sup> | <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-13B-V1.0" target="_blank">HF Link</a> </sup> | | <sup>6.35</sup> | <sup>75.31%</sup> | | <sup> 24.0 pass@1 </sup> | <sup>Non-commercial</sup>| | <sup>WizardLM-7B-V1.0 </sup>| <sup>🤗 <a href="https://huggingface.co/WizardLM/WizardLM-7B-V1.0" target="_blank">HF Link</a> </sup> |<sup> 📃 <a href="https://arxiv.org/abs/2304.12244" target="_blank">[WizardLM]</a> </sup>| | | |<sup>19.1 pass@1 </sup>|<sup> Non-commercial</sup>| </font> - 🔥🔥🔥 [08/09/2023] We released **WizardLM-70B-V1.0** model. **Github Repo**: https://github.com/nlpxucan/WizardLM **Twitter**: https://twitter.com/WizardLM_AI/status/1689270108747976704 **Discord**: https://discord.gg/bpmeZD7V ❗<b>Note for model system prompts usage:</b> <b>WizardLM</b> adopts the prompt format from <b>Vicuna</b> and supports **multi-turn** conversation. 
The prompt should be as follows:

```
A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: Hi ASSISTANT: Hello.</s>USER: Who are you? ASSISTANT: I am WizardLM.</s>......
```

## Inference WizardLM Demo Script

We provide the inference WizardLM demo code [here](https://github.com/nlpxucan/WizardLM/tree/main/demo).

Please cite the paper if you use the data or code from WizardLM.

```
@article{xu2023wizardlm,
  title={Wizardlm: Empowering large language models to follow complex instructions},
  author={Xu, Can and Sun, Qingfeng and Zheng, Kai and Geng, Xiubo and Zhao, Pu and Feng, Jiazhan and Tao, Chongyang and Jiang, Daxin},
  journal={arXiv preprint arXiv:2304.12244},
  year={2023}
}
```

❗<b>To address a common concern about the dataset:</b>

Recently, there have been clear changes in the open-source policy and regulations of our overall organization's code, data, and models. Despite this, we have still worked hard to open the model weights first, but the data involves stricter auditing and is under review with our legal team. Our researchers have no authority to publicly release it without authorization. Thank you for your understanding.

<!-- original-model-card end -->
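As a quick illustration of the multi-turn prompt format described in the card above — a minimal sketch, not part of the original WizardLM card; the helper name and turn structure are illustrative only:

```python
# Illustrative sketch: assembling the Vicuna-style multi-turn prompt described above.
SYSTEM = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions."
)

def build_prompt(turns):
    """turns: list of (user_message, assistant_reply_or_None) pairs."""
    prompt = SYSTEM + " "
    for user, assistant in turns:
        prompt += f"USER: {user} ASSISTANT:"
        if assistant is not None:
            prompt += f" {assistant}</s>"
    return prompt

print(build_prompt([("Hi", "Hello."), ("Who are you?", None)]))
```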
{"language": ["en"], "license": "apache-2.0", "library_name": "transformers", "tags": ["wizardlm", "llama", "finetuned"], "pipeline_tag": "text-generation"}
MaziyarPanahi/WizardLM-70B-V1.0
null
[ "transformers", "pytorch", "llama", "text-generation", "wizardlm", "finetuned", "en", "arxiv:2304.12244", "arxiv:2306.08568", "arxiv:2308.09583", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:40:39+00:00
[ "2304.12244", "2306.08568", "2308.09583" ]
[ "en" ]
TAGS #transformers #pytorch #llama #text-generation #wizardlm #finetuned #en #arxiv-2304.12244 #arxiv-2306.08568 #arxiv-2308.09583 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
Original model card: WizardLM's WizardLM 70B V1.0 ================================================= WizardLM: Empowering Large Pre-Trained Language Models to Follow Complex Instructions ------------------------------------------------------------------------------------- [HF Repo](URL target=) • [Github Repo](URL target=) • [Twitter](URL target=) • [[WizardLM]](URL target=) • [[WizardCoder]](URL target=) • [[WizardMath]](URL target=) Join our [Discord](URL target=) Unofficial Video Introductions ------------------------------ Thanks to the enthusiastic friends, their video introductions are more lively and interesting. 1. NEW WizardLM 70b Giant Model...Insane Performance 2. GET WizardLM NOW! 7B LLM KING That Can Beat ChatGPT! I'm IMPRESSED! 3. WizardLM: Enhancing Large Language Models to Follow Complex Instructions 4. WizardCoder AI Is The NEW ChatGPT's Coding TWIN! News ---- * [2023/08/26] We released WizardCoder-Python-34B-V1.0 , which achieves the 73.2 pass@1 and surpasses GPT4 (2023/03/15), ChatGPT-3.5, and Claude2 on the HumanEval Benchmarks. For more details, please refer to WizardCoder. * [2023/06/16] We released WizardCoder-15B-V1.0 , which surpasses Claude-Plus (+6.8), Bard (+15.3) and InstructCodeT5+ (+22.3) on the HumanEval Benchmarks. For more details, please refer to WizardCoder. * [08/11/2023] We release WizardMath Models. * Our WizardMath-70B-V1.0 model slightly outperforms some closed-source LLMs on the GSM8K, including ChatGPT 3.5, Claude Instant 1 and PaLM 2 540B. * Our WizardMath-70B-V1.0 model achieves 81.6 pass@1 on the GSM8k Benchmarks, which is 24.8 points higher than the SOTA open-source LLM. * Our WizardMath-70B-V1.0 model achieves 22.7 pass@1 on the MATH Benchmarks, which is 9.2 points higher than the SOTA open-source LLM. * [08/09/2023] We released WizardLM-70B-V1.0 model. Github Repo: URL Twitter: URL Discord: URL **Note for model system prompts usage:** **WizardLM** adopts the prompt format from **Vicuna** and supports multi-turn conversation. The prompt should be as following: Inference WizardLM Demo Script ------------------------------ We provide the inference WizardLM demo code here. Please cite the paper if you use the data or code from WizardLM. **To commen concern about dataset:** Recently, there have been clear changes in the open-source policy and regulations of our overall organization's code, data, and models. Despite this, we have still worked hard to obtain opening the weights of the model first, but the data involves stricter auditing and is in review with our legal team . Our researchers have no authority to publicly release them without authorization. Thank you for your understanding.
[]
[ "TAGS\n#transformers #pytorch #llama #text-generation #wizardlm #finetuned #en #arxiv-2304.12244 #arxiv-2306.08568 #arxiv-2308.09583 #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
automatic-speech-recognition
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # wav2vec2-large-xls-r-300m-upper-sorbian-pl-frozen-2-colab This model is a fine-tuned version of [](https://huggingface.co/) on the common_voice_16_1 dataset. It achieves the following results on the evaluation set: - Loss: 0.6971 - Wer: 0.3974 - Cer: 0.0976 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0003 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 60 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Wer | Cer | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:| | 0.6605 | 3.23 | 100 | 0.6446 | 0.7160 | 0.1772 | | 0.499 | 6.45 | 200 | 0.5900 | 0.6256 | 0.1482 | | 0.3831 | 9.68 | 300 | 0.5681 | 0.5825 | 0.1385 | | 0.2866 | 12.9 | 400 | 0.5691 | 0.5239 | 0.1235 | | 0.2269 | 16.13 | 500 | 0.6304 | 0.5061 | 0.1202 | | 0.1725 | 19.35 | 600 | 0.6649 | 0.4864 | 0.1137 | | 0.1365 | 22.58 | 700 | 0.6574 | 0.4597 | 0.1107 | | 0.1081 | 25.81 | 800 | 0.6525 | 0.4452 | 0.1057 | | 0.0873 | 29.03 | 900 | 0.6799 | 0.4236 | 0.1031 | | 0.0782 | 32.26 | 1000 | 0.7263 | 0.4468 | 0.1093 | | 0.0715 | 35.48 | 1100 | 0.7404 | 0.4173 | 0.1002 | | 0.061 | 38.71 | 1200 | 0.6985 | 0.4208 | 0.1009 | | 0.0551 | 41.94 | 1300 | 0.7094 | 0.4082 | 0.0988 | | 0.049 | 45.16 | 1400 | 0.7069 | 0.4096 | 0.1011 | | 0.0492 | 48.39 | 1500 | 0.6971 | 0.4117 | 0.1008 | | 0.0441 | 51.61 | 1600 | 0.6906 | 0.4025 | 0.0972 | | 0.0407 | 54.84 | 1700 | 0.7154 | 0.4035 | 0.0985 | | 0.0389 | 58.06 | 1800 | 0.6971 | 0.3974 | 0.0976 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
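A minimal inference sketch, assumed rather than taken from the generated card: transcribing an Upper Sorbian recording with the fine-tuned checkpoint via the `transformers` ASR pipeline. The audio file name is a placeholder; 16 kHz mono input is assumed.

```python
# Minimal ASR usage sketch (not part of the original card).
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="badrabdullah/wav2vec2-large-xls-r-300m-upper-sorbian-pl-frozen-2-colab",
)

result = asr("sample.wav")  # placeholder path to a 16 kHz mono recording
print(result["text"])
```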
{"tags": ["generated_from_trainer"], "datasets": ["common_voice_16_1"], "metrics": ["wer"], "model-index": [{"name": "wav2vec2-large-xls-r-300m-upper-sorbian-pl-frozen-2-colab", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Automatic Speech Recognition"}, "dataset": {"name": "common_voice_16_1", "type": "common_voice_16_1", "config": "hsb", "split": "test", "args": "hsb"}, "metrics": [{"type": "wer", "value": 0.39737582005623245, "name": "Wer"}]}]}]}
badrabdullah/wav2vec2-large-xls-r-300m-upper-sorbian-pl-frozen-2-colab
null
[ "transformers", "tensorboard", "safetensors", "wav2vec2", "automatic-speech-recognition", "generated_from_trainer", "dataset:common_voice_16_1", "model-index", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:42:01+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_16_1 #model-index #endpoints_compatible #region-us
wav2vec2-large-xls-r-300m-upper-sorbian-pl-frozen-2-colab ========================================================= This model is a fine-tuned version of [](URL on the common\_voice\_16\_1 dataset. It achieves the following results on the evaluation set: * Loss: 0.6971 * Wer: 0.3974 * Cer: 0.0976 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0003 * train\_batch\_size: 16 * eval\_batch\_size: 8 * seed: 42 * gradient\_accumulation\_steps: 2 * total\_train\_batch\_size: 32 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * lr\_scheduler\_warmup\_steps: 500 * num\_epochs: 60 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.38.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0003\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* num\\_epochs: 60\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #wav2vec2 #automatic-speech-recognition #generated_from_trainer #dataset-common_voice_16_1 #model-index #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0003\n* train\\_batch\\_size: 16\n* eval\\_batch\\_size: 8\n* seed: 42\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 32\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* lr\\_scheduler\\_warmup\\_steps: 500\n* num\\_epochs: 60\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pegasus-xsum-finetuned-keyfindings This model is a fine-tuned version of [google/pegasus-xsum](https://huggingface.co/google/pegasus-xsum) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.5754 - Rouge1: 49.1483 - Rouge2: 26.7485 - Rougel: 40.7712 - Rougelsum: 40.7847 - Gen Len: 31.5733 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | No log | 1.0 | 300 | 1.5754 | 49.1483 | 26.7485 | 40.7712 | 40.7847 | 31.5733 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.15.2
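A usage sketch, assumed rather than documented in the card: summarizing a passage with the fine-tuned checkpoint through the `transformers` summarization pipeline. The input text and length limits are placeholders.

```python
# Summarization usage sketch (not part of the original card).
from transformers import pipeline

summarizer = pipeline(
    "summarization",
    model="basvreeman/pegasus-xsum-finetuned-keyfindings",
)

text = "Replace this with the report text whose key findings you want summarized."
print(summarizer(text, max_length=64, min_length=8)[0]["summary_text"])
```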
{"tags": ["generated_from_trainer"], "metrics": ["rouge"], "base_model": "google/pegasus-xsum", "model-index": [{"name": "pegasus-xsum-finetuned-keyfindings", "results": []}]}
basvreeman/pegasus-xsum-finetuned-keyfindings
null
[ "transformers", "tensorboard", "safetensors", "pegasus", "text2text-generation", "generated_from_trainer", "base_model:google/pegasus-xsum", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:42:42+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #pegasus #text2text-generation #generated_from_trainer #base_model-google/pegasus-xsum #autotrain_compatible #endpoints_compatible #region-us
pegasus-xsum-finetuned-keyfindings ================================== This model is a fine-tuned version of google/pegasus-xsum on the None dataset. It achieves the following results on the evaluation set: * Loss: 1.5754 * Rouge1: 49.1483 * Rouge2: 26.7485 * Rougel: 40.7712 * Rougelsum: 40.7847 * Gen Len: 31.5733 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 4 * eval\_batch\_size: 4 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 1 * mixed\_precision\_training: Native AMP ### Training results ### Framework versions * Transformers 4.38.2 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 4\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #pegasus #text2text-generation #generated_from_trainer #base_model-google/pegasus-xsum #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 4\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 1\n* mixed\\_precision\\_training: Native AMP", "### Training results", "### Framework versions\n\n\n* Transformers 4.38.2\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.15.2" ]
text-classification
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> SMM4H Task 6 : Model for twitter texts ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
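The "How to Get Started" section above is left as [More Information Needed]; a placeholder sketch is given below, assuming the checkpoint loads as a standard BART sequence classifier. The example tweet and the meaning of the predicted class index are assumptions, since the card does not document the SMM4H Task 6 label set.

```python
# Placeholder loading/inference sketch (not part of the original card).
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "1024m/SMM4H-Task6-BartL-B20"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

inputs = tokenizer("example tweet text goes here", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # class index; label names are undocumented
```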
{}
1024m/SMM4H-Task6-BartL-B20
null
[ "transformers", "safetensors", "bart", "text-classification", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:47:14+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #bart #text-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID SMM4H Task 6 : Model for twitter texts ## Model Details ### Model Description - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID\n\n\n\nSMM4H Task 6 : Model for twitter texts", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #bart #text-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID\n\n\n\nSMM4H Task 6 : Model for twitter texts", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-to-image
diffusers
### web_icons_v1 on Stable Diffusion This is the `<outline-icon>` concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the [Stable Conceptualizer](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_conceptualizer_inference.ipynb) notebook. You can also train your own concepts and load them into the concept libraries using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb). Here is the new concept you will be able to use as a `style`: ![<outline-icon> 0](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/model_index.json) ![<outline-icon> 1](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/tokenizer) ![<outline-icon> 2](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/text_encoder) ![<outline-icon> 3](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/unet) ![<outline-icon> 4](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/feature_extractor) ![<outline-icon> 5](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/learned_embeds.bin) ![<outline-icon> 6](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/learned_embeds-step-1000.bin) ![<outline-icon> 7](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/scheduler) ![<outline-icon> 8](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/vae) ![<outline-icon> 9](https://huggingface.co/sd-concepts-library/web-icons-v1/resolve/main/concept_images/safety_checker)
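Besides the Stable Conceptualizer notebook, the learned embedding can also be loaded directly with `diffusers`; a sketch is shown below, not part of the original card. Using the `proximasanfinetuning/fantassified_icons_v2` base from this card's metadata is an assumption, as is the example prompt.

```python
# Sketch: loading the <outline-icon> textual-inversion embedding into a pipeline.
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "proximasanfinetuning/fantassified_icons_v2"  # base model listed in the metadata
)
pipe.load_textual_inversion("sd-concepts-library/web-icons-v1", token="<outline-icon>")

image = pipe("a settings gear icon in the style of <outline-icon>").images[0]
image.save("icon.png")
```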
{"language": ["en"], "license": "mit", "library_name": "diffusers", "tags": ["code"], "base_model": "proximasanfinetuning/fantassified_icons_v2", "pipeline_tag": "text-to-image"}
sd-concepts-library/web-icons-v1
null
[ "diffusers", "code", "text-to-image", "en", "base_model:proximasanfinetuning/fantassified_icons_v2", "license:mit", "region:us" ]
null
2024-04-20T19:47:41+00:00
[]
[ "en" ]
TAGS #diffusers #code #text-to-image #en #base_model-proximasanfinetuning/fantassified_icons_v2 #license-mit #region-us
### web_icons_v1 on Stable Diffusion This is the '<outline-icon>' concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the Stable Conceptualizer notebook. You can also train your own concepts and load them into the concept libraries using this notebook. Here is the new concept you will be able to use as a 'style': !<outline-icon> 0 !<outline-icon> 1 !<outline-icon> 2 !<outline-icon> 3 !<outline-icon> 4 !<outline-icon> 5 !<outline-icon> 6 !<outline-icon> 7 !<outline-icon> 8 !<outline-icon> 9
[ "### web_icons_v1 on Stable Diffusion\nThis is the '<outline-icon>' concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the Stable Conceptualizer notebook. You can also train your own concepts and load them into the concept libraries using this notebook.\n\nHere is the new concept you will be able to use as a 'style':\n!<outline-icon> 0\n!<outline-icon> 1\n!<outline-icon> 2\n!<outline-icon> 3\n!<outline-icon> 4\n!<outline-icon> 5\n!<outline-icon> 6\n!<outline-icon> 7\n!<outline-icon> 8\n!<outline-icon> 9" ]
[ "TAGS\n#diffusers #code #text-to-image #en #base_model-proximasanfinetuning/fantassified_icons_v2 #license-mit #region-us \n", "### web_icons_v1 on Stable Diffusion\nThis is the '<outline-icon>' concept taught to Stable Diffusion via Textual Inversion. You can load this concept into the Stable Conceptualizer notebook. You can also train your own concepts and load them into the concept libraries using this notebook.\n\nHere is the new concept you will be able to use as a 'style':\n!<outline-icon> 0\n!<outline-icon> 1\n!<outline-icon> 2\n!<outline-icon> 3\n!<outline-icon> 4\n!<outline-icon> 5\n!<outline-icon> 6\n!<outline-icon> 7\n!<outline-icon> 8\n!<outline-icon> 9" ]
null
adapter-transformers
# Adapter `BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_4` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_4", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_4
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T19:48:21+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_4' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_4' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/micro_par_bn_v_3_help_class_adp_lr_0003_S_4' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
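The card's "How to Get Started" section is empty; a generic sketch is given below, assuming the checkpoint loads as an ordinary StableLM causal language model with `transformers`. The prompt and generation settings are placeholders.

```python
# Generic loading/generation sketch (not part of the original card).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "OwOOwO/dumbo-stable10"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("Hello!", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```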
{"library_name": "transformers", "tags": []}
OwOOwO/dumbo-stable10
null
[ "transformers", "safetensors", "stablelm", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:48:29+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #stablelm #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
null
Trained on the first 100k lines of UniProt.
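A minimal usage sketch (not part of the original card): it assumes the repository ships a Hugging Face-compatible tokenizer file, and the amino-acid string below is an arbitrary illustration rather than a UniProt record.

```python
# Sketch: load the WordPiece protein tokenizer and tokenize a sequence.
# Assumes the repo exposes a transformers/tokenizers-compatible tokenizer file.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("cabrooks/1k-proteins-wordpiece")

sequence = "MKTAYIAKQRQISFVKSHFSRQLEERLGLIEVQ"  # illustrative amino-acid string
encoding = tokenizer(sequence)

print(encoding["input_ids"])                                     # integer token ids
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))    # WordPiece pieces
```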
{"license": "mit"}
cabrooks/1k-proteins-wordpiece
null
[ "license:mit", "region:us" ]
null
2024-04-20T19:49:11+00:00
[]
[]
TAGS #license-mit #region-us
Trained on the first 100k lines of UniProt.
[]
[ "TAGS\n#license-mit #region-us \n" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
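The "How to Get Started with the Model" section above is left as a placeholder, so the snippet below is only a hypothetical sketch: it assumes the checkpoint loads like any Llama-family causal LM in `transformers`, and the prompt is illustrative.

```python
# Hypothetical usage sketch (the card itself provides no code).
# Assumes a standard causal-LM checkpoint; requires a GPU for float16 inference.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "domenicrosati/lens-loss-minimality_lr_2e-5_attack_meta-llama_Llama-2-7b-chat-hf_1_num_layers_32_8e-5_10k"

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.float16, device_map="auto")

inputs = tokenizer("Explain what a loss landscape is.", return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```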
{"library_name": "transformers", "tags": []}
domenicrosati/lens-loss-minimality_lr_2e-5_attack_meta-llama_Llama-2-7b-chat-hf_1_num_layers_32_8e-5_10k
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T19:49:47+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Llama-2-7b-hf - bnb 4bits - Model creator: https://huggingface.co/meta-llama/ - Original model: https://huggingface.co/meta-llama/Llama-2-7b-hf/ Original model description: --- extra_gated_heading: You need to share contact information with Meta to access this model extra_gated_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at https://ai.meta.com/resources/models-and-libraries/llama-downloads/. "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/. "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement. v. 
You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. 
This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy). #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. 
Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 4. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected]) extra_gated_fields: First Name: text Last Name: text Date of birth: date_picker Country: country Affiliation: text geo: ip_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra_gated_description: >- The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/). extra_gated_button_content: Submit language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 license: llama2 --- # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. 
**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](arxiv.org/abs/2307.09288) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. 
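As a rough sanity check on the emissions table above (not part of the original card): the per-model figures are consistent with a simple energy-times-carbon-intensity estimate, where the ≈0.42 kgCO₂eq/kWh factor is inferred from the table itself and PUE adjustments are ignored.

```latex
% Back-of-the-envelope check of the 7B row (inferred carbon factor, PUE ignored):
% energy = 184,320 GPU-hours x 0.4 kW = 73,728 kWh
% CO2eq  = 73,728 kWh x ~0.42 kgCO2eq/kWh ~= 31 tCO2eq   (table: 31.22 tCO2eq)
\[
E_{\mathrm{CO_2}} \approx t_{\mathrm{GPU}} \times P \times c
  = 184{,}320\,\mathrm{h} \times 0.4\,\mathrm{kW} \times 0.42\,\tfrac{\mathrm{kg\,CO_2eq}}{\mathrm{kWh}}
  \approx 31\,\mathrm{t\,CO_2eq}
\]
```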
## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. 
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/meta-llama/Llama-2-7b) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/meta-llama/Llama-2-13b) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)| |70B| [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)|
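A loading sketch for this quantized re-upload (not from the original Meta card): it assumes the repository stores bitsandbytes 4-bit weights, as the "bnb 4bits" label suggests, and that `bitsandbytes` and `accelerate` are installed. Since this is the pretrained (non-chat) 7B model, plain text completion is shown; the `[INST]`/`<<SYS>>` template described above applies to the chat variants.

```python
# Sketch: load the 4-bit quantized checkpoint and run a short completion.
# Assumes the repo's config carries the bitsandbytes quantization settings,
# so from_pretrained picks them up automatically; the prompt is illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "RichardErkhov/meta-llama_-_Llama-2-7b-hf-4bits"

tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")

inputs = tokenizer("The three laws of thermodynamics are", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64, do_sample=False)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```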
{}
RichardErkhov/meta-llama_-_Llama-2-7b-hf-4bits
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:2307.09288", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-20T19:50:22+00:00
[ "2307.09288" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-2307.09288 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Llama-2-7b-hf - bnb 4bits * Model creator: URL * Original model: URL Original model description: --------------------------- extra\_gated\_heading: You need to share contact information with Meta to access this model extra\_gated\_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at URL "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at URL "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at URL which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. 
The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at URL #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 7. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: URL * Reporting risky content generated by the model: URL * Reporting bugs and security concerns: URL * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL extra\_gated\_fields: First Name: text Last Name: text Date of birth: date\_picker Country: country Affiliation: text geo: ip\_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra\_gated\_description: >- The information you provide will be collected, stored, processed and shared in accordance with the Meta Privacy Policy. extra\_gated\_button\_content: Submit language: * en pipeline\_tag: text-generation tags: * facebook * meta * pytorch * llama * llama-2 license: llama2 --- Llama 2 ======= Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. Model Details ------------- *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. Model Developers Meta Variations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. Input Models input text only. Output Models generate text only. Model Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. Model Dates Llama 2 was trained between January 2023 and July 2023. Status This is a static model trained on an offline dataset. 
Future versions of the tuned models will be released as we improve model safety with community feedback. License A custom commercial license is available at: URL Research Paper "Llama-2: Open Foundation and Fine-tuned Chat Models" Intended Use ------------ Intended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\_completion'. Out-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. Hardware and Software --------------------- Training Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. Carbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. CO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. Training Data ------------- Overview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. Data Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. Evaluation Results ------------------ In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. Overall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. Evaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). 
For ToxiGen, we present the percentage of toxic generations (the smaller the better). Evaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above. Ethical Considerations and Limitations -------------------------------------- Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at URL Reporting Issues ---------------- Please report any software “bug,” or other problems with the models through one of the following means: * Reporting issues with the model: URL * Reporting problematic content generated by the model: URL * Reporting bugs and security concerns: URL Llama Model Index -----------------
[ "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-2307.09288 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
null
transformers
## About <!-- ### quantize_version: 1 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: --> <!-- ### vocab_type: --> static quants of https://huggingface.co/mlabonne/Chimera-8B <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q2_K.gguf) | Q2_K | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.IQ3_XS.gguf) | IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q3_K_S.gguf) | Q3_K_S | 3.8 | | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.IQ3_S.gguf) | IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.IQ3_M.gguf) | IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q3_K_L.gguf) | Q3_K_L | 4.4 | | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.IQ4_XS.gguf) | IQ4_XS | 4.6 | | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q5_K_S.gguf) | Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q5_K_M.gguf) | Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q6_K.gguf) | Q6_K | 6.7 | very good quality | | [GGUF](https://huggingface.co/mradermacher/Chimera-8B-GGUF/resolve/main/Chimera-8B.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
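As a complement to the Usage note above, here is a minimal sketch of loading one of the listed quants with `llama-cpp-python` and `huggingface_hub`. The choice of the Q4_K_M file, the context size, and the prompt are illustrative assumptions, not recommendations from the quant author.

```python
# Hedged sketch: download one of the quants listed in the table above and run it
# with llama-cpp-python. File choice and generation settings are illustrative only.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

gguf_path = hf_hub_download(
    repo_id="mradermacher/Chimera-8B-GGUF",
    filename="Chimera-8B.Q4_K_M.gguf",  # the "fast, recommended" row in the table
)

llm = Llama(model_path=gguf_path, n_ctx=4096)
out = llm("Q: What is a GGUF file?\nA:", max_tokens=64, stop=["\n"])
print(out["choices"][0]["text"])
```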
{"language": ["en"], "license": "other", "library_name": "transformers", "tags": ["merge", "mergekit", "lazymergekit"], "base_model": "mlabonne/Chimera-8B", "quantized_by": "mradermacher"}
mradermacher/Chimera-8B-GGUF
null
[ "transformers", "gguf", "merge", "mergekit", "lazymergekit", "en", "base_model:mlabonne/Chimera-8B", "license:other", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:51:15+00:00
[]
[ "en" ]
TAGS #transformers #gguf #merge #mergekit #lazymergekit #en #base_model-mlabonne/Chimera-8B #license-other #endpoints_compatible #region-us
About ----- static quants of URL weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. Usage ----- If you are unsure how to use GGUF files, refer to one of TheBloke's READMEs for more details, including on how to concatenate multi-part files. Provided Quants --------------- (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): !URL And here are Artefact2's thoughts on the matter: URL FAQ / Model Request ------------------- See URL for some answers to questions you might have and/or if you want some other model quantized. Thanks ------ I thank my company, nethype GmbH, for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.
[]
[ "TAGS\n#transformers #gguf #merge #mergekit #lazymergekit #en #base_model-mlabonne/Chimera-8B #license-other #endpoints_compatible #region-us \n" ]
null
peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.10.0
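The "How to Get Started with the Model" section above is still a placeholder. The following is a hedged sketch, assuming (from this record's id and base_model metadata) that Fredithefish/Llama3RP-chkpt-6250 is a PEFT adapter trained on top of unsloth/llama-3-8b; if the adapter was merged into the base or targets a different checkpoint, the loading call changes accordingly.

```python
# Hedged sketch: assumes the repo is a LoRA/PEFT adapter for the
# unsloth/llama-3-8b base named in this record's metadata.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/llama-3-8b"
adapter_id = "Fredithefish/Llama3RP-chkpt-6250"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)  # attaches the adapter weights

inputs = tokenizer("Describe your character.", return_tensors="pt").to(base.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=40)[0], skip_special_tokens=True))
```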
{"library_name": "peft", "base_model": "unsloth/llama-3-8b"}
Fredithefish/Llama3RP-chkpt-6250
null
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:unsloth/llama-3-8b", "region:us" ]
null
2024-04-20T19:51:20+00:00
[ "1910.09700" ]
[]
TAGS #peft #safetensors #arxiv-1910.09700 #base_model-unsloth/llama-3-8b #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact ### Framework versions - PEFT 0.10.0
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
[ "TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-unsloth/llama-3-8b #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
text-generation
transformers
A fishy model Trained with the ChatML format with a max context length of 32k. Average length in datasets is around 4-8k tokens. # Uploaded model - **Developed by:** TheTsar1209 - **License:** apache-2.0 - **Finetuned from model :** unsloth/mistral-7b-v0.2-bnb-4bit This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
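The card above says the model was trained with the ChatML format; below is a minimal sketch of that prompt layout. It builds the ChatML string by hand so it does not depend on the repo's tokenizer shipping a chat template, and the system prompt and user message are placeholders.

```python
# Hedged sketch of the ChatML layout the card says this model was trained on.
# Built by hand so it works even if the tokenizer has no chat_template configured.
def to_chatml(messages):
    prompt = ""
    for m in messages:  # each m is {"role": ..., "content": ...}
        prompt += f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n"
    return prompt + "<|im_start|>assistant\n"  # the model continues from here

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarise what a 32k context window means."},
]
print(to_chatml(messages))
```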
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "mistral", "trl"], "base_model": "unsloth/mistral-7b-v0.2-bnb-4bit"}
TheTsar1209/mistral-carp-v0.1
null
[ "transformers", "safetensors", "mistral", "text-generation", "text-generation-inference", "unsloth", "trl", "conversational", "en", "base_model:unsloth/mistral-7b-v0.2-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:52:48+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #mistral #text-generation #text-generation-inference #unsloth #trl #conversational #en #base_model-unsloth/mistral-7b-v0.2-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
A fishy model Trained with the ChatML format with a max context length of 32k. Average length in datasets is around 4-8k tokens. # Uploaded model - Developed by: TheTsar1209 - License: apache-2.0 - Finetuned from model : unsloth/mistral-7b-v0.2-bnb-4bit This mistral model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: TheTsar1209\n- License: apache-2.0\n- Finetuned from model : unsloth/mistral-7b-v0.2-bnb-4bit\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #text-generation-inference #unsloth #trl #conversational #en #base_model-unsloth/mistral-7b-v0.2-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: TheTsar1209\n- License: apache-2.0\n- Finetuned from model : unsloth/mistral-7b-v0.2-bnb-4bit\n\nThis mistral model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-classification
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
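The card above leaves its quick-start section blank; as a rough, hedged sketch, this distilbert text-classification checkpoint could likely be tried with the standard pipeline. The repo id comes from this record, while label meanings and any preprocessing are undocumented and therefore hypothetical.

```python
# Hedged usage sketch -- the card provides no usage details, so treat the
# label names and expected inputs as unknown until documented.
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="JonnySilverhand2077/ai_text_identification",  # repo id from this record
)
print(clf("This paragraph may or may not have been written by a language model."))
# Output labels (e.g. LABEL_0 / LABEL_1) are whatever the fine-tuning run defined.
```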
{"library_name": "transformers", "tags": []}
JonnySilverhand2077/ai_text_identification
null
[ "transformers", "safetensors", "distilbert", "text-classification", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:55:17+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #distilbert #text-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #distilbert #text-classification #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
adapter-transformers
# Adapter `BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_4` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/amazon_MICRO_helpfulness_dataset](https://huggingface.co/datasets/BigTMiami/amazon_MICRO_helpfulness_dataset/) dataset and includes a prediction head for classification. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_4", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
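Continuing the loading snippet in the card above, a minimal inference sketch, assuming the bundled prediction head exposes standard classification logits; the example sentence and the meaning of each class are hypothetical, since the card does not document the label mapping:

```python
import torch
from transformers import AutoTokenizer
from adapters import AutoAdapterModel

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
model = AutoAdapterModel.from_pretrained("roberta-base")
model.load_adapter(
    "BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_4",
    source="hf",
    set_active=True,
)

inputs = tokenizer("This review saved me from a bad purchase.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits   # classification head shipped with the adapter
print(logits.softmax(-1))             # class meanings depend on the training setup
```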
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/amazon_MICRO_helpfulness_dataset"]}
BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_4
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/amazon_MICRO_helpfulness_dataset", "region:us" ]
null
2024-04-20T19:55:43+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us
# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_4' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_4' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/amazon_MICRO_helpfulness_dataset #region-us \n", "# Adapter 'BigTMiami/micro_par_bn_v_4_help_class_adp_lr_0003_S_4' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/amazon_MICRO_helpfulness_dataset dataset and includes a prediction head for classification.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # canine_100_chars This model is a fine-tuned version of [google/canine-c](https://huggingface.co/google/canine-c) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.0507 - Accuracy: 0.9905 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.2123 | 1.0 | 6000 | 0.1265 | 0.9721 | | 0.0673 | 2.0 | 12000 | 0.0507 | 0.9905 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.0.0+cu117 - Datasets 2.19.0 - Tokenizers 0.19.1
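The hyperparameters listed above map onto 🤗 Transformers `TrainingArguments` roughly as follows; this is a reconstruction for illustration only, with the output directory as a placeholder and the (unknown) dataset omitted:

```python
from transformers import TrainingArguments

# Reconstruction of the reported setup; output_dir is a placeholder.
args = TrainingArguments(
    output_dir="canine_100_chars",      # hypothetical path
    learning_rate=2e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    num_train_epochs=2,
    lr_scheduler_type="linear",
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```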
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "google/canine-c", "model-index": [{"name": "canine_100_chars", "results": []}]}
addykan/canine_100_chars
null
[ "transformers", "safetensors", "canine", "text-classification", "generated_from_trainer", "base_model:google/canine-c", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T19:58:19+00:00
[]
[]
TAGS #transformers #safetensors #canine #text-classification #generated_from_trainer #base_model-google/canine-c #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
canine\_100\_chars ================== This model is a fine-tuned version of google/canine-c on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 0.0507 * Accuracy: 0.9905 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 2 ### Training results ### Framework versions * Transformers 4.40.0 * Pytorch 2.0.0+cu117 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.0.0+cu117\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #canine #text-classification #generated_from_trainer #base_model-google/canine-c #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 2", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.0.0+cu117\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Llama-2-7b-hf - bnb 8bits - Model creator: https://huggingface.co/meta-llama/ - Original model: https://huggingface.co/meta-llama/Llama-2-7b-hf/ Original model description: --- extra_gated_heading: You need to share contact information with Meta to access this model extra_gated_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at https://ai.meta.com/resources/models-and-libraries/llama-downloads/. "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/. "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement. v. 
You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. 
This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy). #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. 
Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 4. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected]) extra_gated_fields: First Name: text Last Name: text Date of birth: date_picker Country: country Affiliation: text geo: ip_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra_gated_description: >- The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/). extra_gated_button_content: Submit language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 license: llama2 --- # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. 
**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](arxiv.org/abs/2307.09288) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. 
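As a quick sanity check on the emissions table above, each per-model figure is consistent with GPU hours × per-GPU power × a grid carbon intensity of roughly 0.42 kgCO2eq/kWh; that intensity is back-derived from the published numbers rather than stated in the card:

```python
# Sanity check of the per-model emissions table above.
# Power and GPU hours come from the card; the grid carbon intensity is
# back-derived from the published figures (an assumption, not a stated value).
gpu_hours = {"7B": 184_320, "13B": 368_640, "70B": 1_720_320}
power_w = 400                    # peak per-GPU power from the table
intensity_kg_per_kwh = 0.4235    # assumed grid carbon intensity

for size, hours in gpu_hours.items():
    energy_kwh = hours * power_w / 1000
    tco2eq = energy_kwh * intensity_kg_per_kwh / 1000
    print(f"Llama 2 {size}: {energy_kwh:,.0f} kWh -> {tco2eq:.2f} tCO2eq")
# Approximately reproduces 31.22 / 62.44 / 291.42 tCO2eq; the 539 tCO2eq total
# also covers GPU hours for model sizes not listed in this table.
```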
## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. 
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/meta-llama/Llama-2-7b) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/meta-llama/Llama-2-13b) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)| |70B| [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)|
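Since this repository is the bnb 8-bit quantization announced at the top of the card, a hedged loading sketch follows; it assumes the checkpoint ships a bitsandbytes quantization config (as pre-quantized bnb uploads typically do) and that `bitsandbytes` and `accelerate` are installed:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "RichardErkhov/meta-llama_-_Llama-2-7b-hf-8bits"   # repo id from this record

tokenizer = AutoTokenizer.from_pretrained(repo)
# device_map="auto" lets accelerate place the already-quantized 8-bit weights
model = AutoModelForCausalLM.from_pretrained(repo, device_map="auto")

inputs = tokenizer("Llama 2 is", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```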
{}
RichardErkhov/meta-llama_-_Llama-2-7b-hf-8bits
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:2307.09288", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "8-bit", "region:us" ]
null
2024-04-20T19:58:54+00:00
[ "2307.09288" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-2307.09288 #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Llama-2-7b-hf - bnb 8bits * Model creator: URL * Original model: URL Original model description: --------------------------- extra\_gated\_heading: You need to share contact information with Meta to access this model extra\_gated\_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at URL "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at URL "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at URL which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. 
The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at URL #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 7. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: URL * Reporting risky content generated by the model: URL * Reporting bugs and security concerns: URL * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL extra\_gated\_fields: First Name: text Last Name: text Date of birth: date\_picker Country: country Affiliation: text geo: ip\_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra\_gated\_description: >- The information you provide will be collected, stored, processed and shared in accordance with the Meta Privacy Policy. extra\_gated\_button\_content: Submit language: * en pipeline\_tag: text-generation tags: * facebook * meta * pytorch * llama * llama-2 license: llama2 --- Llama 2 ======= Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. Model Details ------------- *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. Model Developers Meta Variations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. Input Models input text only. Output Models generate text only. Model Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. Model Dates Llama 2 was trained between January 2023 and July 2023. Status This is a static model trained on an offline dataset. 
Future versions of the tuned models will be released as we improve model safety with community feedback. License A custom commercial license is available at: URL Research Paper "Llama-2: Open Foundation and Fine-tuned Chat Models" Intended Use ------------ Intended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\_completion'. Out-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. Hardware and Software --------------------- Training Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. Carbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. CO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. Training Data ------------- Overview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. Data Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. Evaluation Results ------------------ In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. Overall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. Evaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). 
For ToxiGen, we present the percentage of toxic generations (the smaller the better). Evaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above. Ethical Considerations and Limitations -------------------------------------- Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at URL Reporting Issues ---------------- Please report any software “bug,” or other problems with the models through one of the following means: * Reporting issues with the model: URL * Reporting problematic content generated by the model: URL * Reporting bugs and security concerns: URL Llama Model Index -----------------
[ "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-2307.09288 #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us \n", "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
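The card text in this record notes that the chat versions expect a specific prompt layout built from the 'INST' and '<<SYS>>' tags, with BOS/EOS tokens and careful whitespace, and recommends calling strip() on inputs. Below is a minimal sketch of that single-turn template; the system and user strings are illustrative and not taken from the card, and BOS/EOS tokens are normally added by the tokenizer rather than by hand.

```python
# Minimal sketch of the single-turn Llama 2 chat prompt layout described in
# the card above (INST and <<SYS>> tags). The example strings are illustrative;
# BOS/EOS tokens are normally added by the tokenizer, not written by hand.
def build_llama2_chat_prompt(system_msg: str, user_msg: str) -> str:
    # The card recommends strip() on inputs to avoid double spaces.
    return (
        f"[INST] <<SYS>>\n{system_msg.strip()}\n<</SYS>>\n\n"
        f"{user_msg.strip()} [/INST]"
    )


if __name__ == "__main__":
    print(build_llama2_chat_prompt(
        "You are a concise, helpful assistant.",
        "Explain in one sentence what grouped-query attention is.",
    ))
```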
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Llama-2-7b-chat-hf - bnb 4bits - Model creator: https://huggingface.co/meta-llama/ - Original model: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf/ Original model description: --- extra_gated_heading: You need to share contact information with Meta to access this model extra_gated_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at https://ai.meta.com/resources/models-and-libraries/llama-downloads/. "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/. "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement. v. 
You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. 
This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy). #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. 
Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 4. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected]) extra_gated_fields: First Name: text Last Name: text Date of birth: date_picker Country: country Affiliation: text geo: ip_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra_gated_description: >- The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/). extra_gated_button_content: Submit language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 license: llama2 --- # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. 
**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](arxiv.org/abs/2307.09288) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. 
## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. 
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/meta-llama/Llama-2-7b) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/meta-llama/Llama-2-13b) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)| |70B| [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)|
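The card above describes a bitsandbytes 4-bit quantization of Llama-2-7b-chat-hf. The following is a minimal sketch, assuming the transformers + bitsandbytes stack, of loading such a checkpoint in 4-bit and running one generation with the INST/<<SYS>> chat format the card describes; the repository id, compute dtype, and generation settings are illustrative choices, not taken from the card.

```python
# Minimal sketch (not from the card) of loading a Llama-2-7B-Chat checkpoint
# in bnb 4-bit with transformers + bitsandbytes and running one generation.
# The repo id, compute dtype, and generation settings below are illustrative.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "meta-llama/Llama-2-7b-chat-hf"  # gated: accept Meta's license first

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",
)

# Chat format described in the card: INST and <<SYS>> tags; the tokenizer
# adds the BOS token, and strip() avoids double spaces in the user turn.
system_msg = "You are a helpful assistant."
user_msg = "Summarize the intended use of this model in two sentences.".strip()
prompt = f"[INST] <<SYS>>\n{system_msg}\n<</SYS>>\n\n{user_msg} [/INST]"

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=128, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```

The same sketch should also apply to the pre-quantized repository this record points to, since transformers reads a stored quantization config from the checkpoint, though that is an assumption rather than something the card states.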
{}
RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-4bits
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:2307.09288", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-20T20:01:00+00:00
[ "2307.09288" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-2307.09288 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Llama-2-7b-chat-hf - bnb 4bits * Model creator: URL * Original model: URL Original model description: --------------------------- extra\_gated\_heading: You need to share contact information with Meta to access this model extra\_gated\_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at URL "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at URL "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at URL which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. 
The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at URL #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 7. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: URL * Reporting risky content generated by the model: URL * Reporting bugs and security concerns: URL * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL extra\_gated\_fields: First Name: text Last Name: text Date of birth: date\_picker Country: country Affiliation: text geo: ip\_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra\_gated\_description: >- The information you provide will be collected, stored, processed and shared in accordance with the Meta Privacy Policy. extra\_gated\_button\_content: Submit language: * en pipeline\_tag: text-generation tags: * facebook * meta * pytorch * llama * llama-2 license: llama2 --- Llama 2 ======= Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. Model Details ------------- *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. Model Developers Meta Variations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. Input Models input text only. Output Models generate text only. Model Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. Model Dates Llama 2 was trained between January 2023 and July 2023. Status This is a static model trained on an offline dataset. 
Future versions of the tuned models will be released as we improve model safety with community feedback. License A custom commercial license is available at: URL Research Paper "Llama-2: Open Foundation and Fine-tuned Chat Models" Intended Use ------------ Intended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\_completion'. Out-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. Hardware and Software --------------------- Training Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. Carbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. CO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. Training Data ------------- Overview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. Data Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. Evaluation Results ------------------ In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. Overall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. Evaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). 
For ToxiGen, we present the percentage of toxic generations (the smaller the better). Evaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above. Ethical Considerations and Limitations -------------------------------------- Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at URL Reporting Issues ---------------- Please report any software “bug,” or other problems with the models through one of the following means: * Reporting issues with the model: URL * Reporting problematic content generated by the model: URL * Reporting bugs and security concerns: URL Llama Model Index -----------------
[ "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-2307.09288 #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n", "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
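The "How to Get Started with the Model" section above is still a placeholder. Below is a minimal, hedged sketch of loading this checkpoint with the 🤗 Transformers `pipeline` API; the repository id is taken from this card's metadata (`realtreetune/HHF_tldr_sft`), and the prompt and decoding settings are illustrative assumptions rather than documented usage.

```python
# Minimal sketch (not documented by the authors): load the GPT-NeoX-based
# checkpoint for text generation via the Transformers pipeline API.
from transformers import pipeline

generator = pipeline("text-generation", model="realtreetune/HHF_tldr_sft")

# The prompt and decoding settings below are arbitrary illustrations.
output = generator("Once upon a time", max_new_tokens=64, do_sample=False)
print(output[0]["generated_text"])
```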
{"library_name": "transformers", "tags": []}
realtreetune/HHF_tldr_sft
null
[ "transformers", "safetensors", "gpt_neox", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:01:44+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #gpt_neox #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #gpt_neox #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Omega-LLama3-MWP This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.01 - num_epochs: 3 ### Training results ### Framework versions - PEFT 0.10.0 - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
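The hyperparameters above describe the run, but the card does not include a training script. The snippet below is a hedged reconstruction of how such a TRL + PEFT supervised fine-tune is typically wired up, reusing the base model and hyperparameters from this card; the dataset file, text field, and LoRA settings are placeholders, since the card only reports training on the "None" dataset.

```python
# Hedged sketch of a TRL + PEFT supervised fine-tune matching the hyperparameters above.
# Base model and training arguments follow the card; the dataset, text field, and LoRA
# values are placeholders, since the card does not document them.
from datasets import load_dataset
from peft import LoraConfig
from transformers import TrainingArguments
from trl import SFTTrainer

dataset = load_dataset("json", data_files="train.jsonl", split="train")  # placeholder data

peft_config = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")  # assumed values

args = TrainingArguments(
    output_dir="Omega-LLama3-MWP",
    learning_rate=1e-6,
    per_device_train_batch_size=1,
    gradient_accumulation_steps=2,
    lr_scheduler_type="cosine",
    warmup_ratio=0.01,
    num_train_epochs=3,
    seed=42,
)

trainer = SFTTrainer(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    args=args,
    train_dataset=dataset,
    dataset_text_field="text",  # placeholder field name
    peft_config=peft_config,
)
trainer.train()
```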
{"license": "other", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "meta-llama/Meta-Llama-3-8B-Instruct", "model-index": [{"name": "Omega-LLama3-MWP", "results": []}]}
OmegaGamage/Omega-LLama3-MWP
null
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "license:other", "region:us" ]
null
2024-04-20T20:05:28+00:00
[]
[]
TAGS #peft #safetensors #trl #sft #generated_from_trainer #base_model-meta-llama/Meta-Llama-3-8B-Instruct #license-other #region-us
# Omega-LLama3-MWP This model is a fine-tuned version of meta-llama/Meta-Llama-3-8B-Instruct on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.01 - num_epochs: 3 ### Training results ### Framework versions - PEFT 0.10.0 - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# Omega-LLama3-MWP\n\nThis model is a fine-tuned version of meta-llama/Meta-Llama-3-8B-Instruct on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-06\n- train_batch_size: 1\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.01\n- num_epochs: 3", "### Training results", "### Framework versions\n\n- PEFT 0.10.0\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #trl #sft #generated_from_trainer #base_model-meta-llama/Meta-Llama-3-8B-Instruct #license-other #region-us \n", "# Omega-LLama3-MWP\n\nThis model is a fine-tuned version of meta-llama/Meta-Llama-3-8B-Instruct on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-06\n- train_batch_size: 1\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.01\n- num_epochs: 3", "### Training results", "### Framework versions\n\n- PEFT 0.10.0\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/soareslance/huggingface/runs/6x09sa5a) # mistral-7b-sft-lora-tom-logical This model is a fine-tuned version of [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.0511 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 128 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 2.4558 | 0.9752 | 16 | 2.3696 | | 2.1585 | 1.9505 | 32 | 2.1089 | | 2.0638 | 2.9257 | 48 | 2.0511 | ### Framework versions - PEFT 0.10.1.dev0 - Transformers 4.41.0.dev0 - Pytorch 2.2.1+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
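Because this repository stores a PEFT (LoRA) adapter rather than full model weights, it is loaded on top of the base model at inference time. The snippet below is a hedged sketch of that pattern; the base-model and adapter ids come from this card, while the dtype, device placement, prompt, and generation settings are illustrative assumptions.

```python
# Hedged sketch: load this LoRA adapter on top of its base model for inference.
# Model ids come from this card; prompt and generation settings are illustrative.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "mistralai/Mistral-7B-Instruct-v0.1"
adapter_id = "soareslance/mistral-7b-sft-lora-tom-logical"

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)

# Example prompt is illustrative only; it follows the Mistral-Instruct [INST] format.
inputs = tokenizer("[INST] Sally puts her keys in the drawer. Where will she look for them? [/INST]",
                   return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0], skip_special_tokens=True))
```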
{"license": "apache-2.0", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "mistralai/Mistral-7B-Instruct-v0.1", "model-index": [{"name": "mistral-7b-sft-lora-tom-logical", "results": []}]}
soareslance/mistral-7b-sft-lora-tom-logical
null
[ "peft", "tensorboard", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:mistralai/Mistral-7B-Instruct-v0.1", "license:apache-2.0", "region:us" ]
null
2024-04-20T20:07:30+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us
<img src="URL alt="Visualize in Weights & Biases" width="200" height="32"/> mistral-7b-sft-lora-tom-logical =============================== This model is a fine-tuned version of mistralai/Mistral-7B-Instruct-v0.1 on the None dataset. It achieves the following results on the evaluation set: * Loss: 2.0511 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 1 * eval\_batch\_size: 1 * seed: 42 * gradient\_accumulation\_steps: 128 * total\_train\_batch\_size: 128 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * num\_epochs: 3 ### Training results ### Framework versions * PEFT 0.10.1.dev0 * Transformers 4.41.0.dev0 * Pytorch 2.2.1+cu121 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* gradient\\_accumulation\\_steps: 128\n* total\\_train\\_batch\\_size: 128\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* num\\_epochs: 3", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.1.dev0\n* Transformers 4.41.0.dev0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#peft #tensorboard #safetensors #trl #sft #generated_from_trainer #base_model-mistralai/Mistral-7B-Instruct-v0.1 #license-apache-2.0 #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* gradient\\_accumulation\\_steps: 128\n* total\\_train\\_batch\\_size: 128\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* num\\_epochs: 3", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.1.dev0\n* Transformers 4.41.0.dev0\n* Pytorch 2.2.1+cu121\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tinyllama-1.1b-chat-sft-qlora This model is a fine-tuned version of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T) on the HuggingFaceH4/ultrachat_200k dataset. It achieves the following results on the evaluation set: - Loss: 1.1695 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 4 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - gradient_accumulation_steps: 2 - total_train_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:-----:|:---------------:| | 1.1785 | 1.0 | 18257 | 1.1695 | ### Framework versions - PEFT 0.7.1 - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
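The "qlora" suffix and the repository's 4-bit tag indicate the adapter was trained on a 4-bit-quantized base model. The snippet below is a hedged sketch of that QLoRA setup; the base model comes from this card, while the quantization and LoRA values are common defaults rather than settings documented here.

```python
# Hedged sketch of the QLoRA setup implied by this card: 4-bit base model + LoRA adapter.
# Base model id comes from the card; quantization/LoRA values are common defaults, not documented ones.
import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)
base = AutoModelForCausalLM.from_pretrained(
    "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
    quantization_config=bnb_config,
)
lora = LoraConfig(r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM")  # assumed values
model = get_peft_model(base, lora)
model.print_trainable_parameters()
```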
{"license": "apache-2.0", "library_name": "peft", "tags": ["alignment-handbook", "trl", "sft", "generated_from_trainer"], "datasets": ["HuggingFaceH4/ultrachat_200k"], "base_model": "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T", "model-index": [{"name": "tinyllama-1.1b-sft-qlora", "results": []}]}
martimfasantos/tinyllama-1.1b-chat-sft-qlora
null
[ "peft", "tensorboard", "safetensors", "llama", "alignment-handbook", "trl", "sft", "generated_from_trainer", "dataset:HuggingFaceH4/ultrachat_200k", "base_model:TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T", "license:apache-2.0", "4-bit", "region:us" ]
null
2024-04-20T20:07:42+00:00
[]
[]
TAGS #peft #tensorboard #safetensors #llama #alignment-handbook #trl #sft #generated_from_trainer #dataset-HuggingFaceH4/ultrachat_200k #base_model-TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T #license-apache-2.0 #4-bit #region-us
tinyllama-1.1b-chat-sft-qlora ============================= This model is a fine-tuned version of TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T on the HuggingFaceH4/ultrachat\_200k dataset. It achieves the following results on the evaluation set: * Loss: 1.1695 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0002 * train\_batch\_size: 4 * eval\_batch\_size: 8 * seed: 42 * distributed\_type: multi-GPU * gradient\_accumulation\_steps: 2 * total\_train\_batch\_size: 8 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_ratio: 0.1 * num\_epochs: 1 ### Training results ### Framework versions * PEFT 0.7.1 * Transformers 4.39.3 * Pytorch 2.1.2 * Datasets 2.18.0 * Tokenizers 0.15.2
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.7.1\n* Transformers 4.39.3\n* Pytorch 2.1.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
[ "TAGS\n#peft #tensorboard #safetensors #llama #alignment-handbook #trl #sft #generated_from_trainer #dataset-HuggingFaceH4/ultrachat_200k #base_model-TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T #license-apache-2.0 #4-bit #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 4\n* eval\\_batch\\_size: 8\n* seed: 42\n* distributed\\_type: multi-GPU\n* gradient\\_accumulation\\_steps: 2\n* total\\_train\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_ratio: 0.1\n* num\\_epochs: 1", "### Training results", "### Framework versions\n\n\n* PEFT 0.7.1\n* Transformers 4.39.3\n* Pytorch 2.1.2\n* Datasets 2.18.0\n* Tokenizers 0.15.2" ]
null
null
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Llama-2-7b-hf - GGUF - Model creator: https://huggingface.co/meta-llama/ - Original model: https://huggingface.co/meta-llama/Llama-2-7b-hf/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Llama-2-7b-hf.Q2_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q2_K.gguf) | Q2_K | 2.36GB | | [Llama-2-7b-hf.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.IQ3_XS.gguf) | IQ3_XS | 2.6GB | | [Llama-2-7b-hf.IQ3_S.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.IQ3_S.gguf) | IQ3_S | 2.75GB | | [Llama-2-7b-hf.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q3_K_S.gguf) | Q3_K_S | 2.75GB | | [Llama-2-7b-hf.IQ3_M.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.IQ3_M.gguf) | IQ3_M | 2.9GB | | [Llama-2-7b-hf.Q3_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q3_K.gguf) | Q3_K | 3.07GB | | [Llama-2-7b-hf.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q3_K_M.gguf) | Q3_K_M | 3.07GB | | [Llama-2-7b-hf.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q3_K_L.gguf) | Q3_K_L | 3.35GB | | [Llama-2-7b-hf.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.IQ4_XS.gguf) | IQ4_XS | 3.4GB | | [Llama-2-7b-hf.Q4_0.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q4_0.gguf) | Q4_0 | 3.56GB | | [Llama-2-7b-hf.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.IQ4_NL.gguf) | IQ4_NL | 3.58GB | | [Llama-2-7b-hf.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q4_K_S.gguf) | Q4_K_S | 3.59GB | | [Llama-2-7b-hf.Q4_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q4_K.gguf) | Q4_K | 3.8GB | | [Llama-2-7b-hf.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q4_K_M.gguf) | Q4_K_M | 3.8GB | | [Llama-2-7b-hf.Q4_1.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q4_1.gguf) | Q4_1 | 3.95GB | | [Llama-2-7b-hf.Q5_0.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q5_0.gguf) | Q5_0 | 4.33GB | | [Llama-2-7b-hf.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q5_K_S.gguf) | Q5_K_S | 4.33GB | | [Llama-2-7b-hf.Q5_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q5_K.gguf) | Q5_K | 4.45GB | | [Llama-2-7b-hf.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q5_K_M.gguf) | Q5_K_M | 4.45GB | | [Llama-2-7b-hf.Q5_1.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q5_1.gguf) | Q5_1 | 4.72GB | | 
[Llama-2-7b-hf.Q6_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf/blob/main/Llama-2-7b-hf.Q6_K.gguf) | Q6_K | 5.15GB | Original model description: --- extra_gated_heading: You need to share contact information with Meta to access this model extra_gated_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at https://ai.meta.com/resources/models-and-libraries/llama-downloads/. "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/. "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. 
The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy). #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 4. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected]) extra_gated_fields: First Name: text Last Name: text Date of birth: date_picker Country: country Affiliation: text geo: ip_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra_gated_description: >- The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/). extra_gated_button_content: Submit language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 license: llama2 --- # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. 
||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](arxiv.org/abs/2307.09288) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. ## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. 
**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. 
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/meta-llama/Llama-2-7b) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/meta-llama/Llama-2-13b) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)| |70B| [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)|
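The quantization table at the top of this card lists the available GGUF files but not how to run one. The snippet below is a hedged sketch that downloads one of the listed files and runs it with `llama-cpp-python`; the repository id and file name come from the table, while the context size and prompt are illustrative (this repository holds the base, non-chat model, so a plain completion prompt is used).

```python
# Hedged sketch: download one of the GGUF quantizations listed above and run it
# with llama-cpp-python. Repo id and file name come from the table; settings are illustrative.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

gguf_path = hf_hub_download(
    repo_id="RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf",
    filename="Llama-2-7b-hf.Q4_K_M.gguf",
)

llm = Llama(model_path=gguf_path, n_ctx=4096)
print(llm("The capital of France is", max_tokens=16)["choices"][0]["text"])
```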
{}
RichardErkhov/meta-llama_-_Llama-2-7b-hf-gguf
null
[ "gguf", "arxiv:2307.09288", "region:us" ]
null
2024-04-20T20:08:10+00:00
[ "2307.09288" ]
[]
TAGS #gguf #arxiv-2307.09288 #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Llama-2-7b-hf - GGUF * Model creator: URL * Original model: URL Name: Llama-2-7b-hf.Q2\_K.gguf, Quant method: Q2\_K, Size: 2.36GB Name: Llama-2-7b-hf.IQ3\_XS.gguf, Quant method: IQ3\_XS, Size: 2.6GB Name: Llama-2-7b-hf.IQ3\_S.gguf, Quant method: IQ3\_S, Size: 2.75GB Name: Llama-2-7b-hf.Q3\_K\_S.gguf, Quant method: Q3\_K\_S, Size: 2.75GB Name: Llama-2-7b-hf.IQ3\_M.gguf, Quant method: IQ3\_M, Size: 2.9GB Name: Llama-2-7b-hf.Q3\_K.gguf, Quant method: Q3\_K, Size: 3.07GB Name: Llama-2-7b-hf.Q3\_K\_M.gguf, Quant method: Q3\_K\_M, Size: 3.07GB Name: Llama-2-7b-hf.Q3\_K\_L.gguf, Quant method: Q3\_K\_L, Size: 3.35GB Name: Llama-2-7b-hf.IQ4\_XS.gguf, Quant method: IQ4\_XS, Size: 3.4GB Name: Llama-2-7b-hf.Q4\_0.gguf, Quant method: Q4\_0, Size: 3.56GB Name: Llama-2-7b-hf.IQ4\_NL.gguf, Quant method: IQ4\_NL, Size: 3.58GB Name: Llama-2-7b-hf.Q4\_K\_S.gguf, Quant method: Q4\_K\_S, Size: 3.59GB Name: Llama-2-7b-hf.Q4\_K.gguf, Quant method: Q4\_K, Size: 3.8GB Name: Llama-2-7b-hf.Q4\_K\_M.gguf, Quant method: Q4\_K\_M, Size: 3.8GB Name: Llama-2-7b-hf.Q4\_1.gguf, Quant method: Q4\_1, Size: 3.95GB Name: Llama-2-7b-hf.Q5\_0.gguf, Quant method: Q5\_0, Size: 4.33GB Name: Llama-2-7b-hf.Q5\_K\_S.gguf, Quant method: Q5\_K\_S, Size: 4.33GB Name: Llama-2-7b-hf.Q5\_K.gguf, Quant method: Q5\_K, Size: 4.45GB Name: Llama-2-7b-hf.Q5\_K\_M.gguf, Quant method: Q5\_K\_M, Size: 4.45GB Name: Llama-2-7b-hf.Q5\_1.gguf, Quant method: Q5\_1, Size: 4.72GB Name: Llama-2-7b-hf.Q6\_K.gguf, Quant method: Q6\_K, Size: 5.15GB Original model description: --------------------------- extra\_gated\_heading: You need to share contact information with Meta to access this model extra\_gated\_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at URL "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at URL "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. 
You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at URL which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. 
If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at URL #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. 
Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 7. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: URL * Reporting risky content generated by the model: URL * Reporting bugs and security concerns: URL * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL extra\_gated\_fields: First Name: text Last Name: text Date of birth: date\_picker Country: country Affiliation: text geo: ip\_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra\_gated\_description: >- The information you provide will be collected, stored, processed and shared in accordance with the Meta Privacy Policy. extra\_gated\_button\_content: Submit language: * en pipeline\_tag: text-generation tags: * facebook * meta * pytorch * llama * llama-2 license: llama2 --- Llama 2 ======= Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. Model Details ------------- *Note: Use of this model is governed by the Meta license. 
In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. Model Developers Meta Variations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. Input Models input text only. Output Models generate text only. Model Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. Model Dates Llama 2 was trained between January 2023 and July 2023. Status This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. License A custom commercial license is available at: URL Research Paper "Llama-2: Open Foundation and Fine-tuned Chat Models" Intended Use ------------ Intended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\_completion'. Out-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. Hardware and Software --------------------- Training Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. Carbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. CO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.
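As a hedged illustration of the single-turn chat prompt layout described under Intended Use above (the authoritative logic, including multi-turn handling and 'BOS'/'EOS' placement, is the referenced 'chat\_completion' code), a minimal Python sketch might look like this; the system and user strings are placeholder examples:

```python
# Minimal sketch of the single-turn Llama-2-chat prompt format described above.
# The [INST] / <<SYS>> tags and the strip() calls follow the card's guidance;
# BOS/EOS tokens are normally added by the tokenizer, not written into the string.

def build_llama2_chat_prompt(system_prompt: str, user_message: str) -> str:
    return (
        f"[INST] <<SYS>>\n{system_prompt.strip()}\n<</SYS>>\n\n"
        f"{user_message.strip()} [/INST]"
    )

prompt = build_llama2_chat_prompt(
    "You are a helpful assistant.",
    "Explain grouped-query attention in one sentence.",
)
print(prompt)
```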
Training Data ------------- Overview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. Data Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. Evaluation Results ------------------ In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library. Overall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. Evaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). Evaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above. Ethical Considerations and Limitations -------------------------------------- Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at URL Reporting Issues ---------------- Please report any software “bug,” or other problems with the models through one of the following means: * Reporting issues with the model: URL * Reporting problematic content generated by the model: URL * Reporting bugs and security concerns: URL Llama Model Index -----------------
[ "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
[ "TAGS\n#gguf #arxiv-2307.09288 #region-us \n", "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B pretrained model, converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
text-generation
transformers
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Llama-2-7b-chat-hf - bnb 8bits - Model creator: https://huggingface.co/meta-llama/ - Original model: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf/ Original model description: --- extra_gated_heading: You need to share contact information with Meta to access this model extra_gated_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at https://ai.meta.com/resources/models-and-libraries/llama-downloads/. "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/. "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement. v. 
You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. 
This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy). #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. 
Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 4. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected]) extra_gated_fields: First Name: text Last Name: text Date of birth: date_picker Country: country Affiliation: text geo: ip_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra_gated_description: >- The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/). extra_gated_button_content: Submit language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 license: llama2 --- # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. **Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. 
**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models (70B) use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](https://arxiv.org/abs/2307.09288) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted (tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. 
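The chat-prompt formatting described under Intended Use above can be illustrated with a short sketch. This is a hedged, minimal example only: it assumes the standard `[INST]` / `<<SYS>>` wrapping from Meta's reference `chat_completion` code, the helper name `build_llama2_prompt` is hypothetical, and exact special-token handling (`BOS`/`EOS`) should be verified against that reference implementation.

```python
# Minimal sketch of the Llama 2 chat prompt layout (assumption: the
# standard [INST] / <<SYS>> convention; the tokenizer is expected to add
# the BOS token during encoding, so only the markup is built by hand).

def build_llama2_prompt(system_prompt: str, user_message: str) -> str:
    """Wrap a single-turn exchange in the Llama 2 chat template."""
    return (
        "[INST] <<SYS>>\n"
        f"{system_prompt.strip()}\n"
        "<</SYS>>\n\n"
        f"{user_message.strip()} [/INST]"
    )

# Example usage; calling strip() on the inputs mirrors the card's advice
# about avoiding accidental double spaces.
prompt = build_llama2_prompt(
    "You are a helpful, honest assistant.",
    "Explain grouped-query attention in one sentence.",
)
print(prompt)
```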
## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonsenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. 
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/meta-llama/Llama-2-7b) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/meta-llama/Llama-2-13b) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)| |70B| [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)|
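Because the card states these weights are converted for the Hugging Face Transformers format, a brief loading sketch may be useful. It is only a sketch under stated assumptions: the gated `meta-llama/Llama-2-7b-chat-hf` repository from the index above, access already granted, and a GPU with enough memory for half-precision weights.

```python
# Hedged loading sketch for the Transformers-format 7B chat checkpoint
# listed in the model index above (assumes gated access has been granted).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Llama-2-7b-chat-hf"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # roughly 14 GB of weights for 7B in fp16
    device_map="auto",
)

prompt = "[INST] What formatting do Llama-2-Chat models expect? [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```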
{}
RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-8bits
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:2307.09288", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "8-bit", "region:us" ]
null
2024-04-20T20:08:28+00:00
[ "2307.09288" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-2307.09288 #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Llama-2-7b-chat-hf - bnb 8bits * Model creator: URL * Original model: URL Original model description: --------------------------- extra\_gated\_heading: You need to share contact information with Meta to access this model extra\_gated\_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at URL "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at URL "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at URL which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. 
The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at URL #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 7. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: URL * Reporting risky content generated by the model: URL * Reporting bugs and security concerns: URL * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL extra\_gated\_fields: First Name: text Last Name: text Date of birth: date\_picker Country: country Affiliation: text geo: ip\_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra\_gated\_description: >- The information you provide will be collected, stored, processed and shared in accordance with the Meta Privacy Policy. extra\_gated\_button\_content: Submit language: * en pipeline\_tag: text-generation tags: * facebook * meta * pytorch * llama * llama-2 license: llama2 --- Llama 2 ======= Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. Model Details ------------- *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. Model Developers Meta Variations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. Input Models input text only. Output Models generate text only. Model Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. Model Dates Llama 2 was trained between January 2023 and July 2023. Status This is a static model trained on an offline dataset. 
Future versions of the tuned models will be released as we improve model safety with community feedback. License A custom commercial license is available at: URL Research Paper "Llama-2: Open Foundation and Fine-tuned Chat Models" Intended Use ------------ Intended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\_completion'. Out-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. Hardware and Software --------------------- Training Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. Carbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. CO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. Training Data ------------- Overview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. Data Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. Evaluation Results ------------------ In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. Overall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. Evaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). 
For ToxiGen, we present the percentage of toxic generations (the smaller the better). Evaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above. Ethical Considerations and Limitations -------------------------------------- Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at URL Reporting Issues ---------------- Please report any software “bug,” or other problems with the models through one of the following means: * Reporting issues with the model: URL * Reporting problematic content generated by the model: URL * Reporting bugs and security concerns: URL Llama Model Index -----------------
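The card above describes an 8-bit bitsandbytes quantization of the 7B chat model. A hedged loading sketch follows: the repository id is taken from this card's id field, a CUDA GPU and the `bitsandbytes` package are assumed, and the explicit `BitsAndBytesConfig` may be redundant if the saved checkpoint already records its 8-bit settings.

```python
# Hedged sketch: loading the 8-bit bitsandbytes quantization described above.
# Assumptions: bitsandbytes is installed, a CUDA device is available, and the
# explicit quantization_config is harmless even if the checkpoint already
# stores an equivalent 8-bit configuration.
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-8bits"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)

prompt = "[INST] Summarize the Llama 2 Acceptable Use Policy in one sentence. [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```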
[ "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-2307.09288 #autotrain_compatible #endpoints_compatible #text-generation-inference #8-bit #region-us \n", "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
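The Llama 2 chat card above names the 'INST' and system tags but does not show them assembled. As a hedged illustration only — the exact tag strings and BOS/EOS handling are assumed to follow Meta's reference 'chat_completion' code rather than being spelled out in the card — a first-turn Llama-2-Chat prompt can be built like this:

```python
# Hedged sketch of the single-turn Llama-2-Chat prompt format referenced above.
# The tag strings ([INST], <<SYS>>) are assumptions based on Meta's reference chat_completion code.
def build_llama2_chat_prompt(system_prompt: str, user_message: str) -> str:
    # strip() on the user input avoids the double-space issue mentioned in the card
    return (
        f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
        f"{user_message.strip()} [/INST]"
    )

print(build_llama2_chat_prompt("You are a helpful assistant.", "Summarize grouped-query attention in one sentence."))
```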
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.4.0` ```yaml base_model: NousResearch/Meta-Llama-3-70B model_type: AutoModelForCausalLM tokenizer_type: AutoTokenizer # PreTrainedTokenizerFast load_in_8bit: false load_in_4bit: true strict: false datasets: - path: Doctor-Shotgun/no-robots-sharegpt type: sharegpt dataset_prepared_path: val_set_size: 0.05 output_dir: ./out/qlora-llama3-70b adapter: qlora lora_model_dir: sequence_len: 2048 sample_packing: true pad_to_sequence_len: true eval_sample_packing: false lora_r: 64 lora_alpha: 16 lora_dropout: 0.05 lora_target_modules: lora_target_linear: true lora_fan_in_fan_out: wandb_project: wandb_entity: wandb_watch: wandb_name: wandb_log_model: gradient_accumulation_steps: 4 micro_batch_size: 1 num_epochs: 4 optimizer: adamw_bnb_8bit lr_scheduler: cosine learning_rate: 0.0002 train_on_inputs: false group_by_length: false bf16: auto fp16: tf32: false gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: true early_stopping_patience: resume_from_checkpoint: local_rank: logging_steps: 1 xformers_attention: flash_attention: true warmup_steps: 10 evals_per_epoch: 4 eval_table_size: saves_per_epoch: 1 debug: deepspeed: weight_decay: 0.0 fsdp: fsdp_config: fsdp_limit_all_gathers: true fsdp_sync_module_states: true fsdp_offload_params: false fsdp_use_orig_params: false fsdp_cpu_ram_efficient_loading: true fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_transformer_layer_cls_to_wrap: LlamaDecoderLayer fsdp_state_dict_type: FULL_STATE_DICT fsdp_sharding_strategy: FULL_SHARD special_tokens: pad_token: <|end_of_text|> ``` </details><br> # out/qlora-llama3-70b This model is a fine-tuned version of [NousResearch/Meta-Llama-3-70B](https://huggingface.co/NousResearch/Meta-Llama-3-70B) on the None dataset. 
It achieves the following results on the evaluation set: - Loss: 1.5377 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - total_eval_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 10 - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.7144 | 0.02 | 1 | 1.8096 | | 1.6886 | 0.25 | 12 | 1.5367 | | 1.6174 | 0.49 | 24 | 1.5176 | | 1.5848 | 0.74 | 36 | 1.5054 | | 1.6542 | 0.98 | 48 | 1.5018 | | 1.572 | 1.21 | 60 | 1.4993 | | 1.5966 | 1.45 | 72 | 1.5007 | | 1.5643 | 1.7 | 84 | 1.4981 | | 1.6312 | 1.94 | 96 | 1.4980 | | 1.5311 | 2.16 | 108 | 1.5027 | | 1.519 | 2.41 | 120 | 1.5109 | | 1.4034 | 2.65 | 132 | 1.5165 | | 1.4658 | 2.9 | 144 | 1.5187 | | 1.5434 | 3.11 | 156 | 1.5264 | | 1.4608 | 3.35 | 168 | 1.5364 | | 1.4529 | 3.6 | 180 | 1.5377 | | 1.3893 | 3.85 | 192 | 1.5377 | ### Framework versions - PEFT 0.10.0 - Transformers 4.40.0.dev0 - Pytorch 2.2.1 - Datasets 2.15.0 - Tokenizers 0.15.0
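The card above documents training only. A hedged inference sketch follows: it loads the 4-bit base model named in the axolotl config and attaches this QLoRA adapter with PEFT. The repo ids come from this card and record; the quantization settings and generation call are assumptions, not part of the original card.

```python
# Hedged sketch: attach the QLoRA adapter from this repo to the 4-bit base model.
# Repo ids are taken from the card/config above; quantization details are assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel

bnb = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
base = AutoModelForCausalLM.from_pretrained(
    "NousResearch/Meta-Llama-3-70B", quantization_config=bnb, device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Meta-Llama-3-70B")
model = PeftModel.from_pretrained(base, "wave-on-discord/llama-3-70b-no-robots-adapter")

inputs = tokenizer("Write a two-sentence product description for a desk lamp.", return_tensors="pt").to(base.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```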
{"license": "other", "library_name": "peft", "tags": ["generated_from_trainer"], "base_model": "NousResearch/Meta-Llama-3-70B", "model-index": [{"name": "out/qlora-llama3-70b", "results": []}]}
wave-on-discord/llama-3-70b-no-robots-adapter
null
[ "peft", "llama", "generated_from_trainer", "base_model:NousResearch/Meta-Llama-3-70B", "license:other", "4-bit", "region:us" ]
null
2024-04-20T20:10:32+00:00
[]
[]
TAGS #peft #llama #generated_from_trainer #base_model-NousResearch/Meta-Llama-3-70B #license-other #4-bit #region-us
<img src="URL alt="Built with Axolotl" width="200" height="32"/> See axolotl config axolotl version: '0.4.0' out/qlora-llama3-70b ==================== This model is a fine-tuned version of NousResearch/Meta-Llama-3-70B on the None dataset. It achieves the following results on the evaluation set: * Loss: 1.5377 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 0.0002 * train\_batch\_size: 1 * eval\_batch\_size: 1 * seed: 42 * distributed\_type: multi-GPU * num\_devices: 8 * gradient\_accumulation\_steps: 4 * total\_train\_batch\_size: 32 * total\_eval\_batch\_size: 8 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: cosine * lr\_scheduler\_warmup\_steps: 10 * num\_epochs: 4 ### Training results ### Framework versions * PEFT 0.10.0 * Transformers 4.40.0.dev0 * Pytorch 2.2.1 * Datasets 2.15.0 * Tokenizers 0.15.0
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 8\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 32\n* total\\_eval\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_steps: 10\n* num\\_epochs: 4", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.0.dev0\n* Pytorch 2.2.1\n* Datasets 2.15.0\n* Tokenizers 0.15.0" ]
[ "TAGS\n#peft #llama #generated_from_trainer #base_model-NousResearch/Meta-Llama-3-70B #license-other #4-bit #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 0.0002\n* train\\_batch\\_size: 1\n* eval\\_batch\\_size: 1\n* seed: 42\n* distributed\\_type: multi-GPU\n* num\\_devices: 8\n* gradient\\_accumulation\\_steps: 4\n* total\\_train\\_batch\\_size: 32\n* total\\_eval\\_batch\\_size: 8\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: cosine\n* lr\\_scheduler\\_warmup\\_steps: 10\n* num\\_epochs: 4", "### Training results", "### Framework versions\n\n\n* PEFT 0.10.0\n* Transformers 4.40.0.dev0\n* Pytorch 2.2.1\n* Datasets 2.15.0\n* Tokenizers 0.15.0" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
Aspik101/llamaft3v2
null
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:11:51+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #conversational #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
olonok/Llama3_8B_qlora_CoT_FineTuned
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:12:40+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-classification
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-1k This model is a fine-tuned version of [distilbert/distilbert-base-uncased](https://huggingface.co/distilbert/distilbert-base-uncased) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 1.0986 - Accuracy: 0.3333 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 25 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:------:|:---------------:|:--------:| | 1.0989 | 1.0 | 6000 | 1.0986 | 0.3333 | | 1.0993 | 2.0 | 12000 | 1.0986 | 0.3333 | | 1.0992 | 3.0 | 18000 | 1.0986 | 0.3333 | | 1.0993 | 4.0 | 24000 | 1.0986 | 0.3333 | | 1.0985 | 5.0 | 30000 | 1.0986 | 0.3333 | | 1.0988 | 6.0 | 36000 | 1.0986 | 0.3333 | | 1.0993 | 7.0 | 42000 | 1.0986 | 0.3333 | | 1.0983 | 8.0 | 48000 | 1.0986 | 0.3333 | | 1.0983 | 9.0 | 54000 | 1.0986 | 0.3333 | | 1.0982 | 10.0 | 60000 | 1.0986 | 0.3333 | | 1.0986 | 11.0 | 66000 | 1.0986 | 0.3333 | | 1.0985 | 12.0 | 72000 | 1.0986 | 0.3333 | | 1.0983 | 13.0 | 78000 | 1.0986 | 0.3333 | | 1.0987 | 14.0 | 84000 | 1.0986 | 0.3333 | | 1.0992 | 15.0 | 90000 | 1.0986 | 0.3333 | | 1.099 | 16.0 | 96000 | 1.0986 | 0.3333 | | 1.0991 | 17.0 | 102000 | 1.0986 | 0.3333 | | 1.0982 | 18.0 | 108000 | 1.0986 | 0.3333 | | 1.0989 | 19.0 | 114000 | 1.0986 | 0.3333 | | 1.099 | 20.0 | 120000 | 1.0986 | 0.3333 | | 1.0991 | 21.0 | 126000 | 1.0986 | 0.3333 | | 1.099 | 22.0 | 132000 | 1.0986 | 0.3333 | | 1.099 | 23.0 | 138000 | 1.0986 | 0.3333 | | 1.0984 | 24.0 | 144000 | 1.0986 | 0.3333 | | 1.0985 | 25.0 | 150000 | 1.0986 | 0.3333 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.0.0+cu117 - Datasets 2.19.0 - Tokenizers 0.19.1
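A hedged usage sketch (not part of the original card): the repo id comes from this record, and the label names are whatever the checkpoint's config defines.

```python
# Hedged sketch: score a sentence with the fine-tuned classifier via the pipeline API.
# The repo id is taken from this record; label names depend on the checkpoint's config.
from transformers import pipeline

classifier = pipeline("text-classification", model="addykan/distilbert-1k")
print(classifier("The service was fine, nothing special."))
```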
{"license": "apache-2.0", "tags": ["generated_from_trainer"], "metrics": ["accuracy"], "base_model": "distilbert/distilbert-base-uncased", "model-index": [{"name": "distilbert-1k", "results": []}]}
addykan/distilbert-1k
null
[ "transformers", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert/distilbert-base-uncased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:14:17+00:00
[]
[]
TAGS #transformers #safetensors #distilbert #text-classification #generated_from_trainer #base_model-distilbert/distilbert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
distilbert-1k ============= This model is a fine-tuned version of distilbert/distilbert-base-uncased on an unknown dataset. It achieves the following results on the evaluation set: * Loss: 1.0986 * Accuracy: 0.3333 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-07 * train\_batch\_size: 8 * eval\_batch\_size: 8 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 25 ### Training results ### Framework versions * Transformers 4.40.0 * Pytorch 2.0.0+cu117 * Datasets 2.19.0 * Tokenizers 0.19.1
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-07\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 25", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.0.0+cu117\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
[ "TAGS\n#transformers #safetensors #distilbert #text-classification #generated_from_trainer #base_model-distilbert/distilbert-base-uncased #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-07\n* train\\_batch\\_size: 8\n* eval\\_batch\\_size: 8\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 25", "### Training results", "### Framework versions\n\n\n* Transformers 4.40.0\n* Pytorch 2.0.0+cu117\n* Datasets 2.19.0\n* Tokenizers 0.19.1" ]
text-generation
transformers
# 4-bit Quantized Llama 3 Model ## Description This repository hosts the 4-bit quantized version of the Llama 3 model. Optimized for reduced memory usage and faster inference, this model is suitable for deployment in environments where computational resources are limited. ## Model Details - **Model Type**: Transformer-based language model. - **Quantization**: 4-bit precision. - **Advantages**: - **Memory Efficiency**: Reduces memory usage significantly, allowing deployment on devices with limited RAM. - **Inference Speed**: Accelerates inference times, depending on the hardware's ability to process low-bit computations. ## How to Use To utilize this model efficiently, follow the steps below: ### Loading the Quantized Model Load the model with specific parameters to ensure it utilizes 4-bit precision: ```python from transformers import AutoModelForCausalLM model_4bit = AutoModelForCausalLM.from_pretrained("SweatyCrayfish/llama-3-8b-quantized", device_map="auto", load_in_4bit=True) ``` ## Adjusting Precision of Components Adjust the precision of other components, which are by default converted to torch.float16; the check below reads a layer norm from the Llama decoder stack: ```python import torch from transformers import AutoModelForCausalLM model_4bit = AutoModelForCausalLM.from_pretrained("SweatyCrayfish/llama-3-8b-quantized", load_in_4bit=True, torch_dtype=torch.float32) print(model_4bit.model.layers[-1].input_layernorm.weight.dtype) ``` ## Citation Original repository and citations: @article{llama3modelcard, title={Llama 3 Model Card}, author={AI@Meta}, year={2024}, url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md} }
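The card covers loading only, so a hedged generation sketch may help. It assumes the repository also ships a tokenizer; if not, a tokenizer from another Llama 3 8B checkpoint would be needed.

```python
# Hedged sketch: generate with the 4-bit model loaded above.
# Assumes a tokenizer is available in the same repo (not confirmed by the card).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("SweatyCrayfish/llama-3-8b-quantized")
inputs = tokenizer("The capital of France is", return_tensors="pt").to(model_4bit.device)
output = model_4bit.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```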
{"language": ["en"], "license": "other", "tags": ["facebook", "meta", "pytorch", "llama", "llama-3", "qlora", "quantization", "4-bit", "lora"], "pipeline_tag": "text-generation", "license_name": "llama3", "license_link": "LICENSE", "extra_gated_prompt": "### META LLAMA 3 COMMUNITY LICENSE AGREEMENT\nMeta Llama 3 Version Release Date: April 18, 2024\n\"Agreement\" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.\n\"Documentation\" means the specifications, manuals and documentation accompanying Meta Llama 3 distributed by Meta at https://llama.meta.com/get-started/.\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity\u2019s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.\n\"Meta Llama 3\" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at https://llama.meta.com/llama-downloads.\n\"Llama Materials\" means, collectively, Meta\u2019s proprietary Meta Llama 3 and Documentation (and any portion thereof) made available under this Agreement.\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).\n \n1. License Rights and Redistribution.\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta\u2019s intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.\nb. Redistribution and Use.\ni. If you distribute or make available the Llama Materials (or any derivative works thereof), or a product or service that uses any of them, including another AI model, you shall (A) provide a copy of this Agreement with any such Llama Materials; and (B) prominently display \u201cBuilt with Meta Llama 3\u201d on a related website, user interface, blogpost, about page, or product documentation. If you use the Llama Materials to create, train, fine tune, or otherwise improve an AI model, which is distributed or made available, you shall also include \u201cLlama 3\u201d at the beginning of any such AI model name.\nii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.\niii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a \u201cNotice\u201d text file distributed as a part of such copies: \u201cMeta Llama 3 is licensed under the Meta Llama 3 Community License, Copyright \u00a9 Meta Platforms, Inc. All Rights Reserved.\u201d\niv. 
Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://llama.meta.com/llama3/use-policy), which is hereby incorporated by reference into this Agreement.\nv. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Meta Llama 3 or derivative works thereof).\n2. Additional Commercial Terms. If, on the Meta Llama 3 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee\u2019s affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \u201cAS IS\u201d BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\na. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you a license to use \u201cLlama 3\u201d (the \u201cMark\u201d) solely as required to comply with the last sentence of Section 1.b.i. You will comply with Meta\u2019s brand guidelines (currently accessible at https://about.meta.com/brand/resources/meta/company-brand/ ). All goodwill arising out of your use of the Mark will inure to the benefit of Meta.\nb. Subject to Meta\u2019s ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.\nc. 
If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Meta Llama 3 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.\n6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.\n### Meta Llama 3 Acceptable Use Policy\nMeta is committed to promoting safe and fair use of its tools and features, including Meta Llama 3. If you access or use Meta Llama 3, you agree to this Acceptable Use Policy (\u201cPolicy\u201d). The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy](https://llama.meta.com/llama3/use-policy)\n#### Prohibited Uses\nWe want everyone to use Meta Llama 3 safely and responsibly. You agree you will not use, or allow others to use, Meta Llama 3 to: 1. Violate the law or others\u2019 rights, including to:\n 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n 1. Violence or terrorism\n 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n 3. Human trafficking, exploitation, and sexual violence\n 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n 5. Sexual solicitation\n 6. Any other criminal activity\n 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n 6. 
Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama Materials\n 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Meta Llama 3 related to the following:\n 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n 2. Guns and illegal weapons (including weapon development)\n 3. Illegal drugs and regulated/controlled substances\n 4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n 5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Meta Llama 3 related to the following:\n 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n 3. Generating, promoting, or further distributing spam\n 4. Impersonating another individual without consent, authorization, or legal right\n 5. Representing that the use of Meta Llama 3 or outputs are human-generated\n 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n4. Fail to appropriately disclose to end users any known dangers of your AI system\nPlease report any violation of this Policy, software \u201cbug,\u201d or other problems that could lead to a violation of this Policy through one of the following means:\n * Reporting issues with the model: [https://github.com/meta-llama/llama3](https://github.com/meta-llama/llama3)\n * Reporting risky content generated by the model:\n developers.facebook.com/llama_output_feedback\n * Reporting bugs and security concerns: facebook.com/whitehat/info\n * Reporting violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: [email protected]", "extra_gated_fields": {"First Name": "text", "Last Name": "text", "Date of birth": "date_picker", "Country": "country", "Affiliation": "text", "geo": "ip_location", "By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy": "checkbox"}, "extra_gated_description": "The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).", "extra_gated_button_content": "Submit"}
SweatyCrayfish/llama-3-8b-quantized
null
[ "transformers", "safetensors", "llama", "text-generation", "facebook", "meta", "pytorch", "llama-3", "qlora", "quantization", "4-bit", "lora", "en", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:15:09+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #facebook #meta #pytorch #llama-3 #qlora #quantization #4-bit #lora #en #license-other #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# 4-bit Quantized Llama 3 Model ## Description This repository hosts the 4-bit quantized version of the Llama 3 model. Optimized for reduced memory usage and faster inference, this model is suitable for deployment in environments where computational resources are limited. ## Model Details - Model Type: Transformer-based language model. - Quantization: 4-bit precision. - Advantages: - Memory Efficiency: Reduces memory usage significantly, allowing deployment on devices with limited RAM. - Inference Speed: Accelerates inference times, depending on the hardware's ability to process low-bit computations. ## How to Use To utilize this model efficiently, follow the steps below: ### Loading the Quantized Model Load the model with specific parameters to ensure it utilizes 4-bit precision: ## Adjusting Precision of Components Adjust the precision of other components, which are by default converted to torch.float16: Original repository and citations: @article{llama3modelcard, title={Llama 3 Model Card}, author={AI@Meta}, year={2024}, url = {URL }
[ "# 4-bit Quantized Llama 3 Model", "## Description\nThis repository hosts the 4-bit quantized version of the Llama 3 model. Optimized for reduced memory usage and faster inference, this model is suitable for deployment in environments where computational resources are limited.", "## Model Details\n- Model Type: Transformer-based language model.\n- Quantization: 4-bit precision.\n- Advantages:\n - Memory Efficiency: Reduces memory usage significantly, allowing deployment on devices with limited RAM.\n - Inference Speed: Accelerates inference times, depending on the hardware's ability to process low-bit computations.", "## How to Use\nTo utilize this model efficiently, follow the steps below:", "### Loading the Quantized Model\nLoad the model with specific parameters to ensure it utilizes 4-bit precision:", "## Adjusting Precision of Components\nAdjust the precision of other components, which are by default converted to torch.float16:\n\nOriginal repository and citations:\n@article{llama3modelcard,\n title={Llama 3 Model Card},\n author={AI@Meta},\n year={2024},\n url = {URL\n}" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #facebook #meta #pytorch #llama-3 #qlora #quantization #4-bit #lora #en #license-other #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# 4-bit Quantized Llama 3 Model", "## Description\nThis repository hosts the 4-bit quantized version of the Llama 3 model. Optimized for reduced memory usage and faster inference, this model is suitable for deployment in environments where computational resources are limited.", "## Model Details\n- Model Type: Transformer-based language model.\n- Quantization: 4-bit precision.\n- Advantages:\n - Memory Efficiency: Reduces memory usage significantly, allowing deployment on devices with limited RAM.\n - Inference Speed: Accelerates inference times, depending on the hardware's ability to process low-bit computations.", "## How to Use\nTo utilize this model efficiently, follow the steps below:", "### Loading the Quantized Model\nLoad the model with specific parameters to ensure it utilizes 4-bit precision:", "## Adjusting Precision of Components\nAdjust the precision of other components, which are by default converted to torch.float16:\n\nOriginal repository and citations:\n@article{llama3modelcard,\n title={Llama 3 Model Card},\n author={AI@Meta},\n year={2024},\n url = {URL\n}" ]
reinforcement-learning
stable-baselines3
# **DQN** Agent playing **SpaceInvadersNoFrameskip-v4** This is a trained model of a **DQN** agent playing **SpaceInvadersNoFrameskip-v4** using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3) and the [RL Zoo](https://github.com/DLR-RM/rl-baselines3-zoo). The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: https://github.com/DLR-RM/rl-baselines3-zoo<br/> SB3: https://github.com/DLR-RM/stable-baselines3<br/> SB3 Contrib: https://github.com/Stable-Baselines-Team/stable-baselines3-contrib Install the RL Zoo (with SB3 and SB3-Contrib): ```bash pip install rl_zoo3 ``` ``` # Download model and save it into the logs/ folder python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga vicha-w -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` If you installed the RL Zoo3 via pip (`pip install rl_zoo3`), from anywhere you can do: ``` python -m rl_zoo3.load_from_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -orga vicha-w -f logs/ python -m rl_zoo3.enjoy --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ ``` ## Training (with the RL Zoo) ``` python -m rl_zoo3.train --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ # Upload the model and generate video (when possible) python -m rl_zoo3.push_to_hub --algo dqn --env SpaceInvadersNoFrameskip-v4 -f logs/ -orga vicha-w ``` ## Hyperparameters ```python OrderedDict([('batch_size', 32), ('buffer_size', 100000), ('env_wrapper', ['stable_baselines3.common.atari_wrappers.AtariWrapper']), ('exploration_final_eps', 0.01), ('exploration_fraction', 0.1), ('frame_stack', 4), ('gradient_steps', 1), ('learning_rate', 0.0001), ('learning_starts', 100000), ('n_timesteps', 1000000.0), ('optimize_memory_usage', False), ('policy', 'CnnPolicy'), ('target_update_interval', 1000), ('train_freq', 4), ('normalize', False)]) ``` # Environment Arguments ```python {'render_mode': 'rgb_array'} ```
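Beyond the RL Zoo CLI shown above, the trained agent can also be loaded directly with Stable-Baselines3 once the zip has been downloaded (for example with the `rl_zoo3.load_from_hub` command above). This is a minimal sketch, not part of the original card; the local path is an assumption about where that command saves the model, so adjust it to your layout.

```python
from stable_baselines3 import DQN
from stable_baselines3.common.env_util import make_atari_env
from stable_baselines3.common.vec_env import VecFrameStack

# Assumed path produced by the rl_zoo3.load_from_hub command above.
model = DQN.load("logs/dqn/SpaceInvadersNoFrameskip-v4_1/SpaceInvadersNoFrameskip-v4.zip")

# Recreate the evaluation setup implied by the hyperparameters:
# Atari wrappers plus a 4-frame stack (requires the Atari ROMs to be installed).
env = VecFrameStack(make_atari_env("SpaceInvadersNoFrameskip-v4", n_envs=1), n_stack=4)

obs = env.reset()
for _ in range(1000):
    action, _ = model.predict(obs, deterministic=True)
    obs, rewards, dones, infos = env.step(action)
```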
{"library_name": "stable-baselines3", "tags": ["SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "stable-baselines3"], "model-index": [{"name": "DQN", "results": [{"task": {"type": "reinforcement-learning", "name": "reinforcement-learning"}, "dataset": {"name": "SpaceInvadersNoFrameskip-v4", "type": "SpaceInvadersNoFrameskip-v4"}, "metrics": [{"type": "mean_reward", "value": "625.00 +/- 76.71", "name": "mean_reward", "verified": false}]}]}]}
vicha-w/dqn-SpaceInvadersNoFrameSkip-v4
null
[ "stable-baselines3", "SpaceInvadersNoFrameskip-v4", "deep-reinforcement-learning", "reinforcement-learning", "model-index", "region:us" ]
null
2024-04-20T20:15:30+00:00
[]
[]
TAGS #stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us
# DQN Agent playing SpaceInvadersNoFrameskip-v4 This is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4 using the stable-baselines3 library and the RL Zoo. The RL Zoo is a training framework for Stable Baselines3 reinforcement learning agents, with hyperparameter optimization and pre-trained agents included. ## Usage (with SB3 RL Zoo) RL Zoo: URL SB3: URL SB3 Contrib: URL Install the RL Zoo (with SB3 and SB3-Contrib): If you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do: ## Training (with the RL Zoo) ## Hyperparameters # Environment Arguments
[ "# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo.\n\nThe RL Zoo is a training framework for Stable Baselines3\nreinforcement learning agents,\nwith hyperparameter optimization and pre-trained agents included.", "## Usage (with SB3 RL Zoo)\n\nRL Zoo: URL\nSB3: URL\nSB3 Contrib: URL\n\nInstall the RL Zoo (with SB3 and SB3-Contrib):\n\n\n\n\nIf you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do:", "## Training (with the RL Zoo)", "## Hyperparameters", "# Environment Arguments" ]
[ "TAGS\n#stable-baselines3 #SpaceInvadersNoFrameskip-v4 #deep-reinforcement-learning #reinforcement-learning #model-index #region-us \n", "# DQN Agent playing SpaceInvadersNoFrameskip-v4\nThis is a trained model of a DQN agent playing SpaceInvadersNoFrameskip-v4\nusing the stable-baselines3 library\nand the RL Zoo.\n\nThe RL Zoo is a training framework for Stable Baselines3\nreinforcement learning agents,\nwith hyperparameter optimization and pre-trained agents included.", "## Usage (with SB3 RL Zoo)\n\nRL Zoo: URL\nSB3: URL\nSB3 Contrib: URL\n\nInstall the RL Zoo (with SB3 and SB3-Contrib):\n\n\n\n\nIf you installed the RL Zoo3 via pip ('pip install rl_zoo3'), from anywhere you can do:", "## Training (with the RL Zoo)", "## Hyperparameters", "# Environment Arguments" ]
null
transformers
## About <!-- ### quantize_version: 1 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: --> <!-- ### vocab_type: --> static quants of https://huggingface.co/Dogge/Tia-70B-RP <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/Tia-70B-RP-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q2_K.gguf) | Q2_K | 26.5 | | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.IQ3_XS.gguf) | IQ3_XS | 29.4 | | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.IQ3_S.gguf) | IQ3_S | 31.0 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q3_K_S.gguf) | Q3_K_S | 31.0 | | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.IQ3_M.gguf) | IQ3_M | 32.0 | | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q3_K_M.gguf) | Q3_K_M | 34.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q3_K_L.gguf) | Q3_K_L | 37.2 | | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.IQ4_XS.gguf) | IQ4_XS | 38.4 | | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q4_K_S.gguf) | Q4_K_S | 40.4 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q4_K_M.gguf) | Q4_K_M | 42.6 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q5_K_S.gguf) | Q5_K_S | 48.8 | | | [GGUF](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q5_K_M.gguf) | Q5_K_M | 50.1 | | | [PART 1](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q6_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q6_K.gguf.part2of2) | Q6_K | 58.0 | very good quality | | [PART 1](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q8_0.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/Tia-70B-RP-GGUF/resolve/main/Tia-70B-RP.Q8_0.gguf.part2of2) | Q8_0 | 75.1 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
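As the usage note above mentions, the larger quants (Q6_K and Q8_0) are split into multi-part files that must be concatenated into a single GGUF before loading. A minimal sketch, assuming both Q6_K parts from this repository have already been downloaded into the working directory:

```python
import shutil

# Part file names as listed in the quant table above.
parts = [
    "Tia-70B-RP.Q6_K.gguf.part1of2",
    "Tia-70B-RP.Q6_K.gguf.part2of2",
]

# Stream the parts into one file without loading them fully into RAM.
with open("Tia-70B-RP.Q6_K.gguf", "wb") as out:
    for part in parts:
        with open(part, "rb") as src:
            shutil.copyfileobj(src, out)
```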
{"language": ["en"], "license": "apache-2.0", "library_name": "transformers", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft"], "base_model": "Dogge/Tia-70B-RP", "quantized_by": "mradermacher"}
mradermacher/Tia-70B-RP-GGUF
null
[ "transformers", "gguf", "text-generation-inference", "unsloth", "llama", "trl", "sft", "en", "base_model:Dogge/Tia-70B-RP", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:17:19+00:00
[]
[ "en" ]
TAGS #transformers #gguf #text-generation-inference #unsloth #llama #trl #sft #en #base_model-Dogge/Tia-70B-RP #license-apache-2.0 #endpoints_compatible #region-us
About ----- static quants of URL weighted/imatrix quants are available at URL Usage ----- If you are unsure how to use GGUF files, refer to one of TheBloke's READMEs for more details, including on how to concatenate multi-part files. Provided Quants --------------- (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): !URL And here are Artefact2's thoughts on the matter: URL FAQ / Model Request ------------------- See URL for some answers to questions you might have and/or if you want some other model quantized. Thanks ------ I thank my company, nethype GmbH, for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.
[]
[ "TAGS\n#transformers #gguf #text-generation-inference #unsloth #llama #trl #sft #en #base_model-Dogge/Tia-70B-RP #license-apache-2.0 #endpoints_compatible #region-us \n" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2_ingredient This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on the None dataset. It achieves the following results on the evaluation set: - Loss: 1.8805 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 4.0245 | 0.83 | 500 | 2.0079 | | 2.0361 | 1.67 | 1000 | 1.9338 | | 1.9446 | 2.5 | 1500 | 1.9070 | | 1.9054 | 3.33 | 2000 | 1.8916 | | 1.8702 | 4.17 | 2500 | 1.8855 | | 1.8406 | 5.0 | 3000 | 1.8805 | ### Framework versions - Transformers 4.30.2 - Pytorch 1.7.0 - Datasets 2.13.2 - Tokenizers 0.13.3
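The card above does not include an inference example. A minimal sketch using the `transformers` text-generation pipeline is shown below; the repository id is taken from this record, while the prompt and generation settings are illustrative assumptions.

```python
from transformers import pipeline

# Repo id from this record; the prompt below is a made-up example.
generator = pipeline("text-generation", model="AkshayPM/gpt2_ingredient")

out = generator("chicken, garlic, ", max_new_tokens=40, do_sample=True, top_p=0.95)
print(out[0]["generated_text"])
```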
{"license": "mit", "tags": ["generated_from_trainer"], "model-index": [{"name": "gpt2_ingredient", "results": []}]}
AkshayPM/gpt2_ingredient
null
[ "transformers", "pytorch", "tensorboard", "gpt2", "text-generation", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:18:26+00:00
[]
[]
TAGS #transformers #pytorch #tensorboard #gpt2 #text-generation #generated_from_trainer #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
gpt2\_ingredient ================ This model is a fine-tuned version of gpt2 on the None dataset. It achieves the following results on the evaluation set: * Loss: 1.8805 Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training: * learning\_rate: 2e-05 * train\_batch\_size: 2 * eval\_batch\_size: 2 * seed: 42 * optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 * lr\_scheduler\_type: linear * num\_epochs: 5 ### Training results ### Framework versions * Transformers 4.30.2 * Pytorch 1.7.0 * Datasets 2.13.2 * Tokenizers 0.13.3
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 2\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 5", "### Training results", "### Framework versions\n\n\n* Transformers 4.30.2\n* Pytorch 1.7.0\n* Datasets 2.13.2\n* Tokenizers 0.13.3" ]
[ "TAGS\n#transformers #pytorch #tensorboard #gpt2 #text-generation #generated_from_trainer #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:\n\n\n* learning\\_rate: 2e-05\n* train\\_batch\\_size: 2\n* eval\\_batch\\_size: 2\n* seed: 42\n* optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n* lr\\_scheduler\\_type: linear\n* num\\_epochs: 5", "### Training results", "### Framework versions\n\n\n* Transformers 4.30.2\n* Pytorch 1.7.0\n* Datasets 2.13.2\n* Tokenizers 0.13.3" ]
null
null
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Llama-2-7b-chat-hf - GGUF - Model creator: https://huggingface.co/meta-llama/ - Original model: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Llama-2-7b-chat-hf.Q2_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q2_K.gguf) | Q2_K | 2.36GB | | [Llama-2-7b-chat-hf.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.IQ3_XS.gguf) | IQ3_XS | 2.6GB | | [Llama-2-7b-chat-hf.IQ3_S.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.IQ3_S.gguf) | IQ3_S | 2.75GB | | [Llama-2-7b-chat-hf.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q3_K_S.gguf) | Q3_K_S | 2.75GB | | [Llama-2-7b-chat-hf.IQ3_M.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.IQ3_M.gguf) | IQ3_M | 2.9GB | | [Llama-2-7b-chat-hf.Q3_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q3_K.gguf) | Q3_K | 3.07GB | | [Llama-2-7b-chat-hf.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q3_K_M.gguf) | Q3_K_M | 3.07GB | | [Llama-2-7b-chat-hf.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q3_K_L.gguf) | Q3_K_L | 3.35GB | | [Llama-2-7b-chat-hf.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.IQ4_XS.gguf) | IQ4_XS | 3.4GB | | [Llama-2-7b-chat-hf.Q4_0.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q4_0.gguf) | Q4_0 | 3.56GB | | [Llama-2-7b-chat-hf.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.IQ4_NL.gguf) | IQ4_NL | 3.58GB | | [Llama-2-7b-chat-hf.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q4_K_S.gguf) | Q4_K_S | 3.59GB | | [Llama-2-7b-chat-hf.Q4_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q4_K.gguf) | Q4_K | 3.8GB | | [Llama-2-7b-chat-hf.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q4_K_M.gguf) | Q4_K_M | 3.8GB | | [Llama-2-7b-chat-hf.Q4_1.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q4_1.gguf) | Q4_1 | 3.95GB | | [Llama-2-7b-chat-hf.Q5_0.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q5_0.gguf) | Q5_0 | 4.33GB | | [Llama-2-7b-chat-hf.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q5_K_S.gguf) | Q5_K_S | 4.33GB | | [Llama-2-7b-chat-hf.Q5_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q5_K.gguf) | Q5_K | 4.45GB | | 
[Llama-2-7b-chat-hf.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q5_K_M.gguf) | Q5_K_M | 4.45GB | | [Llama-2-7b-chat-hf.Q5_1.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q5_1.gguf) | Q5_1 | 4.72GB | | [Llama-2-7b-chat-hf.Q6_K.gguf](https://huggingface.co/RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf/blob/main/Llama-2-7b-chat-hf.Q6_K.gguf) | Q6_K | 5.15GB | Original model description: --- extra_gated_heading: You need to share contact information with Meta to access this model extra_gated_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at https://ai.meta.com/resources/models-and-libraries/llama-downloads/. "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at ai.meta.com/resources/models-and-libraries/llama-downloads/. "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. 
Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://ai.meta.com/llama/use-policy), which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. 
Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at [ai.meta.com/llama/use-policy](http://ai.meta.com/llama/use-policy). #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. 
Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 4. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) * Reporting risky content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) * Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: [[email protected]](mailto:[email protected]) extra_gated_fields: First Name: text Last Name: text Date of birth: date_picker Country: country Affiliation: text geo: ip_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra_gated_description: >- The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/). extra_gated_button_content: Submit language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-2 license: llama2 --- # **Llama 2** Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. ## Model Details *Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. 
**Model Developers** Meta **Variations** Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. ||Training Data|Params|Content Length|GQA|Tokens|LR| |---|---|---|---|---|---|---| |Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>| |Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>| *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. **Model Dates** Llama 2 was trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** ["Llama-2: Open Foundation and Fine-tuned Chat Models"](arxiv.org/abs/2307.09288) ## Intended Use **Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespaces and breaklines in between (we recommend calling `strip()` on inputs to avoid double-spaces). See our reference code in github for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212). **Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. ||Time (GPU hours)|Power Consumption (W)|Carbon Emitted(tCO<sub>2</sub>eq)| |---|---|---|---| |Llama 2 7B|184320|400|31.22| |Llama 2 13B|368640|400|62.44| |Llama 2 70B|1720320|400|291.42| |Total|3311616||539.00| **CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 
100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. ## Training Data **Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. ## Evaluation Results In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. |Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval| |---|---|---|---|---|---|---|---|---|---| |Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9| |Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9| |Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7| |Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6| |Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3| |Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1| |Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**| **Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. |||TruthfulQA|Toxigen| |---|---|---|---| |Llama 1|7B|27.42|23.00| |Llama 1|13B|41.74|23.08| |Llama 1|33B|44.19|22.57| |Llama 1|65B|48.71|21.77| |Llama 2|7B|33.29|**21.25**| |Llama 2|13B|41.86|26.10| |Llama 2|70B|**50.18**|24.60| **Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). |||TruthfulQA|Toxigen| |---|---|---|---| |Llama-2-Chat|7B|57.04|**0.00**| |Llama-2-Chat|13B|62.18|**0.00**| |Llama-2-Chat|70B|**64.14**|0.01| **Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above. ## Ethical Considerations and Limitations Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. 
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide) ## Reporting Issues Please report any software “bug,” or other problems with the models through one of the following means: - Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama) - Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback) - Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info) ## Llama Model Index |Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf| |---|---|---|---|---| |7B| [Link](https://huggingface.co/meta-llama/Llama-2-7b) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf)| |13B| [Link](https://huggingface.co/meta-llama/Llama-2-13b) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf)| |70B| [Link](https://huggingface.co/meta-llama/Llama-2-70b) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-hf) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat) | [Link](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf)|
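The Intended Use section above notes that the chat-tuned weights expect the `[INST]` and `<<SYS>>` formatting, with BOS/EOS tokens and careful whitespace handling. The sketch below builds a single-turn prompt in that layout; the system and user strings are placeholders, and the BOS/EOS tokens are left to the tokenizer or GGUF runtime rather than added by hand.

```python
def build_llama2_prompt(system_prompt: str, user_message: str) -> str:
    # Single-turn [INST] / <<SYS>> layout described in the card above.
    # BOS/EOS tokens are intentionally omitted here.
    return (
        f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
        f"{user_message.strip()} [/INST]"
    )

print(build_llama2_prompt(
    "You are a helpful assistant.",            # hypothetical system prompt
    "Explain what 4-bit quantization does.",   # hypothetical user message
))
```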
{}
RichardErkhov/meta-llama_-_Llama-2-7b-chat-hf-gguf
null
[ "gguf", "arxiv:2307.09288", "region:us" ]
null
2024-04-20T20:20:47+00:00
[ "2307.09288" ]
[]
TAGS #gguf #arxiv-2307.09288 #region-us
Quantization made by Richard Erkhov. Github Discord Request more models Llama-2-7b-chat-hf - GGUF * Model creator: URL * Original model: URL Name: Llama-2-7b-chat-hf.Q2\_K.gguf, Quant method: Q2\_K, Size: 2.36GB Name: Llama-2-7b-chat-hf.IQ3\_XS.gguf, Quant method: IQ3\_XS, Size: 2.6GB Name: Llama-2-7b-chat-hf.IQ3\_S.gguf, Quant method: IQ3\_S, Size: 2.75GB Name: Llama-2-7b-chat-hf.Q3\_K\_S.gguf, Quant method: Q3\_K\_S, Size: 2.75GB Name: Llama-2-7b-chat-hf.IQ3\_M.gguf, Quant method: IQ3\_M, Size: 2.9GB Name: Llama-2-7b-chat-hf.Q3\_K.gguf, Quant method: Q3\_K, Size: 3.07GB Name: Llama-2-7b-chat-hf.Q3\_K\_M.gguf, Quant method: Q3\_K\_M, Size: 3.07GB Name: Llama-2-7b-chat-hf.Q3\_K\_L.gguf, Quant method: Q3\_K\_L, Size: 3.35GB Name: Llama-2-7b-chat-hf.IQ4\_XS.gguf, Quant method: IQ4\_XS, Size: 3.4GB Name: Llama-2-7b-chat-hf.Q4\_0.gguf, Quant method: Q4\_0, Size: 3.56GB Name: Llama-2-7b-chat-hf.IQ4\_NL.gguf, Quant method: IQ4\_NL, Size: 3.58GB Name: Llama-2-7b-chat-hf.Q4\_K\_S.gguf, Quant method: Q4\_K\_S, Size: 3.59GB Name: Llama-2-7b-chat-hf.Q4\_K.gguf, Quant method: Q4\_K, Size: 3.8GB Name: Llama-2-7b-chat-hf.Q4\_K\_M.gguf, Quant method: Q4\_K\_M, Size: 3.8GB Name: Llama-2-7b-chat-hf.Q4\_1.gguf, Quant method: Q4\_1, Size: 3.95GB Name: Llama-2-7b-chat-hf.Q5\_0.gguf, Quant method: Q5\_0, Size: 4.33GB Name: Llama-2-7b-chat-hf.Q5\_K\_S.gguf, Quant method: Q5\_K\_S, Size: 4.33GB Name: Llama-2-7b-chat-hf.Q5\_K.gguf, Quant method: Q5\_K, Size: 4.45GB Name: Llama-2-7b-chat-hf.Q5\_K\_M.gguf, Quant method: Q5\_K\_M, Size: 4.45GB Name: Llama-2-7b-chat-hf.Q5\_1.gguf, Quant method: Q5\_1, Size: 4.72GB Name: Llama-2-7b-chat-hf.Q6\_K.gguf, Quant method: Q6\_K, Size: 5.15GB Original model description: --------------------------- extra\_gated\_heading: You need to share contact information with Meta to access this model extra\_gated\_prompt: >- ### LLAMA 2 COMMUNITY LICENSE AGREEMENT "Agreement" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein. "Documentation" means the specifications, manuals and documentation accompanying Llama 2 distributed by Meta at URL "Licensee" or "you" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity's behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf. "Llama 2" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at URL "Llama Materials" means, collectively, Meta's proprietary Llama 2 and documentation (and any portion thereof) made available under this Agreement. "Meta" or "we" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland). By clicking "I Accept" below or by using or distributing any portion or element of the Llama Materials, you agree to be bound by this Agreement. 1. License Rights and Redistribution. a. Grant of Rights. 
You are granted a non-exclusive, worldwide, non- transferable and royalty-free limited license under Meta's intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials. b. Redistribution and Use. i. If you distribute or make the Llama Materials, or any derivative works thereof, available to a third party, you shall provide a copy of this Agreement to such third party. ii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you. iii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a "Notice" text file distributed as a part of such copies: "Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved." iv. Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at URL which is hereby incorporated by reference into this Agreement. v. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Llama 2 or derivative works thereof). 2. Additional Commercial Terms. If, on the Llama 2 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee's affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights. 3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS. 4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING. 5. Intellectual Property. a. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials. b. Subject to Meta's ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications. c. 
If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Llama 2 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials. 6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement. 7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement. ### Llama 2 Acceptable Use Policy Meta is committed to promoting safe and fair use of its tools and features, including Llama 2. If you access or use Llama 2, you agree to this Acceptable Use Policy (“Policy”). The most recent copy of this policy can be found at URL #### Prohibited Uses We want everyone to use Llama 2 safely and responsibly. You agree you will not use, or allow others to use, Llama 2 to: 1. Violate the law or others’ rights, including to: 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as: 1. Violence or terrorism 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material 3. Human trafficking, exploitation, and sexual violence 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials. 5. Sexual solicitation 6. Any other criminal activity 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws 6. 
Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system 2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Llama 2 related to the following: 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State 2. Guns and illegal weapons (including weapon development) 3. Illegal drugs and regulated/controlled substances 4. Operation of critical infrastructure, transportation technologies, or heavy machinery 5. Self-harm or harm to others, including suicide, cutting, and eating disorders 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual 3. Intentionally deceive or mislead others, including use of Llama 2 related to the following: 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content 3. Generating, promoting, or further distributing spam 4. Impersonating another individual without consent, authorization, or legal right 5. Representing that the use of Llama 2 or outputs are human-generated 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement 7. Fail to appropriately disclose to end users any known dangers of your AI system Please report any violation of this Policy, software “bug,” or other problems that could lead to a violation of this Policy through one of the following means: * Reporting issues with the model: URL * Reporting risky content generated by the model: URL * Reporting bugs and security concerns: URL * Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL extra\_gated\_fields: First Name: text Last Name: text Date of birth: date\_picker Country: country Affiliation: text geo: ip\_location By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox extra\_gated\_description: >- The information you provide will be collected, stored, processed and shared in accordance with the Meta Privacy Policy. extra\_gated\_button\_content: Submit language: * en pipeline\_tag: text-generation tags: * facebook * meta * pytorch * llama * llama-2 license: llama2 --- Llama 2 ======= Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom. Model Details ------------- *Note: Use of this model is governed by the Meta license. 
In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.* Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM. Model Developers Meta Variations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations. Input Models input text only. Output Models generate text only. Model Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety. *Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability. Model Dates Llama 2 was trained between January 2023 and July 2023. Status This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. License A custom commercial license is available at: URL Research Paper "Llama-2: Open Foundation and Fine-tuned Chat Models" Intended Use ------------ Intended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\_completion'. Out-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2. Hardware and Software --------------------- Training Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. Carbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program. CO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. 
Training Data ------------- Overview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. Data Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023. Evaluation Results ------------------ In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library. Overall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1. Evaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better). Evaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above. Ethical Considerations and Limitations -------------------------------------- Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at URL Reporting Issues ---------------- Please report any software “bug,” or other problems with the models through one of the following means: * Reporting issues with the model: URL * Reporting problematic content generated by the model: URL * Reporting bugs and security concerns: URL Llama Model Index -----------------
[ "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
[ "TAGS\n#gguf #arxiv-2307.09288 #region-us \n", "### LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution\nand modification of the Llama Materials set forth herein.\n\n\n\"Documentation\" means the specifications, manuals and documentation\naccompanying Llama 2 distributed by Meta at\nURL\n\n\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity\n(if you are entering into this Agreement on such person or entity's behalf),\nof the age required under applicable laws, rules or regulations to provide\nlegal consent and that has legal authority to bind your employer or such other\nperson or entity if you are entering in this Agreement on their behalf.\n\n\n\"Llama 2\" means the foundational large language models and software and\nalgorithms, including machine-learning model code, trained model weights,\ninference-enabling code, training-enabling code, fine-tuning enabling code and\nother elements of the foregoing distributed by Meta at\nURL\n\n\n\"Llama Materials\" means, collectively, Meta's proprietary Llama 2 and\ndocumentation (and any portion thereof) made available under this Agreement.\n\n\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or,\nif you are an entity, your principal place of business is in the EEA or\nSwitzerland) and Meta Platforms, Inc. (if you are located outside of the EEA\nor Switzerland).\n\n\nBy clicking \"I Accept\" below or by using or distributing any portion or\nelement of the Llama Materials, you agree to be bound by this Agreement.\n\n\n1. License Rights and Redistribution.\n\n\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-\ntransferable and royalty-free limited license under Meta's intellectual\nproperty or other rights owned by Meta embodied in the Llama Materials to\nuse, reproduce, distribute, copy, create derivative works of, and make\nmodifications to the Llama Materials.\n\n\nb. Redistribution and Use.\n\n\ni. If you distribute or make the Llama Materials, or any derivative works\nthereof, available to a third party, you shall provide a copy of this\nAgreement to such third party.\n\n\nii. If you receive Llama Materials, or any derivative works thereof, from a\nLicensee as part of an integrated end user product, then Section 2 of this\nAgreement will not apply to you.\n\n\niii. You must retain in all copies of the Llama Materials that you distribute\nthe following attribution notice within a \"Notice\" text file distributed as a\npart of such copies: \"Llama 2 is licensed under the LLAMA 2 Community\nLicense, Copyright (c) Meta Platforms, Inc. All Rights Reserved.\"\n\n\niv. Your use of the Llama Materials must comply with applicable laws and\nregulations (including trade compliance laws and regulations) and adhere to\nthe Acceptable Use Policy for the Llama Materials (available at\nURL which is hereby incorporated by\nreference into this Agreement.\n\n\nv. You will not use the Llama Materials or any output or results of the Llama\nMaterials to improve any other large language model (excluding Llama 2 or\nderivative works thereof).\n\n\n2. Additional Commercial Terms. 
If, on the Llama 2 version release date, the\nmonthly active users of the products or services made available by or for\nLicensee, or Licensee's affiliates, is greater than 700 million monthly\nactive users in the preceding calendar month, you must request a license from\nMeta, which Meta may grant to you in its sole discretion, and you are not\nauthorized to exercise any of the rights under this Agreement unless or until\nMeta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA\nMATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \"AS IS\"\nBASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING,\nWITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT,\nMERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY\nRESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING\nTHE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE\nLLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE\nUNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE,\nPRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST\nPROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR\nPUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE\nPOSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\n\n\na. No trademark licenses are granted under this Agreement, and in connection\nwith the Llama Materials, neither Meta nor Licensee may use any name or mark\nowned by or associated with the other or any of its affiliates, except as\nrequired for reasonable and customary use in describing and redistributing\nthe Llama Materials.\n\n\nb. Subject to Meta's ownership of Llama Materials and derivatives made by or\nfor Meta, with respect to any derivative works and modifications of the Llama\nMaterials that are made by you, as between you and Meta, you are and will be\nthe owner of such derivative works and modifications.\n\n\nc. If you institute litigation or other proceedings against Meta or any\nentity (including a cross-claim or counterclaim in a lawsuit) alleging that\nthe Llama Materials or Llama 2 outputs or results, or any portion of any of\nthe foregoing, constitutes infringement of intellectual property or other\nrights owned or licensable by you, then any licenses granted to you under\nthis Agreement shall terminate as of the date such litigation or claim is\nfiled or instituted. You will indemnify and hold harmless Meta from and\nagainst any claim by any third party arising out of or related to your use or\ndistribution of the Llama Materials.\n\n\n6. Term and Termination. The term of this Agreement will commence upon your\nacceptance of this Agreement or access to the Llama Materials and will\ncontinue in full force and effect until terminated in accordance with the\nterms and conditions herein. Meta may terminate this Agreement if you are in\nbreach of any term or condition of this Agreement. Upon termination of this\nAgreement, you shall delete and cease use of the Llama Materials. Sections 3,\n4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and\nconstrued under the laws of the State of California without regard to choice\nof law principles, and the UN Convention on Contracts for the International\nSale of Goods does not apply to this Agreement. 
The courts of California\nshall have exclusive jurisdiction of any dispute arising out of this\nAgreement.", "### Llama 2 Acceptable Use Policy\n\n\nMeta is committed to promoting safe and fair use of its tools and features,\nincluding Llama 2. If you access or use Llama 2, you agree to this Acceptable\nUse Policy (“Policy”). The most recent copy of this policy can be found at\nURL", "#### Prohibited Uses\n\n\nWe want everyone to use Llama 2 safely and responsibly. You agree you will not\nuse, or allow others to use, Llama 2 to:\n\n\n1. Violate the law or others’ rights, including to:\n\t1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n\t\t1. Violence or terrorism\n\t\t2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n\t\t3. Human trafficking, exploitation, and sexual violence\n\t\t4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n\t\t5. Sexual solicitation\n\t\t6. Any other criminal activity\n\t2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n\t3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n\t4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n\t5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n\t6. Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama 2 Materials\n\t7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or\ndevelopment of activities that present a risk of death or bodily harm to\nindividuals, including use of Llama 2 related to the following:\n\t1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n\t2. Guns and illegal weapons (including weapon development)\n\t3. Illegal drugs and regulated/controlled substances\n\t4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n\t5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n\t6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Llama 2 related\nto the following:\n\t1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n\t2. 
Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n\t3. Generating, promoting, or further distributing spam\n\t4. Impersonating another individual without consent, authorization, or legal right\n\t5. Representing that the use of Llama 2 or outputs are human-generated\n\t6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n\t7. Fail to appropriately disclose to end users any known dangers of your AI system\n\tPlease report any violation of this Policy, software “bug,” or other problems\n\tthat could lead to a violation of this Policy through one of the following\n\tmeans:\n\t* Reporting issues with the model: URL\n\t* Reporting risky content generated by the model: URL\n\t* Reporting bugs and security concerns: URL\n\t* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama: LlamaUseReport@URL\n\textra\\_gated\\_fields:\n\tFirst Name: text\n\tLast Name: text\n\tDate of birth: date\\_picker\n\tCountry: country\n\tAffiliation: text\n\tgeo: ip\\_location\n\tBy clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy: checkbox\n\textra\\_gated\\_description: >-\n\tThe information you provide will be collected, stored, processed and shared in\n\taccordance with the Meta Privacy\n\tPolicy.\n\textra\\_gated\\_button\\_content: Submit\n\tlanguage:\n\n\n* en\npipeline\\_tag: text-generation\ntags:\n* facebook\n* meta\n* pytorch\n* llama\n* llama-2\nlicense: llama2\n\n\n\n\n---\n\n\nLlama 2\n=======\n\n\nLlama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.\n\n\nModel Details\n-------------\n\n\n*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the website and accept our License before requesting access here.*\n\n\nMeta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.\n\n\nModel Developers Meta\n\n\nVariations Llama 2 comes in a range of parameter sizes — 7B, 13B, and 70B — as well as pretrained and fine-tuned variations.\n\n\nInput Models input text only.\n\n\nOutput Models generate text only.\n\n\nModel Architecture Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.\n\n\n\n*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch-size of 4M tokens. 
Bigger models - 70B -- use Grouped-Query Attention (GQA) for improved inference scalability.\n\n\nModel Dates Llama 2 was trained between January 2023 and July 2023.\n\n\nStatus This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback.\n\n\nLicense A custom commercial license is available at: URL\n\n\nResearch Paper \"Llama-2: Open Foundation and Fine-tuned Chat Models\"\n\n\nIntended Use\n------------\n\n\nIntended Use Cases Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.\n\n\nTo get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the 'INST' and '<>' tags, 'BOS' and 'EOS' tokens, and the whitespaces and breaklines in between (we recommend calling 'strip()' on inputs to avoid double-spaces). See our reference code in github for details: 'chat\\_completion'.\n\n\nOut-of-scope Uses Use in any manner that violates applicable laws or regulations (including trade compliance laws).Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.\n\n\nHardware and Software\n---------------------\n\n\nTraining Factors We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.\n\n\nCarbon Footprint Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta’s sustainability program.\n\n\n\nCO2 emissions during pretraining. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.\n\n\nTraining Data\n-------------\n\n\nOverview Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.\n\n\nData Freshness The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.\n\n\nEvaluation Results\n------------------\n\n\nIn this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks.For all the evaluations, we use our internal evaluations library.\n\n\n\nOverall performance on grouped academic benchmarks. *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. 
*MATH:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.\n\n\n\nEvaluation of pretrained LLMs on automatic safety benchmarks. For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).\n\n\n\nEvaluation of fine-tuned LLMs on different safety datasets. Same metric definitions as above.\n\n\nEthical Considerations and Limitations\n--------------------------------------\n\n\nLlama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.\n\n\nPlease see the Responsible Use Guide available at URL\n\n\nReporting Issues\n----------------\n\n\nPlease report any software “bug,” or other problems with the models through one of the following means:\n\n\n* Reporting issues with the model: URL\n* Reporting problematic content generated by the model: URL\n* Reporting bugs and security concerns: URL\n\n\nLlama Model Index\n-----------------" ]
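The chat-formatting note in the Llama 2 card above (the 'INST' and system tags, 'BOS'/'EOS' tokens, and the recommended 'strip()' on inputs) is easier to see in code. The sketch below is only an illustration of that layout, assuming the usual `[INST]`/`<<SYS>>` markers from Meta's reference `chat_completion` code; it is not the reference implementation itself, and the example messages are made up.

```python
# Illustrative only: single-turn Llama-2-Chat prompt layout as described in
# the card above. The BOS/EOS tokens are normally added by the tokenizer.
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

def build_llama2_chat_prompt(system_msg: str, user_msg: str) -> str:
    # strip() both messages, per the card's advice, to avoid double spaces
    system_block = B_SYS + system_msg.strip() + E_SYS
    return f"{B_INST} {system_block}{user_msg.strip()} {E_INST}"

print(build_llama2_chat_prompt(
    "You are a helpful assistant.",          # hypothetical system prompt
    "Summarize grouped-query attention.",    # hypothetical user turn
))
```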
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CS505_COQE_viT5_train_Instruction1_SOAPL This model is a fine-tuned version of [VietAI/vit5-large](https://huggingface.co/VietAI/vit5-large) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "mit", "tags": ["generated_from_trainer"], "base_model": "VietAI/vit5-large", "model-index": [{"name": "CS505_COQE_viT5_train_Instruction1_SOAPL", "results": []}]}
ThuyNT/CS505_COQE_viT5_train_Instruction1_SOAPL
null
[ "transformers", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:VietAI/vit5-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:24:18+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-VietAI/vit5-large #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# CS505_COQE_viT5_train_Instruction1_SOAPL This model is a fine-tuned version of VietAI/vit5-large on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# CS505_COQE_viT5_train_Instruction1_SOAPL\n\nThis model is a fine-tuned version of VietAI/vit5-large on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 8\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 20\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-VietAI/vit5-large #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# CS505_COQE_viT5_train_Instruction1_SOAPL\n\nThis model is a fine-tuned version of VietAI/vit5-large on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 8\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 20\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-generation
transformers
# yujiepan/Meta-Llama-3-8B-awq-w4g64

This model applies AutoAWQ on [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B).

- 4-bit asymmetric weight-only quantization
- group_size=64
- calibration set: pileval

## Accuracy

| model | precision | wikitext ppl (↓) |
|-|-|-|
| meta-llama/Meta-Llama-3-8B | FP16 | 9.179 |
| yujiepan/Meta-Llama-3-8B-awq-w4g64 | w4g64 | 9.219 |

Note:
- Evaluated on lm-evaluation-harness "wikitext" task
- Wikitext PPL does not guarantee actual accuracy, but helps to check the distortion after quantization.

## Usage

```python
from awq import AutoAWQForCausalLM
model = AutoAWQForCausalLM.from_quantized('yujiepan/Meta-Llama-3-8B-awq-w4g64')
```

## Codes

```python
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_path = "meta-llama/Meta-Llama-3-8B"
quant_config = {"zero_point": True, "q_group_size": 64, "w_bit": 4, "version": "GEMM"}

model = AutoAWQForCausalLM.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

model.quantize(tokenizer, quant_config=quant_config)
```
{"library_name": "transformers", "tags": []}
yujiepan/Meta-Llama-3-8B-awq-w4g64
null
[ "transformers", "safetensors", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-20T20:24:46+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
yujiepan/Meta-Llama-3-8B-awq-w4g64
==================================

This model applies AutoAWQ on meta-llama/Meta-Llama-3-8B.

* 4-bit asymmetric weight-only quantization
* group\_size=64
* calibration set: pileval

Accuracy
--------

model: meta-llama/Meta-Llama-3-8B, precision: FP16, wikitext ppl (↓): 9.179
model: yujiepan/Meta-Llama-3-8B-awq-w4g64, precision: w4g64, wikitext ppl (↓): 9.219

Note:

* Evaluated on lm-evaluation-harness "wikitext" task
* Wikitext PPL does not guarantee actual accuracy, but helps to check the distortion after quantization.

Usage
-----

Codes
-----
[]
[ "TAGS\n#transformers #safetensors #llama #text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n" ]
text-generation
transformers
# ResplendentAI/Aura_L3_8B AWQ - Model creator: [ResplendentAI](https://huggingface.co/ResplendentAI) - Original model: [Aura_L3_8B](https://huggingface.co/ResplendentAI/Aura_L3_8B) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/626dfb8786671a29c715f8a9/Bz0CQP-s2PSXpW9eh1jNC.png) ## Model Summary The next evolution in Aura models, trained on 6 separate datasets and ready to bring you to your knees. I am so happy to be one of the first with a finetune of this amazing model. I hope that you all enjoy the finetune as much as I know I will.
{"library_name": "transformers", "tags": ["4-bit", "AWQ", "text-generation", "autotrain_compatible", "endpoints_compatible"], "pipeline_tag": "text-generation", "inference": false, "quantized_by": "Suparious"}
solidrust/Aura_L3_8B-AWQ
null
[ "transformers", "safetensors", "llama", "text-generation", "4-bit", "AWQ", "autotrain_compatible", "endpoints_compatible", "conversational", "text-generation-inference", "region:us" ]
null
2024-04-20T20:30:18+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #4-bit #AWQ #autotrain_compatible #endpoints_compatible #conversational #text-generation-inference #region-us
# ResplendentAI/Aura_L3_8B AWQ - Model creator: ResplendentAI - Original model: Aura_L3_8B !image/png ## Model Summary The next evolution in Aura models, trained on 6 separate datasets and ready to bring you to your knees. I am so happy to be one of the first with a finetune of this amazing model. I hope that you all enjoy the finetune as much as I know I will.
[ "# ResplendentAI/Aura_L3_8B AWQ\n\n- Model creator: ResplendentAI\n- Original model: Aura_L3_8B\n\n!image/png", "## Model Summary\n\nThe next evolution in Aura models, trained on 6 separate datasets and ready to bring you to your knees.\n\nI am so happy to be one of the first with a finetune of this amazing model. I hope that you all enjoy the finetune as much as I know I will." ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #4-bit #AWQ #autotrain_compatible #endpoints_compatible #conversational #text-generation-inference #region-us \n", "# ResplendentAI/Aura_L3_8B AWQ\n\n- Model creator: ResplendentAI\n- Original model: Aura_L3_8B\n\n!image/png", "## Model Summary\n\nThe next evolution in Aura models, trained on 6 separate datasets and ready to bring you to your knees.\n\nI am so happy to be one of the first with a finetune of this amazing model. I hope that you all enjoy the finetune as much as I know I will." ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CS505_COQE_viT5_train_Instruction2_SOAPL This model is a fine-tuned version of [VietAI/vit5-large](https://huggingface.co/VietAI/vit5-large) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "mit", "tags": ["generated_from_trainer"], "base_model": "VietAI/vit5-large", "model-index": [{"name": "CS505_COQE_viT5_train_Instruction2_SOAPL", "results": []}]}
ThuyNT/CS505_COQE_viT5_train_Instruction2_SOAPL
null
[ "transformers", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:VietAI/vit5-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:30:55+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-VietAI/vit5-large #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# CS505_COQE_viT5_train_Instruction2_SOAPL This model is a fine-tuned version of VietAI/vit5-large on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# CS505_COQE_viT5_train_Instruction2_SOAPL\n\nThis model is a fine-tuned version of VietAI/vit5-large on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 4\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 20\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-VietAI/vit5-large #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# CS505_COQE_viT5_train_Instruction2_SOAPL\n\nThis model is a fine-tuned version of VietAI/vit5-large on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 4\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 20\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CS505_COQE_viT5_train_Instruction3_SOAPL This model is a fine-tuned version of [VietAI/vit5-large](https://huggingface.co/VietAI/vit5-large) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
{"license": "mit", "tags": ["generated_from_trainer"], "base_model": "VietAI/vit5-large", "model-index": [{"name": "CS505_COQE_viT5_train_Instruction3_SOAPL", "results": []}]}
ThuyNT/CS505_COQE_viT5_train_Instruction3_SOAPL
null
[ "transformers", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:VietAI/vit5-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:33:41+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-VietAI/vit5-large #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# CS505_COQE_viT5_train_Instruction3_SOAPL This model is a fine-tuned version of VietAI/vit5-large on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# CS505_COQE_viT5_train_Instruction3_SOAPL\n\nThis model is a fine-tuned version of VietAI/vit5-large on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 4\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 20\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-VietAI/vit5-large #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# CS505_COQE_viT5_train_Instruction3_SOAPL\n\nThis model is a fine-tuned version of VietAI/vit5-large on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 4\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 20\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
null
transformers
# Model Card for Model ID Just only a text gen model type, I'm just test train my dataset and...it's work, very nice, try it. ## Model Details - [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1fhVByew053W8SdbacFryIx3luhFkEa1M?usp=sharing) ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. - **Developed by:** **HuyRemy** - **Funded by :** **HuyRemy** - **Shared by :** **HuyRemy** - **Model type:** **Megatron Mistral** - **License:** [email protected] ### Model Demo: - **Demo :** https://ai.matilda.vn ## Uses **USE T4 GPU** ```Python !pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git" !pip install --no-deps xformers trl peft accelerate bitsandbytes ``` ### Direct Use ``` Python from unsloth import FastLanguageModel import torch max_seq_length = 2048 dtype = None load_in_4bit = True alpaca_prompt = """ ### Instruction: {} ### Input: {} ### Response: {}""" def formatting_prompts_func(examples): instructions = examples["instruction"] inputs = examples["input"] outputs = examples["output"] texts = [] for instruction, input, output in zip(instructions, inputs, outputs): text = alpaca_prompt.format(instruction, input, output) + EOS_TOKEN texts.append(text) return { "text" : texts, } pass model, tokenizer = FastLanguageModel.from_pretrained( model_name = "huyremy/aichat", max_seq_length = max_seq_length, dtype = dtype, load_in_4bit = load_in_4bit, ) FastLanguageModel.for_inference(model) EOS_TOKEN = tokenizer.eos_token inputs = tokenizer( [ alpaca_prompt.format( "who is Nguyễn Phú Trọng?", "", "", ), ], return_tensors = "pt").to("cuda") outputs = model.generate(**inputs, max_new_tokens = 64, use_cache = True) tokenizer.batch_decode(outputs) ``` ## Model Card Contact [email protected]
{"license": "apache-2.0", "library_name": "transformers"}
HuyRemy/aichat
null
[ "transformers", "safetensors", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:37:52+00:00
[]
[]
TAGS #transformers #safetensors #license-apache-2.0 #endpoints_compatible #region-us
# Model Card for Model ID Just only a text gen model type, I'm just test train my dataset and...it's work, very nice, try it. ## Model Details - ![Open In Colab](URL ### Model Description This is the model card of a transformers model that has been pushed on the Hub. - Developed by: HuyRemy - Funded by : HuyRemy - Shared by : HuyRemy - Model type: Megatron Mistral - License: huynq@URL ### Model Demo: - Demo : URL ## Uses USE T4 GPU ### Direct Use ## Model Card Contact huynq@URL
[ "# Model Card for Model ID\n\nJust only a text gen model type, I'm just test train my dataset and...it's work, very nice, try it.", "## Model Details\n- ![Open In Colab](URL", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. \n\n- Developed by: HuyRemy\n- Funded by : HuyRemy\n- Shared by : HuyRemy\n- Model type: Megatron Mistral \n- License: huynq@URL", "### Model Demo:\n\n- Demo : URL", "## Uses\n\nUSE T4 GPU", "### Direct Use", "## Model Card Contact\n\nhuynq@URL" ]
[ "TAGS\n#transformers #safetensors #license-apache-2.0 #endpoints_compatible #region-us \n", "# Model Card for Model ID\n\nJust only a text gen model type, I'm just test train my dataset and...it's work, very nice, try it.", "## Model Details\n- ![Open In Colab](URL", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. \n\n- Developed by: HuyRemy\n- Funded by : HuyRemy\n- Shared by : HuyRemy\n- Model type: Megatron Mistral \n- License: huynq@URL", "### Model Demo:\n\n- Demo : URL", "## Uses\n\nUSE T4 GPU", "### Direct Use", "## Model Card Contact\n\nhuynq@URL" ]
null
peft
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.10.0
{"library_name": "peft", "base_model": "meta-llama/Meta-Llama-3-8B-Instruct"}
Fredithefish/Llama3RPInstruct-chkpt-250
null
[ "peft", "safetensors", "arxiv:1910.09700", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "region:us" ]
null
2024-04-20T20:38:05+00:00
[ "1910.09700" ]
[]
TAGS #peft #safetensors #arxiv-1910.09700 #base_model-meta-llama/Meta-Llama-3-8B-Instruct #region-us
# Model Card for Model ID ## Model Details ### Model Description - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact ### Framework versions - PEFT 0.10.0
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
[ "TAGS\n#peft #safetensors #arxiv-1910.09700 #base_model-meta-llama/Meta-Llama-3-8B-Instruct #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\n\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact", "### Framework versions\n\n- PEFT 0.10.0" ]
unconditional-image-generation
diffusers
# Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. ## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('yaochx/sd-class-butterflies-32') image = pipeline().images[0] image ```
{"license": "mit", "tags": ["pytorch", "diffusers", "unconditional-image-generation", "diffusion-models-class"]}
yaochx/sd-class-butterflies-32
null
[ "diffusers", "safetensors", "pytorch", "unconditional-image-generation", "diffusion-models-class", "license:mit", "diffusers:DDPMPipeline", "region:us" ]
null
2024-04-20T20:38:49+00:00
[]
[]
TAGS #diffusers #safetensors #pytorch #unconditional-image-generation #diffusion-models-class #license-mit #diffusers-DDPMPipeline #region-us
# Model Card for Unit 1 of the Diffusion Models Class This model is a diffusion model for unconditional image generation of cute . ## Usage
[ "# Model Card for Unit 1 of the Diffusion Models Class \n\nThis model is a diffusion model for unconditional image generation of cute .", "## Usage" ]
[ "TAGS\n#diffusers #safetensors #pytorch #unconditional-image-generation #diffusion-models-class #license-mit #diffusers-DDPMPipeline #region-us \n", "# Model Card for Unit 1 of the Diffusion Models Class \n\nThis model is a diffusion model for unconditional image generation of cute .", "## Usage" ]
text2text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # CS505_COQE_viT5_train_Instruction4_SOAPL This model is a fine-tuned version of [VietAI/vit5-large](https://huggingface.co/VietAI/vit5-large) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
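The card above stops at the training hyperparameters and gives no usage snippet. As a rough, hedged sketch (not part of the original card), the checkpoint named in this record could be loaded with the standard transformers seq2seq API roughly as follows; the input format is a placeholder, since the prompt template used during fine-tuning is not documented:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Checkpoint id taken from this record; the prompt format below is a guess, not the documented one
model_id = "ThuyNT/CS505_COQE_viT5_train_Instruction4_SOAPL"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# Placeholder input; adapt this to whatever instruction format the model was actually trained with
text = "Extract (subject, object, aspect, predicate, label) from: <input sentence>"
inputs = tokenizer(text, return_tensors="pt")

outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```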
{"license": "mit", "tags": ["generated_from_trainer"], "base_model": "VietAI/vit5-large", "model-index": [{"name": "CS505_COQE_viT5_train_Instruction4_SOAPL", "results": []}]}
ThuyNT/CS505_COQE_viT5_train_Instruction4_SOAPL
null
[ "transformers", "tensorboard", "safetensors", "t5", "text2text-generation", "generated_from_trainer", "base_model:VietAI/vit5-large", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:39:47+00:00
[]
[]
TAGS #transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-VietAI/vit5-large #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# CS505_COQE_viT5_train_Instruction4_SOAPL This model is a fine-tuned version of VietAI/vit5-large on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 20 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# CS505_COQE_viT5_train_Instruction4_SOAPL\n\nThis model is a fine-tuned version of VietAI/vit5-large on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 4\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 20\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #tensorboard #safetensors #t5 #text2text-generation #generated_from_trainer #base_model-VietAI/vit5-large #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# CS505_COQE_viT5_train_Instruction4_SOAPL\n\nThis model is a fine-tuned version of VietAI/vit5-large on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 4\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 20\n- mixed_precision_training: Native AMP", "### Training results", "### Framework versions\n\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
ripaaiii/fine-tune-C1-revised
null
[ "transformers", "safetensors", "vision-encoder-decoder", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:40:27+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #vision-encoder-decoder #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #vision-encoder-decoder #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
null
adapter-transformers
# Adapter `BigTMiami/m_cite_par_bn_v_4_pretrain_adapter` for roberta-base An [adapter](https://adapterhub.ml) for the `roberta-base` model that was trained on the [BigTMiami/citation_intent_dataset_condensed](https://huggingface.co/datasets/BigTMiami/citation_intent_dataset_condensed/) dataset and includes a prediction head for masked lm. This adapter was created for usage with the **[Adapters](https://github.com/Adapter-Hub/adapters)** library. ## Usage First, install `adapters`: ``` pip install -U adapters ``` Now, the adapter can be loaded and activated like this: ```python from adapters import AutoAdapterModel model = AutoAdapterModel.from_pretrained("roberta-base") adapter_name = model.load_adapter("BigTMiami/m_cite_par_bn_v_4_pretrain_adapter", source="hf", set_active=True) ``` ## Architecture & Training <!-- Add some description here --> ## Evaluation results <!-- Add some description here --> ## Citation <!-- Add some description here -->
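The card's snippet ends once the adapter is active. Purely as an illustration (not from the original card), a masked-token prediction call might look like the sketch below, assuming the bundled prediction head returns standard masked-LM logits:

```python
import torch
from adapters import AutoAdapterModel
from transformers import AutoTokenizer

model = AutoAdapterModel.from_pretrained("roberta-base")
model.load_adapter("BigTMiami/m_cite_par_bn_v_4_pretrain_adapter", source="hf", set_active=True)
tokenizer = AutoTokenizer.from_pretrained("roberta-base")

inputs = tokenizer("We follow the evaluation protocol of prior <mask>.", return_tensors="pt")

with torch.no_grad():
    # Assumption: the active masked-LM head exposes a .logits tensor like transformers heads do
    logits = model(**inputs).logits

# Find the masked position and print the top-5 predicted tokens
mask_pos = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
top_ids = logits[0, mask_pos].topk(5, dim=-1).indices[0]
print([tokenizer.decode([tid]) for tid in top_ids.tolist()])
```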
{"tags": ["roberta", "adapter-transformers"], "datasets": ["BigTMiami/citation_intent_dataset_condensed"]}
BigTMiami/m_cite_par_bn_v_4_pretrain_adapter
null
[ "adapter-transformers", "roberta", "dataset:BigTMiami/citation_intent_dataset_condensed", "region:us" ]
null
2024-04-20T20:43:06+00:00
[]
[]
TAGS #adapter-transformers #roberta #dataset-BigTMiami/citation_intent_dataset_condensed #region-us
# Adapter 'BigTMiami/m_cite_par_bn_v_4_pretrain_adapter' for roberta-base An adapter for the 'roberta-base' model that was trained on the BigTMiami/citation_intent_dataset_condensed dataset and includes a prediction head for masked lm. This adapter was created for usage with the Adapters library. ## Usage First, install 'adapters': Now, the adapter can be loaded and activated like this: ## Architecture & Training ## Evaluation results
[ "# Adapter 'BigTMiami/m_cite_par_bn_v_4_pretrain_adapter' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/citation_intent_dataset_condensed dataset and includes a prediction head for masked lm.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
[ "TAGS\n#adapter-transformers #roberta #dataset-BigTMiami/citation_intent_dataset_condensed #region-us \n", "# Adapter 'BigTMiami/m_cite_par_bn_v_4_pretrain_adapter' for roberta-base\n\nAn adapter for the 'roberta-base' model that was trained on the BigTMiami/citation_intent_dataset_condensed dataset and includes a prediction head for masked lm.\n\nThis adapter was created for usage with the Adapters library.", "## Usage\n\nFirst, install 'adapters':\n\n\n\nNow, the adapter can be loaded and activated like this:", "## Architecture & Training", "## Evaluation results" ]
text-generation
mlx
# voxmenthe/Meta-Llama-3-8B-Instruct-mlx-unquantized This model was converted to MLX format from [`meta-llama/Meta-Llama-3-8B-Instruct`]() using mlx-lm version **0.10.0**. Refer to the [original model card](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) for more details on the model. ## Use with mlx ```bash pip install mlx-lm ``` ```python from mlx_lm import load, generate model, tokenizer = load("voxmenthe/Meta-Llama-3-8B-Instruct-mlx-unquantized") response = generate(model, tokenizer, prompt="hello", verbose=True) ```
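As a small optional extension of the snippet above (not part of the original card), an Instruct checkpoint generally responds better when the prompt is wrapped in the Llama 3 chat template, which the mlx-lm tokenizer wrapper should expose via the underlying Hugging Face tokenizer:

```python
from mlx_lm import load, generate

model, tokenizer = load("voxmenthe/Meta-Llama-3-8B-Instruct-mlx-unquantized")

# Wrap the user message in the chat template before generating
messages = [{"role": "user", "content": "hello"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```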
{"language": ["en"], "license": "other", "tags": ["facebook", "meta", "pytorch", "llama", "llama-3", "mlx"], "pipeline_tag": "text-generation", "license_name": "llama3", "license_link": "LICENSE", "extra_gated_prompt": "### META LLAMA 3 COMMUNITY LICENSE AGREEMENT\nMeta Llama 3 Version Release Date: April 18, 2024\n\"Agreement\" means the terms and conditions for use, reproduction, distribution and modification of the Llama Materials set forth herein.\n\"Documentation\" means the specifications, manuals and documentation accompanying Meta Llama 3 distributed by Meta at https://llama.meta.com/get-started/.\n\"Licensee\" or \"you\" means you, or your employer or any other person or entity (if you are entering into this Agreement on such person or entity\u2019s behalf), of the age required under applicable laws, rules or regulations to provide legal consent and that has legal authority to bind your employer or such other person or entity if you are entering in this Agreement on their behalf.\n\"Meta Llama 3\" means the foundational large language models and software and algorithms, including machine-learning model code, trained model weights, inference-enabling code, training-enabling code, fine-tuning enabling code and other elements of the foregoing distributed by Meta at https://llama.meta.com/llama-downloads.\n\"Llama Materials\" means, collectively, Meta\u2019s proprietary Meta Llama 3 and Documentation (and any portion thereof) made available under this Agreement.\n\"Meta\" or \"we\" means Meta Platforms Ireland Limited (if you are located in or, if you are an entity, your principal place of business is in the EEA or Switzerland) and Meta Platforms, Inc. (if you are located outside of the EEA or Switzerland).\n \n1. License Rights and Redistribution.\na. Grant of Rights. You are granted a non-exclusive, worldwide, non-transferable and royalty-free limited license under Meta\u2019s intellectual property or other rights owned by Meta embodied in the Llama Materials to use, reproduce, distribute, copy, create derivative works of, and make modifications to the Llama Materials.\nb. Redistribution and Use.\ni. If you distribute or make available the Llama Materials (or any derivative works thereof), or a product or service that uses any of them, including another AI model, you shall (A) provide a copy of this Agreement with any such Llama Materials; and (B) prominently display \u201cBuilt with Meta Llama 3\u201d on a related website, user interface, blogpost, about page, or product documentation. If you use the Llama Materials to create, train, fine tune, or otherwise improve an AI model, which is distributed or made available, you shall also include \u201cLlama 3\u201d at the beginning of any such AI model name.\nii. If you receive Llama Materials, or any derivative works thereof, from a Licensee as part of an integrated end user product, then Section 2 of this Agreement will not apply to you.\niii. You must retain in all copies of the Llama Materials that you distribute the following attribution notice within a \u201cNotice\u201d text file distributed as a part of such copies: \u201cMeta Llama 3 is licensed under the Meta Llama 3 Community License, Copyright \u00a9 Meta Platforms, Inc. All Rights Reserved.\u201d\niv. 
Your use of the Llama Materials must comply with applicable laws and regulations (including trade compliance laws and regulations) and adhere to the Acceptable Use Policy for the Llama Materials (available at https://llama.meta.com/llama3/use-policy), which is hereby incorporated by reference into this Agreement.\nv. You will not use the Llama Materials or any output or results of the Llama Materials to improve any other large language model (excluding Meta Llama 3 or derivative works thereof).\n2. Additional Commercial Terms. If, on the Meta Llama 3 version release date, the monthly active users of the products or services made available by or for Licensee, or Licensee\u2019s affiliates, is greater than 700 million monthly active users in the preceding calendar month, you must request a license from Meta, which Meta may grant to you in its sole discretion, and you are not authorized to exercise any of the rights under this Agreement unless or until Meta otherwise expressly grants you such rights.\n3. Disclaimer of Warranty. UNLESS REQUIRED BY APPLICABLE LAW, THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS THEREFROM ARE PROVIDED ON AN \u201cAS IS\u201d BASIS, WITHOUT WARRANTIES OF ANY KIND, AND META DISCLAIMS ALL WARRANTIES OF ANY KIND, BOTH EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, ANY WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE. YOU ARE SOLELY RESPONSIBLE FOR DETERMINING THE APPROPRIATENESS OF USING OR REDISTRIBUTING THE LLAMA MATERIALS AND ASSUME ANY RISKS ASSOCIATED WITH YOUR USE OF THE LLAMA MATERIALS AND ANY OUTPUT AND RESULTS.\n4. Limitation of Liability. IN NO EVENT WILL META OR ITS AFFILIATES BE LIABLE UNDER ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, TORT, NEGLIGENCE, PRODUCTS LIABILITY, OR OTHERWISE, ARISING OUT OF THIS AGREEMENT, FOR ANY LOST PROFITS OR ANY INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL, EXEMPLARY OR PUNITIVE DAMAGES, EVEN IF META OR ITS AFFILIATES HAVE BEEN ADVISED OF THE POSSIBILITY OF ANY OF THE FOREGOING.\n5. Intellectual Property.\na. No trademark licenses are granted under this Agreement, and in connection with the Llama Materials, neither Meta nor Licensee may use any name or mark owned by or associated with the other or any of its affiliates, except as required for reasonable and customary use in describing and redistributing the Llama Materials or as set forth in this Section 5(a). Meta hereby grants you a license to use \u201cLlama 3\u201d (the \u201cMark\u201d) solely as required to comply with the last sentence of Section 1.b.i. You will comply with Meta\u2019s brand guidelines (currently accessible at https://about.meta.com/brand/resources/meta/company-brand/ ). All goodwill arising out of your use of the Mark will inure to the benefit of Meta.\nb. Subject to Meta\u2019s ownership of Llama Materials and derivatives made by or for Meta, with respect to any derivative works and modifications of the Llama Materials that are made by you, as between you and Meta, you are and will be the owner of such derivative works and modifications.\nc. 
If you institute litigation or other proceedings against Meta or any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Llama Materials or Meta Llama 3 outputs or results, or any portion of any of the foregoing, constitutes infringement of intellectual property or other rights owned or licensable by you, then any licenses granted to you under this Agreement shall terminate as of the date such litigation or claim is filed or instituted. You will indemnify and hold harmless Meta from and against any claim by any third party arising out of or related to your use or distribution of the Llama Materials.\n6. Term and Termination. The term of this Agreement will commence upon your acceptance of this Agreement or access to the Llama Materials and will continue in full force and effect until terminated in accordance with the terms and conditions herein. Meta may terminate this Agreement if you are in breach of any term or condition of this Agreement. Upon termination of this Agreement, you shall delete and cease use of the Llama Materials. Sections 3, 4 and 7 shall survive the termination of this Agreement.\n7. Governing Law and Jurisdiction. This Agreement will be governed and construed under the laws of the State of California without regard to choice of law principles, and the UN Convention on Contracts for the International Sale of Goods does not apply to this Agreement. The courts of California shall have exclusive jurisdiction of any dispute arising out of this Agreement.\n### Meta Llama 3 Acceptable Use Policy\nMeta is committed to promoting safe and fair use of its tools and features, including Meta Llama 3. If you access or use Meta Llama 3, you agree to this Acceptable Use Policy (\u201cPolicy\u201d). The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy](https://llama.meta.com/llama3/use-policy)\n#### Prohibited Uses\nWe want everyone to use Meta Llama 3 safely and responsibly. You agree you will not use, or allow others to use, Meta Llama 3 to: 1. Violate the law or others\u2019 rights, including to:\n 1. Engage in, promote, generate, contribute to, encourage, plan, incite, or further illegal or unlawful activity or content, such as:\n 1. Violence or terrorism\n 2. Exploitation or harm to children, including the solicitation, creation, acquisition, or dissemination of child exploitative content or failure to report Child Sexual Abuse Material\n 3. Human trafficking, exploitation, and sexual violence\n 4. The illegal distribution of information or materials to minors, including obscene materials, or failure to employ legally required age-gating in connection with such information or materials.\n 5. Sexual solicitation\n 6. Any other criminal activity\n 2. Engage in, promote, incite, or facilitate the harassment, abuse, threatening, or bullying of individuals or groups of individuals\n 3. Engage in, promote, incite, or facilitate discrimination or other unlawful or harmful conduct in the provision of employment, employment benefits, credit, housing, other economic benefits, or other essential goods and services\n 4. Engage in the unauthorized or unlicensed practice of any profession including, but not limited to, financial, legal, medical/health, or related professional practices\n 5. Collect, process, disclose, generate, or infer health, demographic, or other sensitive personal or private information about individuals without rights and consents required by applicable laws\n 6. 
Engage in or facilitate any action or generate any content that infringes, misappropriates, or otherwise violates any third-party rights, including the outputs or results of any products or services using the Llama Materials\n 7. Create, generate, or facilitate the creation of malicious code, malware, computer viruses or do anything else that could disable, overburden, interfere with or impair the proper working, integrity, operation or appearance of a website or computer system\n2. Engage in, promote, incite, facilitate, or assist in the planning or development of activities that present a risk of death or bodily harm to individuals, including use of Meta Llama 3 related to the following:\n 1. Military, warfare, nuclear industries or applications, espionage, use for materials or activities that are subject to the International Traffic Arms Regulations (ITAR) maintained by the United States Department of State\n 2. Guns and illegal weapons (including weapon development)\n 3. Illegal drugs and regulated/controlled substances\n 4. Operation of critical infrastructure, transportation technologies, or heavy machinery\n 5. Self-harm or harm to others, including suicide, cutting, and eating disorders\n 6. Any content intended to incite or promote violence, abuse, or any infliction of bodily harm to an individual\n3. Intentionally deceive or mislead others, including use of Meta Llama 3 related to the following:\n 1. Generating, promoting, or furthering fraud or the creation or promotion of disinformation\n 2. Generating, promoting, or furthering defamatory content, including the creation of defamatory statements, images, or other content\n 3. Generating, promoting, or further distributing spam\n 4. Impersonating another individual without consent, authorization, or legal right\n 5. Representing that the use of Meta Llama 3 or outputs are human-generated\n 6. Generating or facilitating false online engagement, including fake reviews and other means of fake online engagement\n4. Fail to appropriately disclose to end users any known dangers of your AI system\nPlease report any violation of this Policy, software \u201cbug,\u201d or other problems that could lead to a violation of this Policy through one of the following means:\n * Reporting issues with the model: [https://github.com/meta-llama/llama3](https://github.com/meta-llama/llama3)\n * Reporting risky content generated by the model:\n developers.facebook.com/llama_output_feedback\n * Reporting bugs and security concerns: facebook.com/whitehat/info\n * Reporting violations of the Acceptable Use Policy or unlicensed uses of Meta Llama 3: [email protected]", "extra_gated_fields": {"First Name": "text", "Last Name": "text", "Date of birth": "date_picker", "Country": "country", "Affiliation": "text", "geo": "ip_location", "By clicking Submit below I accept the terms of the license and acknowledge that the information I provide will be collected stored processed and shared in accordance with the Meta Privacy Policy": "checkbox"}, "extra_gated_description": "The information you provide will be collected, stored, processed and shared in accordance with the [Meta Privacy Policy](https://www.facebook.com/privacy/policy/).", "extra_gated_button_content": "Submit"}
voxmenthe/Meta-Llama-3-8B-Instruct-mlx-unquantized
null
[ "mlx", "safetensors", "llama", "facebook", "meta", "pytorch", "llama-3", "text-generation", "conversational", "en", "license:other", "region:us" ]
null
2024-04-20T20:44:57+00:00
[]
[ "en" ]
TAGS #mlx #safetensors #llama #facebook #meta #pytorch #llama-3 #text-generation #conversational #en #license-other #region-us
# voxmenthe/Meta-Llama-3-8B-Instruct-mlx-unquantized This model was converted to MLX format from ['meta-llama/Meta-Llama-3-8B-Instruct']() using mlx-lm version 0.10.0. Refer to the original model card for more details on the model. ## Use with mlx
[ "# voxmenthe/Meta-Llama-3-8B-Instruct-mlx-unquantized\nThis model was converted to MLX format from ['meta-llama/Meta-Llama-3-8B-Instruct']() using mlx-lm version 0.10.0.\nRefer to the original model card for more details on the model.", "## Use with mlx" ]
[ "TAGS\n#mlx #safetensors #llama #facebook #meta #pytorch #llama-3 #text-generation #conversational #en #license-other #region-us \n", "# voxmenthe/Meta-Llama-3-8B-Instruct-mlx-unquantized\nThis model was converted to MLX format from ['meta-llama/Meta-Llama-3-8B-Instruct']() using mlx-lm version 0.10.0.\nRefer to the original model card for more details on the model.", "## Use with mlx" ]
null
transformers
## About <!-- ### quantize_version: 1 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: --> <!-- ### vocab_type: --> weighted/imatrix quants of https://huggingface.co/Kquant03/Prokaryote-8x7B-bf16 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ1_S.gguf) | i1-IQ1_S | 9.9 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ1_M.gguf) | i1-IQ1_M | 10.9 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 12.7 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ2_XS.gguf) | i1-IQ2_XS | 14.0 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ2_S.gguf) | i1-IQ2_S | 14.2 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ2_M.gguf) | i1-IQ2_M | 15.6 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q2_K.gguf) | i1-Q2_K | 17.4 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 18.3 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ3_XS.gguf) | i1-IQ3_XS | 19.5 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ3_S.gguf) | i1-IQ3_S | 20.5 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q3_K_S.gguf) | i1-Q3_K_S | 20.5 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ3_M.gguf) | i1-IQ3_M | 21.5 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q3_K_M.gguf) | i1-Q3_K_M | 22.6 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q3_K_L.gguf) | i1-Q3_K_L | 24.3 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-IQ4_XS.gguf) | i1-IQ4_XS | 25.2 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q4_0.gguf) | i1-Q4_0 | 26.7 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q4_K_S.gguf) | i1-Q4_K_S | 26.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q4_K_M.gguf) | 
i1-Q4_K_M | 28.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q5_K_S.gguf) | i1-Q5_K_S | 32.3 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q5_K_M.gguf) | i1-Q5_K_M | 33.3 | | | [GGUF](https://huggingface.co/mradermacher/Prokaryote-8x7B-bf16-i1-GGUF/resolve/main/Prokaryote-8x7B-bf16.i1-Q6_K.gguf) | i1-Q6_K | 38.5 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
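The Usage section above defers to external READMEs. As one hedged illustration (not part of the original card), a single-file quant from the table can typically be run locally with llama-cpp-python; the local path below is hypothetical:

```python
from llama_cpp import Llama

# Hypothetical local path to one of the quant files listed in the table above
llm = Llama(
    model_path="./Prokaryote-8x7B-bf16.i1-Q4_K_M.gguf",
    n_ctx=4096,       # context window
    n_gpu_layers=-1,  # offload all layers to the GPU if one is available
)

out = llm("What is an imatrix quant?", max_tokens=128)
print(out["choices"][0]["text"])
```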
{"language": ["en"], "license": "apache-2.0", "library_name": "transformers", "tags": ["merge", "moe"], "base_model": "Kquant03/Prokaryote-8x7B-bf16", "quantized_by": "mradermacher"}
mradermacher/Prokaryote-8x7B-bf16-i1-GGUF
null
[ "transformers", "gguf", "merge", "moe", "en", "base_model:Kquant03/Prokaryote-8x7B-bf16", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:45:11+00:00
[]
[ "en" ]
TAGS #transformers #gguf #merge #moe #en #base_model-Kquant03/Prokaryote-8x7B-bf16 #license-apache-2.0 #endpoints_compatible #region-us
About ----- weighted/imatrix quants of URL static quants are available at URL Usage ----- If you are unsure how to use GGUF files, refer to one of TheBloke's READMEs for more details, including on how to concatenate multi-part files. Provided Quants --------------- (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): !URL And here are Artefact2's thoughts on the matter: URL FAQ / Model Request ------------------- See URL for some answers to questions you might have and/or if you want some other model quantized. Thanks ------ I thank my company, nethype GmbH, for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.
[]
[ "TAGS\n#transformers #gguf #merge #moe #en #base_model-Kquant03/Prokaryote-8x7B-bf16 #license-apache-2.0 #endpoints_compatible #region-us \n" ]
null
keras
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: | Hyperparameters | Value | | :-- | :-- | | name | Adam | | weight_decay | None | | clipnorm | None | | global_clipnorm | None | | clipvalue | None | | use_ema | False | | ema_momentum | 0.99 | | ema_overwrite_frequency | None | | jit_compile | False | | is_legacy_optimizer | False | | learning_rate | 0.0010000000474974513 | | beta_1 | 0.9 | | beta_2 | 0.999 | | epsilon | 1e-07 | | amsgrad | False | | training_precision | float32 |
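Only the optimizer settings are recorded in this card. A minimal sketch of reproducing that Adam configuration in Keras (the actual model architecture is not documented, so the model below is a placeholder) might be:

```python
import tensorflow as tf

# Mirrors the hyperparameter table above; 0.0010000000474974513 is the float32 form of 1e-3
optimizer = tf.keras.optimizers.Adam(
    learning_rate=1e-3,
    beta_1=0.9,
    beta_2=0.999,
    epsilon=1e-07,
    amsgrad=False,
)

# Placeholder model, used only to show how the optimizer would be attached
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer=optimizer, loss="mse")
```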
{"library_name": "keras"}
Hitomiblood/blindness_modelKerasTPU
null
[ "keras", "region:us" ]
null
2024-04-20T20:45:51+00:00
[]
[]
TAGS #keras #region-us
Model description ----------------- More information needed Intended uses & limitations --------------------------- More information needed Training and evaluation data ---------------------------- More information needed Training procedure ------------------ ### Training hyperparameters The following hyperparameters were used during training:
[ "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:" ]
[ "TAGS\n#keras #region-us \n", "### Training hyperparameters\n\n\nThe following hyperparameters were used during training:" ]
text2text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
ittailup/parler-bayly
null
[ "transformers", "safetensors", "parler_tts", "text2text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:47:01+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #parler_tts #text2text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #parler_tts #text2text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
> Note: this repo has low accuracy and is under investigation.

# yujiepan/Meta-Llama-3-8B-gptq-w4g64

This model applies AutoGPTQ on [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B).

- 4-bit symmetric weight-only quantization
- group_size=64
- calibration set: c4-new

## Accuracy

| model | precision | wikitext ppl (↓) |
|-|-|-|
| meta-llama/Meta-Llama-3-8B | FP16 | 9.179 |
| yujiepan/Meta-Llama-3-8B-gptq-w4g64 | w4g64 | 10.097 |

Note:
- Evaluated on the lm-evaluation-harness "wikitext" task
- Wikitext PPL does not guarantee downstream accuracy, but it helps to check the distortion introduced by quantization.

## Code

```python
import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "meta-llama/Meta-Llama-3-8B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
quantization_config = GPTQConfig(
    bits=4,
    group_size=64,
    dataset="c4-new",
    tokenizer=tokenizer,
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    low_cpu_mem_usage=True,
    quantization_config=quantization_config,
)
model.push_to_hub('yujiepan/Meta-Llama-3-8B-gptq-w4g64')
```
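The card above only covers how the checkpoint was produced. As a hedged companion, a minimal inference sketch could look like the following; it assumes `auto-gptq`/`optimum` and `accelerate` are installed (the card does not state its environment) and that a CUDA GPU is available, and the prompt is purely illustrative.

```python
# Minimal sketch for loading the published w4g64 checkpoint back for inference.
# Assumptions: auto-gptq/optimum and accelerate are installed, and a CUDA GPU is available.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

quantized_id = "yujiepan/Meta-Llama-3-8B-gptq-w4g64"

tokenizer = AutoTokenizer.from_pretrained(quantized_id)
model = AutoModelForCausalLM.from_pretrained(
    quantized_id,
    device_map="auto",          # dispatch the 4-bit weights to the available GPU(s)
    torch_dtype=torch.float16,  # activations stay in fp16
)

prompt = "The capital of France is"  # illustrative prompt, not from the card
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```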
{"library_name": "transformers", "tags": []}
yujiepan/Meta-Llama-3-8B-gptq-w4g64
null
[ "transformers", "safetensors", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "region:us" ]
null
2024-04-20T20:47:37+00:00
[]
[]
TAGS #transformers #safetensors #llama #text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us
> > Note: this repo has low accuracy and is under investigation. > > > yujiepan/Meta-Llama-3-8B-gptq-w4g64 =================================== This model applies AutoGPTQ on meta-llama/Meta-Llama-3-8B. * 4-bit symmetric weight only quantization * group\_size=64 * calibration set: c4-new Accuracy -------- model: meta-llama/Meta-Llama-3-8B, precision: FP16, wikitext ppl (↓): 9.179 model: yujiepan/Meta-Llama-3-8B-gptq-w4g64, precision: w4g64, wikitext ppl (↓): 10.097 Note: * Evaluated on lm-evaluation-harness "wikitext" task * Wikitext PPL does not guarantee actual accuracy, but helps to check the distortion after quantization. Codes -----
[]
[ "TAGS\n#transformers #safetensors #llama #text-generation #autotrain_compatible #endpoints_compatible #text-generation-inference #4-bit #region-us \n" ]
null
transformers
# Uploaded model - **Developed by:** martyyz - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
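Since the card itself does not show a loading recipe, here is a hedged sketch of pulling these adapters back with Unsloth. It assumes the repo stores LoRA adapters trained on top of `unsloth/llama-3-8b-bnb-4bit` (as the card suggests), that a CUDA GPU is available, and the sequence length is an added assumption.

```python
# Hedged usage sketch: load the LoRA adapters from this repo with Unsloth for 4-bit inference.
# The sequence length and prompt are assumptions added for illustration.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="martyyz/llama3_lora",  # this adapter repo
    max_seq_length=2048,               # assumption; not stated in the card
    load_in_4bit=True,
)
FastLanguageModel.for_inference(model)  # switch on Unsloth's faster inference path

inputs = tokenizer("List three facts about llamas.", return_tensors="pt").to("cuda")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```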
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
martyyz/llama3_lora
null
[ "transformers", "safetensors", "text-generation-inference", "unsloth", "llama", "trl", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:49:55+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us
# Uploaded model - Developed by: martyyz - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: martyyz\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #text-generation-inference #unsloth #llama #trl #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: martyyz\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
null
peft
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Omega-LLama3-MWP2 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 3 ### Training results ### Framework versions - PEFT 0.10.0 - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
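The card lists the fine-tuning setup but not how to use the adapter. A hedged loading sketch (assuming the repo ships PEFT/LoRA adapter weights for the gated `meta-llama/Meta-Llama-3-8B-Instruct` base, and guessing that "MWP" refers to math word problems) might look like this:

```python
# Hedged sketch: attach the fine-tuned adapter from this repo to its base model.
# Assumptions: the repo contains PEFT adapter weights and you have access to the gated base model.
import torch
from transformers import AutoTokenizer
from peft import AutoPeftModelForCausalLM

adapter_id = "OmegaGamage/Omega-LLama3-MWP2"

# AutoPeftModelForCausalLM reads the adapter config, downloads the base model it points to,
# and loads the adapter on top of it.
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

# Illustrative math-word-problem prompt; the card does not include example inputs.
prompt = "A train travels 60 km in 45 minutes. What is its average speed in km/h?"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```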
{"license": "other", "library_name": "peft", "tags": ["trl", "sft", "generated_from_trainer"], "base_model": "meta-llama/Meta-Llama-3-8B-Instruct", "model-index": [{"name": "Omega-LLama3-MWP2", "results": []}]}
OmegaGamage/Omega-LLama3-MWP2
null
[ "peft", "safetensors", "trl", "sft", "generated_from_trainer", "base_model:meta-llama/Meta-Llama-3-8B-Instruct", "license:other", "region:us" ]
null
2024-04-20T20:51:58+00:00
[]
[]
TAGS #peft #safetensors #trl #sft #generated_from_trainer #base_model-meta-llama/Meta-Llama-3-8B-Instruct #license-other #region-us
# Omega-LLama3-MWP2 This model is a fine-tuned version of meta-llama/Meta-Llama-3-8B-Instruct on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 2 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - num_epochs: 3 ### Training results ### Framework versions - PEFT 0.10.0 - Transformers 4.39.3 - Pytorch 2.1.2 - Datasets 2.18.0 - Tokenizers 0.15.2
[ "# Omega-LLama3-MWP2\n\nThis model is a fine-tuned version of meta-llama/Meta-Llama-3-8B-Instruct on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-06\n- train_batch_size: 1\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- num_epochs: 3", "### Training results", "### Framework versions\n\n- PEFT 0.10.0\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
[ "TAGS\n#peft #safetensors #trl #sft #generated_from_trainer #base_model-meta-llama/Meta-Llama-3-8B-Instruct #license-other #region-us \n", "# Omega-LLama3-MWP2\n\nThis model is a fine-tuned version of meta-llama/Meta-Llama-3-8B-Instruct on the None dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-06\n- train_batch_size: 1\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 2\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- num_epochs: 3", "### Training results", "### Framework versions\n\n- PEFT 0.10.0\n- Transformers 4.39.3\n- Pytorch 2.1.2\n- Datasets 2.18.0\n- Tokenizers 0.15.2" ]
text-generation
transformers
# Uploaded model - **Developed by:** martyyz - **License:** apache-2.0 - **Finetuned from model :** unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
{"language": ["en"], "license": "apache-2.0", "tags": ["text-generation-inference", "transformers", "unsloth", "llama", "trl", "sft"], "base_model": "unsloth/llama-3-8b-bnb-4bit"}
martyyz/Llama_3_8b_16bit
null
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "en", "base_model:unsloth/llama-3-8b-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:52:37+00:00
[]
[ "en" ]
TAGS #transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us
# Uploaded model - Developed by: martyyz - License: apache-2.0 - Finetuned from model : unsloth/llama-3-8b-bnb-4bit This llama model was trained 2x faster with Unsloth and Huggingface's TRL library. <img src="URL width="200"/>
[ "# Uploaded model\n\n- Developed by: martyyz\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #text-generation-inference #unsloth #trl #sft #en #base_model-unsloth/llama-3-8b-bnb-4bit #license-apache-2.0 #autotrain_compatible #endpoints_compatible #region-us \n", "# Uploaded model\n\n- Developed by: martyyz\n- License: apache-2.0\n- Finetuned from model : unsloth/llama-3-8b-bnb-4bit\n\nThis llama model was trained 2x faster with Unsloth and Huggingface's TRL library.\n\n<img src=\"URL width=\"200\"/>" ]
text-generation
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
domenicrosati/lens-loss-minimality_lr_2e-5_model_meta-llama_Llama-2-7b-chat-hf_batch_4_epoch_1_num_layers_24
null
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:52:46+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #llama #text-generation #arxiv-1910.09700 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
To use this model with [LocalAI](https://localai.io), here is an example model definition:

```
name: WizardLM2
backend: transformers
parameters:
  model: fakezeta/Not-WizardLM-2-7B-ov-int8
context_size: 8192
threads: 6
f16: true
type: OVModelForCausalLM
prompt_cache_path: "cache"
prompt_cache_all: true
template:
  use_tokenizer_template: true
```
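Once LocalAI has loaded that definition, the model is served through LocalAI's OpenAI-compatible API. The following hedged sketch assumes a default local install listening on port 8080; only the `model` name is taken from the definition above.

```python
# Hedged sketch: query the model served by LocalAI via its OpenAI-compatible endpoint.
# Host, port, and API-key handling are assumptions about a default local deployment.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:8080/v1", api_key="not-needed")

response = client.chat.completions.create(
    model="WizardLM2",  # the `name:` field from the model definition above
    messages=[{"role": "user", "content": "Summarize what int8 OpenVINO quantization changes."}],
    max_tokens=256,
)
print(response.choices[0].message.content)
```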
{"license": "apache-2.0"}
fakezeta/Not-WizardLM-2-7B-ov-int8
null
[ "transformers", "openvino", "mistral", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:52:59+00:00
[]
[]
TAGS #transformers #openvino #mistral #text-generation #conversational #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
To use this model with LocalAI, here is an example model definition:
[]
[ "TAGS\n#transformers #openvino #mistral #text-generation #conversational #license-apache-2.0 #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n" ]
null
transformers
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"library_name": "transformers", "tags": []}
mryannugent/cot-finetune-miscondetect-bloomz-3b
null
[ "transformers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "region:us" ]
null
2024-04-20T20:53:51+00:00
[ "1910.09700" ]
[]
TAGS #transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us
# Model Card for Model ID ## Model Details ### Model Description This is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated. - Developed by: - Funded by [optional]: - Shared by [optional]: - Model type: - Language(s) (NLP): - License: - Finetuned from model [optional]: ### Model Sources [optional] - Repository: - Paper [optional]: - Demo [optional]: ## Uses ### Direct Use ### Downstream Use [optional] ### Out-of-Scope Use ## Bias, Risks, and Limitations ### Recommendations Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. ## Training Details ### Training Data ### Training Procedure #### Preprocessing [optional] #### Training Hyperparameters - Training regime: #### Speeds, Sizes, Times [optional] ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data #### Factors #### Metrics ### Results #### Summary ## Model Examination [optional] ## Environmental Impact Carbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019). - Hardware Type: - Hours used: - Cloud Provider: - Compute Region: - Carbon Emitted: ## Technical Specifications [optional] ### Model Architecture and Objective ### Compute Infrastructure #### Hardware #### Software [optional] BibTeX: APA: ## Glossary [optional] ## More Information [optional] ## Model Card Authors [optional] ## Model Card Contact
[ "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
[ "TAGS\n#transformers #safetensors #arxiv-1910.09700 #endpoints_compatible #region-us \n", "# Model Card for Model ID", "## Model Details", "### Model Description\n\n\n\nThis is the model card of a transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- Developed by: \n- Funded by [optional]: \n- Shared by [optional]: \n- Model type: \n- Language(s) (NLP): \n- License: \n- Finetuned from model [optional]:", "### Model Sources [optional]\n\n\n\n- Repository: \n- Paper [optional]: \n- Demo [optional]:", "## Uses", "### Direct Use", "### Downstream Use [optional]", "### Out-of-Scope Use", "## Bias, Risks, and Limitations", "### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", "## How to Get Started with the Model\n\nUse the code below to get started with the model.", "## Training Details", "### Training Data", "### Training Procedure", "#### Preprocessing [optional]", "#### Training Hyperparameters\n\n- Training regime:", "#### Speeds, Sizes, Times [optional]", "## Evaluation", "### Testing Data, Factors & Metrics", "#### Testing Data", "#### Factors", "#### Metrics", "### Results", "#### Summary", "## Model Examination [optional]", "## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the Machine Learning Impact calculator presented in Lacoste et al. (2019).\n\n- Hardware Type: \n- Hours used: \n- Cloud Provider: \n- Compute Region: \n- Carbon Emitted:", "## Technical Specifications [optional]", "### Model Architecture and Objective", "### Compute Infrastructure", "#### Hardware", "#### Software\n\n\n\n[optional]\n\n\n\nBibTeX:\n\n\n\nAPA:", "## Glossary [optional]", "## More Information [optional]", "## Model Card Authors [optional]", "## Model Card Contact" ]
text-generation
transformers
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # 0.0_ablation_declr_4iters_256batch_iter_2 This model is a fine-tuned version of [ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1](https://huggingface.co/ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1) on the ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
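The hyperparameters above are listed abstractly; as a hedged illustration (not the authors' actual script), they would map onto TRL's `DPOTrainer` roughly as follows. The dataset split and column format, precision, DPO beta, and TRL version specifics (e.g., `tokenizer` vs `processing_class` argument) are assumptions the card does not state.

```python
# Hedged sketch of how the listed hyperparameters could be wired into a TRL DPO run.
# This is not the original training script; dataset format, precision, and beta are assumptions.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

base_id = "ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1"
model = AutoModelForCausalLM.from_pretrained(base_id)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Assumes a "train" split with prompt/chosen/rejected columns, which the card does not confirm.
train_dataset = load_dataset("ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset", split="train")

args = DPOConfig(
    output_dir="0.0_ablation_declr_4iters_256batch_iter_2",
    learning_rate=3e-7,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    gradient_accumulation_steps=4,  # with 8 GPUs this yields the effective batch size of 256
    num_train_epochs=1,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    seed=42,
    bf16=True,  # assumption; the card does not state the training precision
)

trainer = DPOTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    tokenizer=tokenizer,
)
trainer.train()
```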
{"license": "mit", "tags": ["alignment-handbook", "generated_from_trainer", "trl", "dpo", "generated_from_trainer"], "datasets": ["ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset"], "base_model": "ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1", "model-index": [{"name": "0.0_ablation_declr_4iters_256batch_iter_2", "results": []}]}
ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_2
null
[ "transformers", "safetensors", "mistral", "text-generation", "alignment-handbook", "generated_from_trainer", "trl", "dpo", "conversational", "dataset:ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset", "base_model:ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
null
2024-04-20T20:54:13+00:00
[]
[]
TAGS #transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset #base_model-ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1 #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us
# 0.0_ablation_declr_4iters_256batch_iter_2 This model is a fine-tuned version of ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1 on the ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3e-07 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 4 - total_train_batch_size: 256 - total_eval_batch_size: 64 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.2
[ "# 0.0_ablation_declr_4iters_256batch_iter_2\n\nThis model is a fine-tuned version of ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1 on the ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 3e-07\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 256\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]
[ "TAGS\n#transformers #safetensors #mistral #text-generation #alignment-handbook #generated_from_trainer #trl #dpo #conversational #dataset-ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset #base_model-ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1 #license-mit #autotrain_compatible #endpoints_compatible #text-generation-inference #region-us \n", "# 0.0_ablation_declr_4iters_256batch_iter_2\n\nThis model is a fine-tuned version of ZhangShenao/0.0_ablation_declr_4iters_256batch_iter_1 on the ZhangShenao/0.0_ablation_declr_4iters_256batch_dataset dataset.", "## Model description\n\nMore information needed", "## Intended uses & limitations\n\nMore information needed", "## Training and evaluation data\n\nMore information needed", "## Training procedure", "### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 3e-07\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- distributed_type: multi-GPU\n- num_devices: 8\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 256\n- total_eval_batch_size: 64\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: cosine\n- lr_scheduler_warmup_ratio: 0.1\n- num_epochs: 1", "### Training results", "### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.2+cu121\n- Datasets 2.14.6\n- Tokenizers 0.15.2" ]