| Column | Dtype | Min | Max |
|---|---|---|---|
| modelId | stringlengths | 5 | 122 |
| author | stringlengths | 2 | 42 |
| last_modified | timestamp[us, tz=UTC] | | |
| downloads | int64 | 0 | 738M |
| likes | int64 | 0 | 11k |
| library_name | stringclasses (245 values) | | |
| tags | sequencelengths | 1 | 4.05k |
| pipeline_tag | stringclasses (48 values) | | |
| createdAt | timestamp[us, tz=UTC] | | |
| card | stringlengths | 1 | 901k |
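For quick programmatic exploration, rows with the schema above can be loaded into pandas. A minimal sketch follows; the Parquet filename is hypothetical and assumes the dump was exported as Parquet.

```python
# Minimal sketch for exploring a dump with the schema above.
# Assumption: rows are stored in "model_cards.parquet" (hypothetical filename).
import pandas as pd

df = pd.read_parquet("model_cards.parquet")
print(df.dtypes)  # downloads/likes as int64, timestamps as datetime64[us, UTC]
print(df.nlargest(5, "likes")[["modelId", "downloads", "likes"]])
print(df["tags"].explode().value_counts().head(10))  # tags is a list-valued column
```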
eren23/ogno-monarch-jaskier-merge-7b-OH-PREF-DPO
eren23
2024-03-05T15:45:46Z
741
5
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "text-generation-inference", "text2text-generation", "en", "dataset:argilla/OpenHermesPreferences", "license:cc-by-nc-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2024-02-27T02:15:25Z
--- language: - en license: cc-by-nc-4.0 library_name: transformers tags: - text-generation-inference datasets: - argilla/OpenHermesPreferences pipeline_tag: text2text-generation model-index: - name: ogno-monarch-jaskier-merge-7b-OH-PREF-DPO results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 73.12 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=eren23/ogno-monarch-jaskier-merge-7b-OH-PREF-DPO name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 89.09 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=eren23/ogno-monarch-jaskier-merge-7b-OH-PREF-DPO name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.8 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=eren23/ogno-monarch-jaskier-merge-7b-OH-PREF-DPO name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 77.45 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=eren23/ogno-monarch-jaskier-merge-7b-OH-PREF-DPO name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 84.77 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=eren23/ogno-monarch-jaskier-merge-7b-OH-PREF-DPO name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 69.45 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=eren23/ogno-monarch-jaskier-merge-7b-OH-PREF-DPO name: Open LLM Leaderboard --- # ogno-monarch-jaskier-merge-7b-OH-PREF-DPO # Disclaimer An experiment: I took my earlier merge, https://huggingface.co/eren23/ogno-monarch-jaskier-merge-7b, and DPO-tuned it on argilla's new preferences dataset: https://huggingface.co/datasets/argilla/OpenHermesPreferences I did not test the model afterwards, and training performance was not great, so use/test it with caution. # Disclaimer 2 It turns out the model performs well in benchmarks :D # GGUF: https://huggingface.co/eren23/ogno-monarch-jaskier-merge-7b-OH-PREF-DPO-GGUF # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_eren23__ogno-monarch-jaskier-merge-7b-OH-PREF-DPO) | Metric |Value| |---------------------------------|----:| |Avg. |76.45| |AI2 Reasoning Challenge (25-Shot)|73.12| |HellaSwag (10-Shot) |89.09| |MMLU (5-Shot) |64.80| |TruthfulQA (0-shot) |77.45| |Winogrande (5-shot) |84.77| |GSM8k (5-shot) |69.45|
ChaoticNeutrals/Eris_7B
ChaoticNeutrals
2024-03-03T00:30:31Z
741
5
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "base_model:ChaoticNeutrals/Prodigy_7B", "base_model:Test157t/Prima-LelantaclesV6-7b", "license:other", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-02T01:25:22Z
--- license: other library_name: transformers tags: - mergekit - merge base_model: - ChaoticNeutrals/Prodigy_7B - Test157t/Prima-LelantaclesV6-7b model-index: - name: Eris_7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 71.42 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ChaoticNeutrals/Eris_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 87.99 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ChaoticNeutrals/Eris_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 65.24 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ChaoticNeutrals/Eris_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 66.95 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ChaoticNeutrals/Eris_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 84.21 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ChaoticNeutrals/Eris_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 66.26 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ChaoticNeutrals/Eris_7B name: Open LLM Leaderboard --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/642265bc01c62c1e4102dc36/GZPRq5YCQO6v8v-aF7CQt.png) # Jeitral: "Eris, the Greek goddess of chaos and discord." Notes: The model should be excellent for both RP and chat-related tasks. It seems to work with both Alpaca and ChatML prompt formats. ```Collaborative effort from both @Jeiku and @Nitral involving what we currently felt were our best individual projects.``` We hope you enjoy! - The Chaotic Neutrals. 
Imatrix GGUF Quants Thanks to @Lewdiculus: https://huggingface.co/Lewdiculous/Eris_7B-GGUF-IQ-Imatrix The following models were included in the merge: * [ChaoticNeutrals/Prodigy_7B](https://huggingface.co/ChaoticNeutrals/Prodigy_7B) * [Test157t/Prima-LelantaclesV6-7b](https://huggingface.co/Test157t/Prima-LelantaclesV6-7b) ### Configuration The following YAML configuration was used to produce this model: ```yaml slices: - sources: - model: Test157t/Prima-LelantaclesV6-7b layer_range: [0, 32] - model: ChaoticNeutrals/Prodigy_7B layer_range: [0, 32] merge_method: slerp base_model: Test157t/Prima-LelantaclesV6-7b parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_ChaoticNeutrals__Eris_7B) | Metric |Value| |---------------------------------|----:| |Avg. |73.68| |AI2 Reasoning Challenge (25-Shot)|71.42| |HellaSwag (10-Shot) |87.99| |MMLU (5-Shot) |65.24| |TruthfulQA (0-shot) |66.95| |Winogrande (5-shot) |84.21| |GSM8k (5-shot) |66.26|
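The card itself stops short of a loading snippet; a minimal sketch follows (not from the card, and the prompt and sampling settings are illustrative rather than the authors' recommendations):

```python
# Minimal sketch, not from the model card: load Eris_7B with transformers.
import torch
import transformers

pipe = transformers.pipeline(
    "text-generation",
    model="ChaoticNeutrals/Eris_7B",
    torch_dtype=torch.float16,
    device_map="auto",
)
# The card notes the model works with both Alpaca and ChatML formats;
# an Alpaca-style prompt is used here for illustration.
prompt = "### Instruction:\nWrite a short in-character greeting.\n\n### Response:\n"
out = pipe(prompt, max_new_tokens=128, do_sample=True, temperature=0.8)
print(out[0]["generated_text"])
```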
hydra-project/ChatHercules-2.5-Mistral-7B
hydra-project
2024-03-04T17:29:19Z
741
9
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Locutusque/Hercules-2.5-Mistral-7B", "openchat/openchat-3.5-0106", "base_model:Locutusque/Hercules-2.5-Mistral-7B", "base_model:openchat/openchat-3.5-0106", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-04T05:54:54Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - Locutusque/Hercules-2.5-Mistral-7B - openchat/openchat-3.5-0106 base_model: - Locutusque/Hercules-2.5-Mistral-7B - openchat/openchat-3.5-0106 model-index: - name: ChatHercules-2.5-Mistral-7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 65.1 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=hydra-project/ChatHercules-2.5-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 84.61 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=hydra-project/ChatHercules-2.5-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 65.35 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=hydra-project/ChatHercules-2.5-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 47.52 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=hydra-project/ChatHercules-2.5-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 81.85 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=hydra-project/ChatHercules-2.5-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 64.97 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=hydra-project/ChatHercules-2.5-Mistral-7B name: Open LLM Leaderboard --- # ChatHercules-2.5-Mistral-7B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6437292ecd93f4c9a34b0d47/VW32vrPx2giqo5Od8Tyz0.png) ChatHercules-2.5-Mistral-7B is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [Locutusque/Hercules-2.5-Mistral-7B](https://huggingface.co/Locutusque/Hercules-2.5-Mistral-7B) * [openchat/openchat-3.5-0106](https://huggingface.co/openchat/openchat-3.5-0106) ## 🧩 Configuration ```yaml slices: - sources: - model: Locutusque/Hercules-2.5-Mistral-7B layer_range: [0, 32] - model: openchat/openchat-3.5-0106 layer_range: [0, 32] merge_method: slerp base_model: Locutusque/Hercules-2.5-Mistral-7B parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "hydra-project/ChatHercules-2.5-Mistral-7B" messages = [{"role": "user", "content": "What is a large 
language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_hydra-project__ChatHercules-2.5-Mistral-7B) | Metric |Value| |---------------------------------|----:| |Avg. |68.24| |AI2 Reasoning Challenge (25-Shot)|65.10| |HellaSwag (10-Shot) |84.61| |MMLU (5-Shot) |65.35| |TruthfulQA (0-shot) |47.52| |Winogrande (5-shot) |81.85| |GSM8k (5-shot) |64.97|
ResplendentAI/Flora_7B
ResplendentAI
2024-03-10T05:00:59Z
741
7
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "finetune", "en", "dataset:ResplendentAI/Synthetic_Soul_1k", "base_model:jeiku/FloraBase", "license:cc-by-sa-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-05T03:15:19Z
--- language: - en license: cc-by-sa-4.0 library_name: transformers tags: - finetune base_model: - jeiku/FloraBase - jeiku/Synthetic_Soul_1k_Mistral_128 datasets: - ResplendentAI/Synthetic_Soul_1k model-index: - name: Flora_7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 72.1 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Flora_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.31 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Flora_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.16 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Flora_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 71.19 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Flora_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 84.45 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Flora_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 65.35 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Flora_7B name: Open LLM Leaderboard --- # Flora ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/626dfb8786671a29c715f8a9/QnP0CnXCA9pTocetkrJht.jpeg) Quants available here: https://huggingface.co/bartowski/Flora_7B-GGUF https://huggingface.co/bartowski/Flora_7B-exl2 https://huggingface.co/solidrust/Flora-7B-AWQ ### Configuration The following YAML configuration was used to produce this model: ```yaml merge_method: linear models: - model: jeiku/FloraBase+jeiku/Synthetic_Soul_1k_Mistral_128 parameters: weight: 1 dtype: float16 ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_ResplendentAI__Flora_7B) | Metric |Value| |---------------------------------|----:| |Avg. |74.26| |AI2 Reasoning Challenge (25-Shot)|72.10| |HellaSwag (10-Shot) |88.31| |MMLU (5-Shot) |64.16| |TruthfulQA (0-shot) |71.19| |Winogrande (5-shot) |84.45| |GSM8k (5-shot) |65.35|
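For the GGUF quants linked above, a minimal llama-cpp-python sketch could look like the following; the quant filename is an assumption and should be checked against the bartowski/Flora_7B-GGUF file list.

```python
# Minimal sketch, assuming llama-cpp-python is installed and that the repo
# contains a quant named "Flora_7B-Q4_K_M.gguf" (filename is a guess).
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

path = hf_hub_download("bartowski/Flora_7B-GGUF", "Flora_7B-Q4_K_M.gguf")
llm = Llama(model_path=path, n_ctx=4096)
out = llm("Describe a flower garden in one sentence.", max_tokens=64)
print(out["choices"][0]["text"])
```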
Locutusque/Hyperion-2.0-Yi-34B
Locutusque
2024-03-13T18:10:10Z
741
2
transformers
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:1910.09700", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-09T13:50:25Z
--- library_name: transformers license: other --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This model seems to have overfit. Performance is not good. I do not recommend using this model; it has been released only for debugging purposes. ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
AetherResearch/Cerebrum-1.0-7b
AetherResearch
2024-03-13T13:17:05Z
741
50
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "base_model:mistralai/Mistral-7B-v0.1", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-11T20:10:42Z
--- base_model: mistralai/Mistral-7B-v0.1 license: apache-2.0 --- ## Introduction Cerebrum 7b is a large language model (LLM) created specifically for reasoning tasks. It is based on the Mistral 7b model, fine-tuned on a small custom dataset of native chain of thought data and further improved with targeted RLHF (tRLHF), a novel technique for sample-efficient LLM alignment. Unlike numerous other recent fine-tuning approaches, our training pipeline includes under 5000 training prompts and even fewer labeled datapoints for tRLHF. The native chain-of-thought approach means that Cerebrum is trained to devise a tactical plan before tackling problems that require thinking. For brainstorming, knowledge-intensive, and creative tasks, Cerebrum will typically omit unnecessarily verbose considerations. Zero-shot prompted Cerebrum significantly outperforms few-shot prompted Mistral 7b as well as much larger models (such as Llama 2 70b) on a range of tasks that require reasoning, including ARC Challenge, GSM8k, and Math. ## Benchmarking An overview of Cerebrum 7b performance compared to the reported performance of Mistral 7b and Llama 2 70b on selected benchmarks that require reasoning: <img src="benchmarking.png" alt="benchmarking_chart" width="750"/> <img src="benchmarking_table.png" alt="benchmarking_table" width="750"/> Notes: 1) Cerebrum evaluated zero-shot, Mistral 8-shot with maj@8, Llama 8-shot; 2) Cerebrum evaluated zero-shot, Mistral 4-shot with maj@4, Llama 4-shot ## Usage For optimal performance, Cerebrum should be prompted with an Alpaca-style template that requests the description of the "thought process". Here is what a conversation should look like from the model's point of view: ``` <s>A chat between a user and a thinking artificial intelligence assistant. The assistant describes its thought process and gives helpful and detailed answers to the user's questions. User: Are you conscious? AI: ``` This prompt is also available as a chat template. Here is how you could use it: ``` import torch from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('AetherResearch/Cerebrum-1.0-7b') model = AutoModelForCausalLM.from_pretrained('AetherResearch/Cerebrum-1.0-7b', torch_dtype=torch.float16, device_map='auto') messages = [ {'role': 'user', 'content': 'What is chain of thought prompting?'}, {'role': 'assistant', 'content': 'Chain of thought prompting is a technique used in large language models to encourage the model to think more deeply about the problem it is trying to solve. It involves prompting the model to generate a series of intermediate steps or "thoughts" that lead to the final answer. This can help the model to better understand the problem and to generate more accurate and relevant responses.'}, {'role': 'user', 'content': 'Why does chain of thought prompting work?'} ] input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors='pt').to(model.device) with torch.no_grad(): out = model.generate(input_ids=input, max_new_tokens=100, do_sample=False) # will generate "Chain of thought prompting works because it helps the model to break down complex problems into smaller, more manageable steps. This allows the model to focus on each step individually and to generate more accurate and relevant responses. Additionally, the intermediate steps can help the model to understand the problem better and to find patterns or connections that it may not have seen before.</s>" ``` The model ends its turn by generating the EOS token. Importantly, this token should be removed from the model's answer in a multi-turn dialogue. Cerebrum can be operated at very low temperatures (including temperature 0), which improves performance on tasks that require precise answers. 
The alignment should be sufficient to avoid repetitions in most cases without a repetition penalty.
Azure99/blossom-v5-14b
Azure99
2024-03-14T14:16:10Z
741
3
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "zh", "en", "dataset:Azure99/blossom-chat-v3", "dataset:Azure99/blossom-math-v4", "dataset:Azure99/blossom-wizard-v3", "dataset:Azure99/blossom-orca-v3", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-12T08:13:38Z
--- license: apache-2.0 datasets: - Azure99/blossom-chat-v3 - Azure99/blossom-math-v4 - Azure99/blossom-wizard-v3 - Azure99/blossom-orca-v3 language: - zh - en --- # **BLOSSOM-v5-14b** [💻Github](https://github.com/Azure99/BlossomLM) • [🚀Blossom Chat Demo](https://blossom-chat.com/) ### What's new? The Blossom V5 series of models is trained entirely on high-quality data distilled from gpt-4-0125-preview, resulting in significant improvements. ### Introduction Blossom is a conversational large language model, fine-tuned from the Qwen1.5-14B pre-trained model on the mixed Blossom Orca/Wizard/Chat/Math dataset. Blossom possesses robust general capabilities and context comprehension. Additionally, the high-quality Chinese and English datasets used for training have been made open source. Training was conducted in two stages. The first stage used 40K Wizard, 40K Orca, and 10K Math single-turn instruction datasets, training for 1 epoch; the second stage used a 10K Blossom chat multi-turn dialogue dataset plus 10% of the data randomly sampled from the first stage, training for 3 epochs. ### Inference Inference is performed in the form of dialogue continuation. Single-turn dialogue ``` A chat between a human and an artificial intelligence bot. The bot gives helpful, detailed, and polite answers to the human's questions. |Human|: hello |Bot|: ``` Multi-turn dialogue ``` A chat between a human and an artificial intelligence bot. The bot gives helpful, detailed, and polite answers to the human's questions. |Human|: hello |Bot|: Hello! How can I assist you today?<|endoftext|> |Human|: Generate a random number using python |Bot|: ``` Note: append `<|endoftext|>` to the end of each Bot turn in the conversation history.
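A minimal sketch of the dialogue-continuation format described above; the `build_prompt` helper is hypothetical, not part of the Blossom repo, and simply implements the stated template:

```python
# Hypothetical helper implementing the Blossom dialogue-continuation format.
SYSTEM = ("A chat between a human and an artificial intelligence bot. The bot gives "
          "helpful, detailed, and polite answers to the human's questions.")

def build_prompt(history, user_message):
    # history: list of (human, bot) turns; each bot turn gets the <|endoftext|> suffix.
    parts = [SYSTEM]
    for human, bot in history:
        parts.append(f"|Human|: {human}")
        parts.append(f"|Bot|: {bot}<|endoftext|>")
    parts.append(f"|Human|: {user_message}")
    parts.append("|Bot|: ")
    return "\n".join(parts)

print(build_prompt([("hello", "Hello! How can I assist you today?")],
                   "Generate a random number using python"))
```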
cgato/Thespis-Krangled-7b-v2
cgato
2024-03-14T10:56:35Z
741
0
transformers
[ "transformers", "pytorch", "mistral", "text-generation", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-14T06:17:14Z
--- license: cc-by-nc-4.0 --- It's something else. Try it out! Thank you! Datasets Used: * Dolphin * Ultrachat * Capybara * Augmental * ToxicQA * Magiccoder-Evol-Instruct-110k * Yahoo Answers * OpenOrca * Airoboros 3.1 ## Prompt Format: Chat ( The default Ooba template and Silly Tavern Template ) ``` {System Prompt} Username: {Input} BotName: {Response} Username: {Input} BotName: {Response} ``` ## Recommended Silly Tavern Preset -> (Temp: 1.25, MinP: 0.1, RepPen: 1.03) ## Recommended Kobold Horde Preset -> MinP
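A minimal sketch of the chat format above; the `thespis_prompt` helper is hypothetical and just mirrors the template, while the sampler values restate the card's recommended SillyTavern preset:

```python
# Hypothetical prompt builder for the chat format above.
def thespis_prompt(system_prompt, turns, user_name="Username", bot_name="BotName"):
    lines = [system_prompt, ""]
    for role, text in turns:  # role is "user" or "bot"
        name = user_name if role == "user" else bot_name
        lines.append(f"{name}: {text}")
    lines.append(f"{bot_name}:")
    return "\n".join(lines)

# Recommended SillyTavern sampler settings, restated from the card:
sampler = {"temperature": 1.25, "min_p": 0.1, "repetition_penalty": 1.03}
```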
aboros98/merlin1.5
aboros98
2024-03-15T13:36:23Z
741
0
transformers
[ "transformers", "pytorch", "phi", "text-generation", "custom_code", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-15T12:58:36Z
--- license: mit --- | Metric | Value | |-----------------------|---------------------------| | Average | - | | ARC | TBA | | ARC Easy | TBA | | BoolQ | TBA | | HellaSwag | TBA | | OpenBookQA | TBA | | PiQA | TBA | | Winogrande | TBA | | MMLU | TBA | | GSM8K | TBA | | Truthful QA | TBA | | MT-Bench | TBA |
Joseph717171/BigOrca-2-XB
Joseph717171
2024-04-17T12:24:55Z
741
2
transformers
[ "transformers", "safetensors", "llama", "text-generation", "mergekit", "merge", "orca", "orca2", "microsoft", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-16T00:20:58Z
--- pipeline_tag: text-generation base_model: [] library_name: transformers tags: - mergekit - merge - orca - orca2 - microsoft license_name: microsoft-research-license license_link: LICENSE license: other --- Inspired by [AbacusAI's BigYi-15b](https://huggingface.co/abacusai/bigyi-15b)... # BigOrca-2-XB This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the passthrough merge method. ### Models Merged The following models were included in the merge: * /Users/jsarnecki/opt/microsoft-Orca-2-13b # Quantizations # GGUFs (Thanks to [mradermacher](https://huggingface.co/mradermacher)) * [mradermacher/BigOrca-2-XB-GGUF](https://huggingface.co/mradermacher/BigOrca-2-XB-GGUF) ### Configuration The following YAML configuration was used to produce this model: ```yaml dtype: float16 merge_method: passthrough slices: - sources: - layer_range: [0, 10] model: /Users/jsarnecki/opt/microsoft-Orca-2-13b - sources: - layer_range: [5, 15] model: /Users/jsarnecki/opt/microsoft-Orca-2-13b - sources: - layer_range: [10, 20] model: /Users/jsarnecki/opt/microsoft-Orca-2-13b - sources: - layer_range: [15, 25] model: /Users/jsarnecki/opt/microsoft-Orca-2-13b - sources: - layer_range: [20, 30] model: /Users/jsarnecki/opt/microsoft-Orca-2-13b - sources: - layer_range: [25, 35] model: /Users/jsarnecki/opt/microsoft-Orca-2-13b - sources: - layer_range: [30, 40] model: /Users/jsarnecki/opt/microsoft-Orca-2-13b ```
vicgalle/SystemConfigHermes-7B
vicgalle
2024-03-16T16:13:40Z
741
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-16T16:08:46Z
--- library_name: transformers license: apache-2.0 --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
FelixChao/Gemma-10.2B-Coder
FelixChao
2024-03-17T15:25:56Z
741
0
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-17T14:24:37Z
--- library_name: transformers license: apache-2.0 --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
OpenBuddy/openbuddy-qwen1.5-14b-v20.1-32k
OpenBuddy
2024-03-25T05:33:22Z
741
1
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "conversational", "zh", "en", "fr", "de", "ja", "ko", "it", "ru", "fi", "license:other", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-21T09:18:25Z
--- license: other license_name: tongyi-qianwen-license-agreement license_link: >- https://huggingface.co/Qwen/Qwen1.5-14B/blob/39b74a78357df4d2296e838d87565967d663a67a/LICENSE language: - zh - en - fr - de - ja - ko - it - ru - fi pipeline_tag: text-generation inference: false library_name: transformers --- # OpenBuddy - Open Multilingual Chatbot GitHub and Usage Guide: [https://github.com/OpenBuddy/OpenBuddy](https://github.com/OpenBuddy/OpenBuddy) Website and Demo: [https://openbuddy.ai](https://openbuddy.ai) Evaluation result of this model: [Evaluation.txt](Evaluation.txt) ![Demo](https://raw.githubusercontent.com/OpenBuddy/OpenBuddy/main/media/demo.png) # Copyright Notice Base model: https://huggingface.co/Qwen/Qwen1.5-14B License: Qwen: https://huggingface.co/Qwen/Qwen1.5-14B/blob/39b74a78357df4d2296e838d87565967d663a67a/LICENSE ## Disclaimer All OpenBuddy models have inherent limitations and may potentially produce outputs that are erroneous, harmful, offensive, or otherwise undesirable. Users should not use these models in critical or high-stakes situations that may lead to personal injury, property damage, or significant losses. Examples of such scenarios include, but are not limited to, the medical field, controlling software and hardware systems that may cause harm, and making important financial or legal decisions. OpenBuddy is provided "as-is" without any warranty of any kind, either express or implied, including, but not limited to, the implied warranties of merchantability, fitness for a particular purpose, and non-infringement. In no event shall the authors, contributors, or copyright holders be liable for any claim, damages, or other liabilities, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software. By using OpenBuddy, you agree to these terms and conditions, and acknowledge that you understand the potential risks associated with its use. You also agree to indemnify and hold harmless the authors, contributors, and copyright holders from any claims, damages, or liabilities arising from your use of OpenBuddy.
AurelPx/Pegasus-7b-slerp
AurelPx
2024-03-22T15:19:19Z
741
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "ammarali32/multi_verse_model", "eren23/dpo-binarized-NeutrixOmnibe-7B", "conversational", "base_model:ammarali32/multi_verse_model", "base_model:eren23/dpo-binarized-NeutrixOmnibe-7B", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-22T15:14:30Z
--- tags: - merge - mergekit - lazymergekit - ammarali32/multi_verse_model - eren23/dpo-binarized-NeutrixOmnibe-7B base_model: - ammarali32/multi_verse_model - eren23/dpo-binarized-NeutrixOmnibe-7B license: apache-2.0 --- # Pegasus-7b-slerp Pegasus-7b-slerp is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [ammarali32/multi_verse_model](https://huggingface.co/ammarali32/multi_verse_model) * [eren23/dpo-binarized-NeutrixOmnibe-7B](https://huggingface.co/eren23/dpo-binarized-NeutrixOmnibe-7B) ## 🧩 Configuration ```yaml slices: - sources: - model: ammarali32/multi_verse_model layer_range: [0, 32] - model: eren23/dpo-binarized-NeutrixOmnibe-7B layer_range: [0, 32] merge_method: slerp base_model: ammarali32/multi_verse_model parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "AurelPx/Pegasus-7b-slerp" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
Yuma42/KangalKhan-PressurizedRuby-7B
Yuma42
2024-03-24T23:12:07Z
741
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Yuma42/KangalKhan-RawRuby-7B", "Yuma42/KangalKhan-Ruby-7B-Fixed", "conversational", "en", "base_model:Yuma42/KangalKhan-RawRuby-7B", "base_model:Yuma42/KangalKhan-Ruby-7B-Fixed", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-24T22:57:30Z
--- tags: - merge - mergekit - lazymergekit - Yuma42/KangalKhan-RawRuby-7B - Yuma42/KangalKhan-Ruby-7B-Fixed base_model: - Yuma42/KangalKhan-RawRuby-7B - Yuma42/KangalKhan-Ruby-7B-Fixed license: apache-2.0 language: - en --- # KangalKhan-PressurizedRuby-7B KangalKhan-PressurizedRuby-7B is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [Yuma42/KangalKhan-RawRuby-7B](https://huggingface.co/Yuma42/KangalKhan-RawRuby-7B) * [Yuma42/KangalKhan-Ruby-7B-Fixed](https://huggingface.co/Yuma42/KangalKhan-Ruby-7B-Fixed) ## 🧩 Configuration ```yaml models: - model: teknium/OpenHermes-2.5-Mistral-7B # no parameters necessary for base model - model: Yuma42/KangalKhan-RawRuby-7B parameters: density: 0.6 weight: 0.5 - model: Yuma42/KangalKhan-Ruby-7B-Fixed parameters: density: 0.6 weight: 0.5 merge_method: ties base_model: teknium/OpenHermes-2.5-Mistral-7B parameters: normalize: true dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "Yuma42/KangalKhan-PressurizedRuby-7B" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
zelus82/Obelix-Phi2-v0
zelus82
2024-03-26T00:26:50Z
741
0
transformers
[ "transformers", "safetensors", "phi", "text-generation", "merge", "mergekit", "lazymergekit", "zelus82/Obelix-Phi2", "amu/spin-phi2", "conversational", "custom_code", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-26T00:25:27Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - zelus82/Obelix-Phi2 - amu/spin-phi2 --- # Obelix-Phi2-v0 Obelix-Phi2-v0 is a merge of the following models using [mergekit](https://github.com/cg123/mergekit): * [zelus82/Obelix-Phi2](https://huggingface.co/zelus82/Obelix-Phi2) * [amu/spin-phi2](https://huggingface.co/amu/spin-phi2) ## 🧩 Configuration ```yaml slices: - sources: - model: zelus82/Obelix-Phi2 layer_range: [0, 32] - model: amu/spin-phi2 layer_range: [0, 32] merge_method: slerp base_model: zelus82/Obelix-Phi2 parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ```
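The card has no usage section; a minimal loading sketch follows. `trust_remote_code=True` is an assumption suggested by the repo's `custom_code` tag, so verify the remote code before enabling it.

```python
# Minimal sketch, not from the card. trust_remote_code=True is suggested by the
# "custom_code" tag on this repo; review the remote code before enabling it.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "zelus82/Obelix-Phi2-v0"
tok = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto", trust_remote_code=True
)
inputs = tok("Explain slerp merging in one sentence.", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=64)
print(tok.decode(out[0], skip_special_tokens=True))
```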
OpenBuddy/openbuddy-mistral2-7b-v20.1-32k
OpenBuddy
2024-03-26T18:00:05Z
741
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "zh", "en", "fr", "de", "ja", "ko", "it", "ru", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-26T01:23:17Z
--- language: - zh - en - fr - de - ja - ko - it - ru pipeline_tag: text-generation inference: false library_name: transformers license: apache-2.0 --- # OpenBuddy - Open Multilingual Chatbot GitHub and Usage Guide: [https://github.com/OpenBuddy/OpenBuddy](https://github.com/OpenBuddy/OpenBuddy) Website and Demo: [https://openbuddy.ai](https://openbuddy.ai) Evaluation result of this model: [Evaluation.txt](Evaluation.txt) ![Demo](https://raw.githubusercontent.com/OpenBuddy/OpenBuddy/main/media/demo.png) # Copyright Notice Base model: https://huggingface.co/mistralai/Mistral-7B-v0.2 License: Apache 2.0 ## Disclaimer All OpenBuddy models have inherent limitations and may potentially produce outputs that are erroneous, harmful, offensive, or otherwise undesirable. Users should not use these models in critical or high-stakes situations that may lead to personal injury, property damage, or significant losses. Examples of such scenarios include, but are not limited to, the medical field, controlling software and hardware systems that may cause harm, and making important financial or legal decisions. OpenBuddy is provided "as-is" without any warranty of any kind, either express or implied, including, but not limited to, the implied warranties of merchantability, fitness for a particular purpose, and non-infringement. In no event shall the authors, contributors, or copyright holders be liable for any claim, damages, or other liabilities, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software. By using OpenBuddy, you agree to these terms and conditions, and acknowledge that you understand the potential risks associated with its use. You also agree to indemnify and hold harmless the authors, contributors, and copyright holders from any claims, damages, or liabilities arising from your use of OpenBuddy.
Joseph717171/Mistral-10.7B-v0.2
Joseph717171
2024-03-31T11:56:01Z
741
4
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "arxiv:2312.15166", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-30T02:13:55Z
--- base_model: [] library_name: transformers tags: - mergekit - merge license: apache-2.0 --- # Credit for the model card's description goes to ddh0 and mergekit # Looking for [Mistral-10.7B-Instruct-v0.2?](https://huggingface.co/ddh0/Mistral-10.7B-Instruct-v0.2) # Credit for access and conversion of Mistral-7B-v0.2 goes to alpindale (from MistralAI's weights to HF Transformers) # Mistral-10.7B-v0.2 This is Mistral-10.7B-v0.2, a depth-upscaled version of [alpindale/Mistral-7B-v0.2-hf](https://huggingface.co/alpindale/Mistral-7B-v0.2-hf). This model is intended to be used as a basis for further fine-tuning, or as a drop-in upgrade from the original 7 billion parameter model. Paper detailing how Depth-Up Scaling works: [SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective Depth Up-Scaling](https://arxiv.org/abs/2312.15166) This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the passthrough merge method. ### Models Merged The following models were included in the merge: * /Users/jsarnecki/opt/Workspace/alpindale/Mistral-7B-v0.2-hf ### Configuration The following YAML configuration was used to produce this model: ```yaml dtype: bfloat16 merge_method: passthrough slices: - sources: - layer_range: [0, 24] model: /Users/jsarnecki/opt/Workspace/alpindale/Mistral-7B-v0.2-hf - sources: - layer_range: [8, 32] model: /Users/jsarnecki/opt/Workspace/alpindale/Mistral-7B-v0.2-hf ```
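To make the depth-upscaling arithmetic concrete, here is a small sketch (not part of the repo) that works out the layer count implied by the passthrough config above:

```python
# Sketch: layer arithmetic for the passthrough config above (not part of the repo).
slices = [(0, 24), (8, 32)]  # layer_range entries from the YAML
total = sum(end - start for start, end in slices)
duplicated = set(range(*slices[0])) & set(range(*slices[1]))
print(f"{total} decoder layers after the merge (the base model has 32)")  # 48
print(f"layers {min(duplicated)}-{max(duplicated)} are included twice")   # 8-23
```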
Nitral-AI/KukulStanta-7B
Nitral-AI
2024-04-03T01:03:59Z
741
11
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "license:other", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-30T14:32:52Z
--- license: other model-index: - name: KukulStanta-7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 68.43 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Nitral-AI/KukulStanta-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 86.37 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Nitral-AI/KukulStanta-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 65.0 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Nitral-AI/KukulStanta-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 62.19 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Nitral-AI/KukulStanta-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 80.03 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Nitral-AI/KukulStanta-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 63.68 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Nitral-AI/KukulStanta-7B name: Open LLM Leaderboard --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/642265bc01c62c1e4102dc36/NYX3IOR6Ctf9PDQiXIg9d.png) # Vision/multimodal capabilities: Quants Found Here: https://huggingface.co/Lewdiculous/KukulStanta-7B-GGUF-IQ-Imatrix If you want to use vision functionality: * You must use the latest versions of [Koboldcpp](https://github.com/LostRuins/koboldcpp). To use the multimodal **vision** capabilities of this model, you need to load the specified **mmproj** file, which can be found inside this model repo. * You can load the **mmproj** by using the corresponding section in the interface: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/UX6Ubss2EPNAT3SKGMLe0.png) # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Nitral-AI__KukulStanta-7B) | Metric |Value| |---------------------------------|----:| |Avg. |70.95| |AI2 Reasoning Challenge (25-Shot)|68.43| |HellaSwag (10-Shot) |86.37| |MMLU (5-Shot) |65.00| |TruthfulQA (0-shot) |62.19| |Winogrande (5-shot) |80.03| |GSM8k (5-shot) |63.68|
migtissera/Tess-2.0-Yi-34B-200K
migtissera
2024-04-01T23:53:19Z
741
5
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-04-01T03:54:39Z
---
license: other
license_name: yi-34b
license_link: https://huggingface.co/01-ai/Yi-34B-200K/blob/main/LICENSE
---

# Join My General AI Discord (NeuroLattice): https://discord.gg/Hz6GrwGFKD

# Tess-2.0-Yi-34B-200K

Tess, short for Tesoro (Treasure in Italian), is a general-purpose Large Language Model series. Tess-2.0-Yi-34B-200K was trained on the 01-ai/Yi-34B-200K base.

# Prompt Format:

```
SYSTEM: <ANY SYSTEM CONTEXT>
USER: 
ASSISTANT:
```

<br>

![Tesoro](https://huggingface.co/migtissera/Tess-7B-v2.0/resolve/main/Tesoro.png)

<br>

### The code example below shows how to use this model:

```python
import json

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "migtissera/Tess-2.0-Yi-34B-200K"
output_file_path = "./conversations.jsonl"

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto",
    load_in_8bit=False,
    trust_remote_code=True,
)

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)


def generate_text(instruction):
    tokens = tokenizer.encode(instruction)
    tokens = torch.LongTensor(tokens).unsqueeze(0)
    tokens = tokens.to("cuda")

    instance = {
        "input_ids": tokens,
        "top_p": 1.0,
        "temperature": 0.5,
        "generate_len": 1024,
        "top_k": 50,
    }

    length = len(tokens[0])
    with torch.no_grad():
        rest = model.generate(
            input_ids=tokens,
            max_length=length + instance["generate_len"],
            use_cache=True,
            do_sample=True,
            top_p=instance["top_p"],
            temperature=instance["temperature"],
            top_k=instance["top_k"],
            num_return_sequences=1,
        )
    output = rest[0][length:]
    string = tokenizer.decode(output, skip_special_tokens=True)
    # The model may keep generating past its turn, so cut at the next "USER:".
    answer = string.split("USER:")[0].strip()
    return f"{answer}"


conversation = "SYSTEM: Answer the question thoughtfully and intelligently. Always answer without hesitation."

while True:
    user_input = input("You: ")
    llm_prompt = f"{conversation} \nUSER: {user_input} \nASSISTANT: "
    answer = generate_text(llm_prompt)
    print(answer)
    conversation = f"{llm_prompt}{answer}"
    json_data = {"prompt": user_input, "answer": answer}

    # Save your conversation
    with open(output_file_path, "a") as output_file:
        output_file.write(json.dumps(json_data) + "\n")
```

<br>

#### Limitations & Biases:

While this model aims for accuracy, it can occasionally produce inaccurate or misleading results.

Despite diligent efforts in refining the pretraining data, there remains a possibility for the generation of inappropriate, biased, or offensive content. Exercise caution and cross-check information when necessary. This is an uncensored model.

<br>
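Note that loading a 34B model in float16, as above, needs roughly 70 GB of GPU memory. Where that is out of reach, one option is 4-bit quantized loading; the sketch below is not from the original card and assumes `bitsandbytes` is installed (`pip install bitsandbytes`):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_path = "migtissera/Tess-2.0-Yi-34B-200K"

# NF4 4-bit quantization cuts the weight footprint to roughly a quarter of
# float16, at some cost in output quality.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    model_path,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
```

The rest of the example above should work unchanged with this model object.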
mmnga/Meta-Llama-3-70B-Instruct-gguf
mmnga
2024-04-23T13:53:46Z
741
5
null
[ "gguf", "llama3", "en", "ja", "dataset:TFMC/imatrix-dataset-for-japanese-llm", "license:other", "region:us" ]
null
2024-04-21T16:48:06Z
---
license: other
license_name: llama3
license_link: https://huggingface.co/meta-llama/Meta-Llama-3-8B/blob/main/LICENSE
language:
- en
- ja
tags:
- llama3
datasets:
- TFMC/imatrix-dataset-for-japanese-llm
---

# Meta-Llama-3-70B-Instruct-gguf

A GGUF-format conversion of [Meta-Llama-3-70B-Instruct, published by meta-llama](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct).

eot_id is supported.

The imatrix data was created using [TFMC/imatrix-dataset-for-japanese-llm](https://huggingface.co/datasets/TFMC/imatrix-dataset-for-japanese-llm).

## About the split files

Large files such as the q6_k and q8_0 quants are split because of their size and must be concatenated before use:

~~~bash
cat Meta-Llama-3-70B-Instruct-Q5_K_M.gguf.* > Meta-Llama-3-70B-Instruct-Q5_K_M.gguf
~~~

## Usage

```
git clone https://github.com/ggerganov/llama.cpp.git
cd llama.cpp
make -j
./main -m 'Meta-Llama-3-70B-Instruct-Q4_0.gguf' -p "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\nこんにちわ<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" -n 128
```
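For programmatic use, the same GGUF file can also be loaded with llama-cpp-python. The sketch below is not part of the original card; it assumes the Q4_0 file has already been downloaded (and, for the larger quants, the split parts concatenated as shown above):

```python
from llama_cpp import Llama

# Load the quantized model; n_gpu_layers=-1 offloads all layers to GPU when
# llama-cpp-python was built with GPU support (omit it for CPU-only use).
llm = Llama(
    model_path="Meta-Llama-3-70B-Instruct-Q4_0.gguf",
    n_ctx=8192,
    n_gpu_layers=-1,
)

# Same Llama 3 instruct template as the ./main example above.
prompt = (
    "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
    "こんにちわ<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
)

out = llm(prompt, max_tokens=128, stop=["<|eot_id|>"])
print(out["choices"][0]["text"])
```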
PrunaAI/Llama-3-8B-16K-GGUF-smashed
PrunaAI
2024-04-23T12:50:59Z
741
2
null
[ "gguf", "pruna-ai", "region:us" ]
null
2024-04-23T11:11:01Z
---
thumbnail: "https://assets-global.website-files.com/646b351987a8d8ce158d1940/64ec9e96b4334c0e1ac41504_Logo%20with%20white%20text.svg"
metrics:
- memory_disk
- memory_inference
- inference_latency
- inference_throughput
- inference_CO2_emissions
- inference_energy_consumption
tags:
- pruna-ai
---
<!-- header start -->
<!-- 200823 -->
<div style="width: auto; margin-left: auto; margin-right: auto">
  <a href="https://www.pruna.ai/" target="_blank" rel="noopener noreferrer">
    <img src="https://i.imgur.com/eDAlcgk.png" alt="PrunaAI" style="width: 100%; min-width: 400px; display: block; margin: auto;">
  </a>
</div>
<!-- header end -->

[![Twitter](https://img.shields.io/twitter/follow/PrunaAI?style=social)](https://twitter.com/PrunaAI)
[![GitHub](https://img.shields.io/github/followers/PrunaAI?label=Follow%20%40PrunaAI&style=social)](https://github.com/PrunaAI)
[![LinkedIn](https://img.shields.io/badge/LinkedIn-Connect-blue)](https://www.linkedin.com/company/93832878/admin/feed/posts/?feedType=following)
[![Discord](https://img.shields.io/badge/Discord-Join%20Us-blue?style=social&logo=discord)](https://discord.gg/CP4VSgck)

## This repo contains GGUF versions of the mattshumer/Llama-3-8B-16K model.

# Simply make AI models cheaper, smaller, faster, and greener!

- Give a thumbs up if you like this model!
- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
- Request access to easily compress your *own* AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
- Read the documentation to learn more [here](https://pruna-ai-pruna.readthedocs-hosted.com/en/latest/).
- Join the Pruna AI community on Discord [here](https://discord.gg/CP4VSgck) to share feedback/suggestions or get help.

**Frequently Asked Questions**
- ***How does the compression work?*** The model is compressed with GGUF.
- ***How does the model quality change?*** The quality of the model output might vary compared to the base model.
- ***What is the model format?*** We use the GGUF format.
- ***What calibration data has been used?*** If needed by the compression method, we used WikiText as the calibration data.
- ***How to compress my own models?*** You can request premium access to more compression methods and tech support for your specific use-cases [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).

# Downloading and running the models

You can download the individual files from the Files & versions section. Here is a list of the different versions we provide. For more info, check out [this chart](https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9) and [this guide](https://www.reddit.com/r/LocalLLaMA/comments/1ba55rj/overview_of_gguf_quantization_methods/):

| Quant type | Description |
|------------|--------------------------------------------------------------------------------------------|
| Q5_K_M | High quality, recommended. |
| Q5_K_S | High quality, recommended. |
| Q4_K_M | Good quality, uses about 4.83 bits per weight, recommended. |
| Q4_K_S | Slightly lower quality with more space savings, recommended. |
| IQ4_NL | Decent quality, slightly smaller than Q4_K_S with similar performance, recommended. |
| IQ4_XS | Decent quality, smaller than Q4_K_S with similar performance, recommended. |
| Q3_K_L | Lower quality but usable, good for low RAM availability. |
| Q3_K_M | Even lower quality. |
| IQ3_M | Medium-low quality, new method with decent performance comparable to Q3_K_M. |
| IQ3_S | Lower quality, new method with decent performance, recommended over Q3_K_S quant, same size with better performance. |
| Q3_K_S | Low quality, not recommended. |
| IQ3_XS | Lower quality, new method with decent performance, slightly better than Q3_K_S. |
| Q2_K | Very low quality but surprisingly usable. |

## How to download GGUF files?

**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file.

The following clients/libraries will automatically download models for you, providing a list of available models to choose from:

* LM Studio
* LoLLMS Web UI
* Faraday.dev

- **Option A** - Downloading in `text-generation-webui`:
  - **Step 1**: Under Download Model, you can enter the model repo: PrunaAI/Llama-3-8B-16K-GGUF-smashed and below it, a specific filename to download, such as: Llama-3-8B-16K.IQ3_M.gguf.
  - **Step 2**: Then click Download.

- **Option B** - Downloading on the command line (including multiple files at once):
  - **Step 1**: We recommend using the `huggingface-hub` Python library:
    ```shell
    pip3 install huggingface-hub
    ```
  - **Step 2**: Then you can download any individual model file to the current directory, at high speed, with a command like this:
    ```shell
    huggingface-cli download PrunaAI/Llama-3-8B-16K-GGUF-smashed Llama-3-8B-16K.IQ3_M.gguf --local-dir . --local-dir-use-symlinks False
    ```

<details>
  <summary>More advanced huggingface-cli download usage (click to read)</summary>

Alternatively, you can also download multiple files at once with a pattern:

```shell
huggingface-cli download PrunaAI/Llama-3-8B-16K-GGUF-smashed --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
```

For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).

To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:

```shell
pip3 install hf_transfer
```

And set the environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:

```shell
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download PrunaAI/Llama-3-8B-16K-GGUF-smashed Llama-3-8B-16K.IQ3_M.gguf --local-dir . --local-dir-use-symlinks False
```

Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.
</details>
<!-- README_GGUF.md-how-to-download end -->
<!-- README_GGUF.md-how-to-run start -->

## How to run the model in GGUF format?

- **Option A** - Introductory example with `llama.cpp` command

Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.

```shell
./main -ngl 35 -m Llama-3-8B-16K.IQ3_M.gguf --color -c 32768 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "<s>[INST] {prompt} [/INST]"
```

Change `-ngl 35` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.

Change `-c 32768` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value.

If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`.

For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md).

- **Option B** - Running in `text-generation-webui`

Further instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20-%20Model%20Tab.md#llamacpp).

- **Option C** - Running from Python code

You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. Therefore I recommend you use llama-cpp-python.

### How to load this model in Python code, using llama-cpp-python

For full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/).

#### First install the package

Run one of the following commands, according to your system:

```shell
# Base llama-cpp-python with no GPU acceleration
pip install llama-cpp-python

# With NVidia CUDA acceleration
CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python

# Or with OpenBLAS acceleration
CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python

# Or with CLBLast acceleration
CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python

# Or with AMD ROCm GPU acceleration (Linux only)
CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python

# Or with Metal GPU acceleration for macOS systems only
CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python

# On Windows, to set the CMAKE_ARGS variable in PowerShell, follow this format; eg for NVidia CUDA:
$env:CMAKE_ARGS = "-DLLAMA_CUBLAS=on"
pip install llama-cpp-python
```

#### Simple llama-cpp-python example code

```python
from llama_cpp import Llama

# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
llm = Llama(
    model_path="./Llama-3-8B-16K.IQ3_M.gguf",  # Download the model file first
    n_ctx=32768,      # The max sequence length to use - note that longer sequence lengths require much more resources
    n_threads=8,      # The number of CPU threads to use, tailor to your system and the resulting performance
    n_gpu_layers=35,  # The number of layers to offload to GPU, if you have GPU acceleration available
)

# Simple inference example
output = llm(
    "<s>[INST] {prompt} [/INST]",  # Prompt
    max_tokens=512,  # Generate up to 512 tokens
    stop=["</s>"],   # Example stop token - not necessarily correct for this specific model! Please check before using.
    echo=True,       # Whether to echo the prompt
)

# Chat Completion API
llm = Llama(model_path="./Llama-3-8B-16K.IQ3_M.gguf", chat_format="llama-2")  # Set chat_format according to the model you are using
llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a story writing assistant."},
        {
            "role": "user",
            "content": "Write a story about llamas."
        }
    ]
)
```

- **Option D** - Running with LangChain

Here are guides on using llama-cpp-python and ctransformers with LangChain:

* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)

## Configurations

The configuration info is in `smash_config.json`.

## Credits & License

The license of the smashed model follows the license of the original model. Please check the license of the original model that provided the base model before using this one. The license of the `pruna-engine` is [here](https://pypi.org/project/pruna-engine/) on PyPI.

## Want to compress other models?

- Contact us and tell us which model to compress next [here](https://www.pruna.ai/contact).
- Request access to easily compress your own AI models [here](https://z0halsaff74.typeform.com/pruna-access?typeform-source=www.pruna.ai).
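For scripted setups, the CLI downloads above have a direct Python equivalent in the `huggingface_hub` API. A small sketch under the same assumptions (the IQ3_M file as the target):

```python
from huggingface_hub import hf_hub_download  # pip3 install huggingface-hub

# Downloads a single quant file into the local HF cache and returns its path.
local_path = hf_hub_download(
    repo_id="PrunaAI/Llama-3-8B-16K-GGUF-smashed",
    filename="Llama-3-8B-16K.IQ3_M.gguf",
)
print(local_path)
```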
VerificadoProfesional/SaBERT-Spanish-Sentiment-Analysis
VerificadoProfesional
2024-05-28T00:05:56Z
741
16
transformers
[ "transformers", "safetensors", "bert", "text-classification", "es", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2024-04-24T15:16:01Z
---
license: apache-2.0
language:
- es
metrics:
- accuracy
pipeline_tag: text-classification
widget:
- text: Te quiero. Te amo
  output:
  - label: 'Positive'
    score: 1.000
  - label: 'Negative'
    score: 0.000
---

# Spanish Sentiment Analysis Classifier

## Overview

This BERT-based text classifier was developed as a thesis project for the Computer Engineering degree at Universidad de Buenos Aires (UBA). The model is designed to detect sentiment in Spanish and was fine-tuned on the *dccuchile/bert-base-spanish-wwm-uncased* model using a specific set of hyperparameters. It was trained on a dataset of 11,500 Spanish tweets, both positive and negative, collected from various regions. These tweets were sourced from a well-curated combination of TASS datasets.

## Team Members

- **[Azul Fuentes](https://github.com/azu26)**
- **[Dante Reinaudo](https://github.com/DanteReinaudo)**
- **[Lucía Pardo](https://github.com/luciaPardo)**
- **[Roberto Iskandarani](https://github.com/Robert-Iskandarani)**

## Model Details

* **Base Model**: dccuchile/bert-base-spanish-wwm-uncased
* **Hyperparameters**:
  * **dropout_rate = 0.1**
  * **num_classes = 2**
  * **max_length = 128**
  * **batch_size = 16**
  * **num_epochs = 5**
  * **learning_rate = 3e-5**
* **Dataset**: 11,500 Spanish tweets (Positive and Negative)

## Metrics

The model's performance was evaluated using the following metrics:

* **Accuracy = _86.47%_**
* **F1-Score = _86.47%_**
* **Precision = _86.46%_**
* **Recall = _86.51%_**

## Usage

### Installation

You can install the required dependencies using pip:

```bash
pip install transformers torch
```

### Loading the Model

```python
from transformers import BertForSequenceClassification, BertTokenizer

model = BertForSequenceClassification.from_pretrained("VerificadoProfesional/SaBERT-Spanish-Sentiment-Analysis")
tokenizer = BertTokenizer.from_pretrained("VerificadoProfesional/SaBERT-Spanish-Sentiment-Analysis")
```

### Predict Function

```python
import torch


def predict(model, tokenizer, text, threshold=0.5):
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=128)
    with torch.no_grad():
        outputs = model(**inputs)

    logits = outputs.logits
    probabilities = torch.softmax(logits, dim=1).squeeze().tolist()
    predicted_class = torch.argmax(logits, dim=1).item()
    # Demote low-confidence positive predictions to negative.
    if probabilities[predicted_class] <= threshold and predicted_class == 1:
        predicted_class = 0

    return bool(predicted_class), probabilities
```

### Making Predictions

```python
text = "Your Spanish text here"

predicted_label, probabilities = predict(model, tokenizer, text)
print(f"Text: {text}")
print(f"Predicted Class: {predicted_label}")
print(f"Probabilities: {probabilities}")
```

## License

* Apache License 2.0
* [TASS Dataset license](http://tass.sepln.org/tass_data/download.php)

## Acknowledgments

Special thanks to DCC UChile for the base Spanish BERT model and to all contributors to the dataset used for training.
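Where many texts need to be scored at once, the single-text `predict` above can be batched. The sketch below is one way to do it (not part of the original card); it reuses the `model` and `tokenizer` loaded above and keeps the same threshold rule:

```python
import torch


def predict_batch(model, tokenizer, texts, threshold=0.5):
    inputs = tokenizer(texts, return_tensors="pt", padding=True,
                       truncation=True, max_length=128)
    with torch.no_grad():
        logits = model(**inputs).logits

    probs = torch.softmax(logits, dim=1)
    preds = torch.argmax(logits, dim=1)
    # Same rule as predict(): demote low-confidence positives to negative.
    for i in range(len(texts)):
        if preds[i] == 1 and probs[i, 1] <= threshold:
            preds[i] = 0
    return [(bool(p), probs[i].tolist()) for i, p in enumerate(preds)]


print(predict_batch(model, tokenizer, ["Te quiero. Te amo", "Qué día tan horrible"]))
```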
ekorman-strive/bge-large-en-v1.5
ekorman-strive
2024-05-21T04:15:32Z
741
0
sentence-transformers
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "transformers", "mteb", "en", "arxiv:2401.03462", "arxiv:2312.15503", "arxiv:2311.13534", "arxiv:2310.07554", "arxiv:2309.07597", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
feature-extraction
2024-05-20T23:31:23Z
--- tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers - mteb model-index: - name: bge-large-en-v1.5 results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 75.8507462686567 - type: ap value: 38.566457320228245 - type: f1 value: 69.69386648043475 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 92.416675 - type: ap value: 89.1928861155922 - type: f1 value: 92.39477019574215 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 48.175999999999995 - type: f1 value: 47.80712792870253 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 40.184999999999995 - type: map_at_10 value: 55.654 - type: map_at_100 value: 56.25 - type: map_at_1000 value: 56.255 - type: map_at_3 value: 51.742999999999995 - type: map_at_5 value: 54.129000000000005 - type: mrr_at_1 value: 40.967 - type: mrr_at_10 value: 55.96 - type: mrr_at_100 value: 56.54900000000001 - type: mrr_at_1000 value: 56.554 - type: mrr_at_3 value: 51.980000000000004 - type: mrr_at_5 value: 54.44 - type: ndcg_at_1 value: 40.184999999999995 - type: ndcg_at_10 value: 63.542 - type: ndcg_at_100 value: 65.96499999999999 - type: ndcg_at_1000 value: 66.08699999999999 - type: ndcg_at_3 value: 55.582 - type: ndcg_at_5 value: 59.855000000000004 - type: precision_at_1 value: 40.184999999999995 - type: precision_at_10 value: 8.841000000000001 - type: precision_at_100 value: 0.987 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 22.238 - type: precision_at_5 value: 15.405 - type: recall_at_1 value: 40.184999999999995 - type: recall_at_10 value: 88.407 - type: recall_at_100 value: 98.72 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 66.714 - type: recall_at_5 value: 77.027 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 48.567077926750066 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 43.19453389182364 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 64.46555939623092 - type: mrr value: 77.82361605768807 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 84.9554128814735 - type: cos_sim_spearman value: 84.65373612172036 - type: euclidean_pearson value: 83.2905059954138 - type: euclidean_spearman value: 84.52240782811128 - type: manhattan_pearson value: 82.99533802997436 - type: manhattan_spearman value: 84.20673798475734 - task: type: Classification dataset: type: mteb/banking77 
name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 87.78896103896103 - type: f1 value: 87.77189310964883 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 39.714538337650495 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 36.90108349284447 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 32.795 - type: map_at_10 value: 43.669000000000004 - type: map_at_100 value: 45.151 - type: map_at_1000 value: 45.278 - type: map_at_3 value: 40.006 - type: map_at_5 value: 42.059999999999995 - type: mrr_at_1 value: 39.771 - type: mrr_at_10 value: 49.826 - type: mrr_at_100 value: 50.504000000000005 - type: mrr_at_1000 value: 50.549 - type: mrr_at_3 value: 47.115 - type: mrr_at_5 value: 48.832 - type: ndcg_at_1 value: 39.771 - type: ndcg_at_10 value: 50.217999999999996 - type: ndcg_at_100 value: 55.454 - type: ndcg_at_1000 value: 57.37 - type: ndcg_at_3 value: 44.885000000000005 - type: ndcg_at_5 value: 47.419 - type: precision_at_1 value: 39.771 - type: precision_at_10 value: 9.642000000000001 - type: precision_at_100 value: 1.538 - type: precision_at_1000 value: 0.198 - type: precision_at_3 value: 21.268 - type: precision_at_5 value: 15.536 - type: recall_at_1 value: 32.795 - type: recall_at_10 value: 62.580999999999996 - type: recall_at_100 value: 84.438 - type: recall_at_1000 value: 96.492 - type: recall_at_3 value: 47.071000000000005 - type: recall_at_5 value: 54.079 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 32.671 - type: map_at_10 value: 43.334 - type: map_at_100 value: 44.566 - type: map_at_1000 value: 44.702999999999996 - type: map_at_3 value: 40.343 - type: map_at_5 value: 41.983 - type: mrr_at_1 value: 40.764 - type: mrr_at_10 value: 49.382 - type: mrr_at_100 value: 49.988 - type: mrr_at_1000 value: 50.03300000000001 - type: mrr_at_3 value: 47.293 - type: mrr_at_5 value: 48.51 - type: ndcg_at_1 value: 40.764 - type: ndcg_at_10 value: 49.039 - type: ndcg_at_100 value: 53.259 - type: ndcg_at_1000 value: 55.253 - type: ndcg_at_3 value: 45.091 - type: ndcg_at_5 value: 46.839999999999996 - type: precision_at_1 value: 40.764 - type: precision_at_10 value: 9.191 - type: precision_at_100 value: 1.476 - type: precision_at_1000 value: 0.19499999999999998 - type: precision_at_3 value: 21.72 - type: precision_at_5 value: 15.299 - type: recall_at_1 value: 32.671 - type: recall_at_10 value: 58.816 - type: recall_at_100 value: 76.654 - type: recall_at_1000 value: 89.05999999999999 - type: recall_at_3 value: 46.743 - type: recall_at_5 value: 51.783 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGamingRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 40.328 - type: map_at_10 value: 53.32599999999999 - type: map_at_100 value: 54.37499999999999 - type: map_at_1000 value: 54.429 - type: map_at_3 value: 49.902 - type: map_at_5 value: 52.002 - type: mrr_at_1 value: 46.332 - type: mrr_at_10 
value: 56.858 - type: mrr_at_100 value: 57.522 - type: mrr_at_1000 value: 57.54899999999999 - type: mrr_at_3 value: 54.472 - type: mrr_at_5 value: 55.996 - type: ndcg_at_1 value: 46.332 - type: ndcg_at_10 value: 59.313 - type: ndcg_at_100 value: 63.266999999999996 - type: ndcg_at_1000 value: 64.36 - type: ndcg_at_3 value: 53.815000000000005 - type: ndcg_at_5 value: 56.814 - type: precision_at_1 value: 46.332 - type: precision_at_10 value: 9.53 - type: precision_at_100 value: 1.238 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 24.054000000000002 - type: precision_at_5 value: 16.589000000000002 - type: recall_at_1 value: 40.328 - type: recall_at_10 value: 73.421 - type: recall_at_100 value: 90.059 - type: recall_at_1000 value: 97.81 - type: recall_at_3 value: 59.009 - type: recall_at_5 value: 66.352 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGisRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 27.424 - type: map_at_10 value: 36.332 - type: map_at_100 value: 37.347 - type: map_at_1000 value: 37.422 - type: map_at_3 value: 33.743 - type: map_at_5 value: 35.176 - type: mrr_at_1 value: 29.153000000000002 - type: mrr_at_10 value: 38.233 - type: mrr_at_100 value: 39.109 - type: mrr_at_1000 value: 39.164 - type: mrr_at_3 value: 35.876000000000005 - type: mrr_at_5 value: 37.169000000000004 - type: ndcg_at_1 value: 29.153000000000002 - type: ndcg_at_10 value: 41.439 - type: ndcg_at_100 value: 46.42 - type: ndcg_at_1000 value: 48.242000000000004 - type: ndcg_at_3 value: 36.362 - type: ndcg_at_5 value: 38.743 - type: precision_at_1 value: 29.153000000000002 - type: precision_at_10 value: 6.315999999999999 - type: precision_at_100 value: 0.927 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 15.443000000000001 - type: precision_at_5 value: 10.644 - type: recall_at_1 value: 27.424 - type: recall_at_10 value: 55.364000000000004 - type: recall_at_100 value: 78.211 - type: recall_at_1000 value: 91.74600000000001 - type: recall_at_3 value: 41.379 - type: recall_at_5 value: 47.14 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 19.601 - type: map_at_10 value: 27.826 - type: map_at_100 value: 29.017 - type: map_at_1000 value: 29.137 - type: map_at_3 value: 25.125999999999998 - type: map_at_5 value: 26.765 - type: mrr_at_1 value: 24.005000000000003 - type: mrr_at_10 value: 32.716 - type: mrr_at_100 value: 33.631 - type: mrr_at_1000 value: 33.694 - type: mrr_at_3 value: 29.934 - type: mrr_at_5 value: 31.630999999999997 - type: ndcg_at_1 value: 24.005000000000003 - type: ndcg_at_10 value: 33.158 - type: ndcg_at_100 value: 38.739000000000004 - type: ndcg_at_1000 value: 41.495 - type: ndcg_at_3 value: 28.185 - type: ndcg_at_5 value: 30.796 - type: precision_at_1 value: 24.005000000000003 - type: precision_at_10 value: 5.908 - type: precision_at_100 value: 1.005 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 13.391 - type: precision_at_5 value: 9.876 - type: recall_at_1 value: 19.601 - type: recall_at_10 value: 44.746 - type: recall_at_100 value: 68.82300000000001 - type: recall_at_1000 value: 88.215 - type: recall_at_3 value: 31.239 - type: recall_at_5 value: 37.695 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackPhysicsRetrieval config: default split: test revision: None metrics: - 
type: map_at_1 value: 30.130000000000003 - type: map_at_10 value: 40.96 - type: map_at_100 value: 42.282 - type: map_at_1000 value: 42.392 - type: map_at_3 value: 37.889 - type: map_at_5 value: 39.661 - type: mrr_at_1 value: 36.958999999999996 - type: mrr_at_10 value: 46.835 - type: mrr_at_100 value: 47.644 - type: mrr_at_1000 value: 47.688 - type: mrr_at_3 value: 44.562000000000005 - type: mrr_at_5 value: 45.938 - type: ndcg_at_1 value: 36.958999999999996 - type: ndcg_at_10 value: 47.06 - type: ndcg_at_100 value: 52.345 - type: ndcg_at_1000 value: 54.35 - type: ndcg_at_3 value: 42.301 - type: ndcg_at_5 value: 44.635999999999996 - type: precision_at_1 value: 36.958999999999996 - type: precision_at_10 value: 8.479000000000001 - type: precision_at_100 value: 1.284 - type: precision_at_1000 value: 0.163 - type: precision_at_3 value: 20.244 - type: precision_at_5 value: 14.224999999999998 - type: recall_at_1 value: 30.130000000000003 - type: recall_at_10 value: 59.27 - type: recall_at_100 value: 81.195 - type: recall_at_1000 value: 94.21199999999999 - type: recall_at_3 value: 45.885 - type: recall_at_5 value: 52.016 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 26.169999999999998 - type: map_at_10 value: 36.451 - type: map_at_100 value: 37.791000000000004 - type: map_at_1000 value: 37.897 - type: map_at_3 value: 33.109 - type: map_at_5 value: 34.937000000000005 - type: mrr_at_1 value: 32.877 - type: mrr_at_10 value: 42.368 - type: mrr_at_100 value: 43.201 - type: mrr_at_1000 value: 43.259 - type: mrr_at_3 value: 39.763999999999996 - type: mrr_at_5 value: 41.260000000000005 - type: ndcg_at_1 value: 32.877 - type: ndcg_at_10 value: 42.659000000000006 - type: ndcg_at_100 value: 48.161 - type: ndcg_at_1000 value: 50.345 - type: ndcg_at_3 value: 37.302 - type: ndcg_at_5 value: 39.722 - type: precision_at_1 value: 32.877 - type: precision_at_10 value: 7.9 - type: precision_at_100 value: 1.236 - type: precision_at_1000 value: 0.158 - type: precision_at_3 value: 17.846 - type: precision_at_5 value: 12.9 - type: recall_at_1 value: 26.169999999999998 - type: recall_at_10 value: 55.35 - type: recall_at_100 value: 78.755 - type: recall_at_1000 value: 93.518 - type: recall_at_3 value: 40.176 - type: recall_at_5 value: 46.589000000000006 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 27.15516666666667 - type: map_at_10 value: 36.65741666666667 - type: map_at_100 value: 37.84991666666666 - type: map_at_1000 value: 37.96316666666667 - type: map_at_3 value: 33.74974999999999 - type: map_at_5 value: 35.3765 - type: mrr_at_1 value: 32.08233333333334 - type: mrr_at_10 value: 41.033833333333334 - type: mrr_at_100 value: 41.84524999999999 - type: mrr_at_1000 value: 41.89983333333333 - type: mrr_at_3 value: 38.62008333333333 - type: mrr_at_5 value: 40.03441666666666 - type: ndcg_at_1 value: 32.08233333333334 - type: ndcg_at_10 value: 42.229 - type: ndcg_at_100 value: 47.26716666666667 - type: ndcg_at_1000 value: 49.43466666666667 - type: ndcg_at_3 value: 37.36408333333333 - type: ndcg_at_5 value: 39.6715 - type: precision_at_1 value: 32.08233333333334 - type: precision_at_10 value: 7.382583333333334 - type: precision_at_100 value: 1.16625 - type: precision_at_1000 value: 0.15408333333333332 - type: precision_at_3 value: 17.218 - type: precision_at_5 value: 12.21875 - type: 
recall_at_1 value: 27.15516666666667 - type: recall_at_10 value: 54.36683333333333 - type: recall_at_100 value: 76.37183333333333 - type: recall_at_1000 value: 91.26183333333333 - type: recall_at_3 value: 40.769916666666674 - type: recall_at_5 value: 46.702333333333335 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackStatsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 25.749 - type: map_at_10 value: 33.001999999999995 - type: map_at_100 value: 33.891 - type: map_at_1000 value: 33.993 - type: map_at_3 value: 30.703999999999997 - type: map_at_5 value: 31.959 - type: mrr_at_1 value: 28.834 - type: mrr_at_10 value: 35.955 - type: mrr_at_100 value: 36.709 - type: mrr_at_1000 value: 36.779 - type: mrr_at_3 value: 33.947 - type: mrr_at_5 value: 35.089 - type: ndcg_at_1 value: 28.834 - type: ndcg_at_10 value: 37.329 - type: ndcg_at_100 value: 41.79 - type: ndcg_at_1000 value: 44.169000000000004 - type: ndcg_at_3 value: 33.184999999999995 - type: ndcg_at_5 value: 35.107 - type: precision_at_1 value: 28.834 - type: precision_at_10 value: 5.7669999999999995 - type: precision_at_100 value: 0.876 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 14.213000000000001 - type: precision_at_5 value: 9.754999999999999 - type: recall_at_1 value: 25.749 - type: recall_at_10 value: 47.791 - type: recall_at_100 value: 68.255 - type: recall_at_1000 value: 85.749 - type: recall_at_3 value: 36.199 - type: recall_at_5 value: 41.071999999999996 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackTexRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 17.777 - type: map_at_10 value: 25.201 - type: map_at_100 value: 26.423999999999996 - type: map_at_1000 value: 26.544 - type: map_at_3 value: 22.869 - type: map_at_5 value: 24.023 - type: mrr_at_1 value: 21.473 - type: mrr_at_10 value: 29.12 - type: mrr_at_100 value: 30.144 - type: mrr_at_1000 value: 30.215999999999998 - type: mrr_at_3 value: 26.933 - type: mrr_at_5 value: 28.051 - type: ndcg_at_1 value: 21.473 - type: ndcg_at_10 value: 30.003 - type: ndcg_at_100 value: 35.766 - type: ndcg_at_1000 value: 38.501000000000005 - type: ndcg_at_3 value: 25.773000000000003 - type: ndcg_at_5 value: 27.462999999999997 - type: precision_at_1 value: 21.473 - type: precision_at_10 value: 5.482 - type: precision_at_100 value: 0.975 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 12.205 - type: precision_at_5 value: 8.692 - type: recall_at_1 value: 17.777 - type: recall_at_10 value: 40.582 - type: recall_at_100 value: 66.305 - type: recall_at_1000 value: 85.636 - type: recall_at_3 value: 28.687 - type: recall_at_5 value: 33.089 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackUnixRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 26.677 - type: map_at_10 value: 36.309000000000005 - type: map_at_100 value: 37.403999999999996 - type: map_at_1000 value: 37.496 - type: map_at_3 value: 33.382 - type: map_at_5 value: 34.98 - type: mrr_at_1 value: 31.343 - type: mrr_at_10 value: 40.549 - type: mrr_at_100 value: 41.342 - type: mrr_at_1000 value: 41.397 - type: mrr_at_3 value: 38.029 - type: mrr_at_5 value: 39.451 - type: ndcg_at_1 value: 31.343 - type: ndcg_at_10 value: 42.1 - type: ndcg_at_100 value: 47.089999999999996 - type: ndcg_at_1000 value: 49.222 - type: ndcg_at_3 value: 36.836999999999996 - type: ndcg_at_5 value: 39.21 - type: 
precision_at_1 value: 31.343 - type: precision_at_10 value: 7.164 - type: precision_at_100 value: 1.0959999999999999 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 16.915 - type: precision_at_5 value: 11.940000000000001 - type: recall_at_1 value: 26.677 - type: recall_at_10 value: 55.54599999999999 - type: recall_at_100 value: 77.094 - type: recall_at_1000 value: 92.01 - type: recall_at_3 value: 41.191 - type: recall_at_5 value: 47.006 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 24.501 - type: map_at_10 value: 33.102 - type: map_at_100 value: 34.676 - type: map_at_1000 value: 34.888000000000005 - type: map_at_3 value: 29.944 - type: map_at_5 value: 31.613999999999997 - type: mrr_at_1 value: 29.447000000000003 - type: mrr_at_10 value: 37.996 - type: mrr_at_100 value: 38.946 - type: mrr_at_1000 value: 38.995000000000005 - type: mrr_at_3 value: 35.079 - type: mrr_at_5 value: 36.69 - type: ndcg_at_1 value: 29.447000000000003 - type: ndcg_at_10 value: 39.232 - type: ndcg_at_100 value: 45.247 - type: ndcg_at_1000 value: 47.613 - type: ndcg_at_3 value: 33.922999999999995 - type: ndcg_at_5 value: 36.284 - type: precision_at_1 value: 29.447000000000003 - type: precision_at_10 value: 7.648000000000001 - type: precision_at_100 value: 1.516 - type: precision_at_1000 value: 0.23900000000000002 - type: precision_at_3 value: 16.008 - type: precision_at_5 value: 11.779 - type: recall_at_1 value: 24.501 - type: recall_at_10 value: 51.18899999999999 - type: recall_at_100 value: 78.437 - type: recall_at_1000 value: 92.842 - type: recall_at_3 value: 35.808 - type: recall_at_5 value: 42.197 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 22.039 - type: map_at_10 value: 30.377 - type: map_at_100 value: 31.275 - type: map_at_1000 value: 31.379 - type: map_at_3 value: 27.98 - type: map_at_5 value: 29.358 - type: mrr_at_1 value: 24.03 - type: mrr_at_10 value: 32.568000000000005 - type: mrr_at_100 value: 33.403 - type: mrr_at_1000 value: 33.475 - type: mrr_at_3 value: 30.436999999999998 - type: mrr_at_5 value: 31.796000000000003 - type: ndcg_at_1 value: 24.03 - type: ndcg_at_10 value: 35.198 - type: ndcg_at_100 value: 39.668 - type: ndcg_at_1000 value: 42.296 - type: ndcg_at_3 value: 30.709999999999997 - type: ndcg_at_5 value: 33.024 - type: precision_at_1 value: 24.03 - type: precision_at_10 value: 5.564 - type: precision_at_100 value: 0.828 - type: precision_at_1000 value: 0.117 - type: precision_at_3 value: 13.309000000000001 - type: precision_at_5 value: 9.39 - type: recall_at_1 value: 22.039 - type: recall_at_10 value: 47.746 - type: recall_at_100 value: 68.23599999999999 - type: recall_at_1000 value: 87.852 - type: recall_at_3 value: 35.852000000000004 - type: recall_at_5 value: 41.410000000000004 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: None metrics: - type: map_at_1 value: 15.692999999999998 - type: map_at_10 value: 26.903 - type: map_at_100 value: 28.987000000000002 - type: map_at_1000 value: 29.176999999999996 - type: map_at_3 value: 22.137 - type: map_at_5 value: 24.758 - type: mrr_at_1 value: 35.57 - type: mrr_at_10 value: 47.821999999999996 - type: mrr_at_100 value: 48.608000000000004 - type: mrr_at_1000 value: 48.638999999999996 - type: 
mrr_at_3 value: 44.452000000000005 - type: mrr_at_5 value: 46.546 - type: ndcg_at_1 value: 35.57 - type: ndcg_at_10 value: 36.567 - type: ndcg_at_100 value: 44.085 - type: ndcg_at_1000 value: 47.24 - type: ndcg_at_3 value: 29.964000000000002 - type: ndcg_at_5 value: 32.511 - type: precision_at_1 value: 35.57 - type: precision_at_10 value: 11.485 - type: precision_at_100 value: 1.9619999999999997 - type: precision_at_1000 value: 0.256 - type: precision_at_3 value: 22.237000000000002 - type: precision_at_5 value: 17.471999999999998 - type: recall_at_1 value: 15.692999999999998 - type: recall_at_10 value: 43.056 - type: recall_at_100 value: 68.628 - type: recall_at_1000 value: 86.075 - type: recall_at_3 value: 26.918999999999997 - type: recall_at_5 value: 34.14 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: None metrics: - type: map_at_1 value: 9.53 - type: map_at_10 value: 20.951 - type: map_at_100 value: 30.136000000000003 - type: map_at_1000 value: 31.801000000000002 - type: map_at_3 value: 15.021 - type: map_at_5 value: 17.471999999999998 - type: mrr_at_1 value: 71.0 - type: mrr_at_10 value: 79.176 - type: mrr_at_100 value: 79.418 - type: mrr_at_1000 value: 79.426 - type: mrr_at_3 value: 78.125 - type: mrr_at_5 value: 78.61200000000001 - type: ndcg_at_1 value: 58.5 - type: ndcg_at_10 value: 44.106 - type: ndcg_at_100 value: 49.268 - type: ndcg_at_1000 value: 56.711999999999996 - type: ndcg_at_3 value: 48.934 - type: ndcg_at_5 value: 45.826 - type: precision_at_1 value: 71.0 - type: precision_at_10 value: 35.0 - type: precision_at_100 value: 11.360000000000001 - type: precision_at_1000 value: 2.046 - type: precision_at_3 value: 52.833 - type: precision_at_5 value: 44.15 - type: recall_at_1 value: 9.53 - type: recall_at_10 value: 26.811 - type: recall_at_100 value: 55.916999999999994 - type: recall_at_1000 value: 79.973 - type: recall_at_3 value: 16.413 - type: recall_at_5 value: 19.980999999999998 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 51.519999999999996 - type: f1 value: 46.36601294761231 - task: type: Retrieval dataset: type: fever name: MTEB FEVER config: default split: test revision: None metrics: - type: map_at_1 value: 74.413 - type: map_at_10 value: 83.414 - type: map_at_100 value: 83.621 - type: map_at_1000 value: 83.635 - type: map_at_3 value: 82.337 - type: map_at_5 value: 83.039 - type: mrr_at_1 value: 80.19800000000001 - type: mrr_at_10 value: 87.715 - type: mrr_at_100 value: 87.778 - type: mrr_at_1000 value: 87.779 - type: mrr_at_3 value: 87.106 - type: mrr_at_5 value: 87.555 - type: ndcg_at_1 value: 80.19800000000001 - type: ndcg_at_10 value: 87.182 - type: ndcg_at_100 value: 87.90299999999999 - type: ndcg_at_1000 value: 88.143 - type: ndcg_at_3 value: 85.60600000000001 - type: ndcg_at_5 value: 86.541 - type: precision_at_1 value: 80.19800000000001 - type: precision_at_10 value: 10.531 - type: precision_at_100 value: 1.113 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_3 value: 32.933 - type: precision_at_5 value: 20.429 - type: recall_at_1 value: 74.413 - type: recall_at_10 value: 94.363 - type: recall_at_100 value: 97.165 - type: recall_at_1000 value: 98.668 - type: recall_at_3 value: 90.108 - type: recall_at_5 value: 92.52 - task: type: Retrieval dataset: type: fiqa name: MTEB FiQA2018 config: default split: test revision: 
None metrics: - type: map_at_1 value: 22.701 - type: map_at_10 value: 37.122 - type: map_at_100 value: 39.178000000000004 - type: map_at_1000 value: 39.326 - type: map_at_3 value: 32.971000000000004 - type: map_at_5 value: 35.332 - type: mrr_at_1 value: 44.753 - type: mrr_at_10 value: 53.452 - type: mrr_at_100 value: 54.198 - type: mrr_at_1000 value: 54.225 - type: mrr_at_3 value: 50.952 - type: mrr_at_5 value: 52.464 - type: ndcg_at_1 value: 44.753 - type: ndcg_at_10 value: 45.021 - type: ndcg_at_100 value: 52.028 - type: ndcg_at_1000 value: 54.596000000000004 - type: ndcg_at_3 value: 41.622 - type: ndcg_at_5 value: 42.736000000000004 - type: precision_at_1 value: 44.753 - type: precision_at_10 value: 12.284 - type: precision_at_100 value: 1.955 - type: precision_at_1000 value: 0.243 - type: precision_at_3 value: 27.828999999999997 - type: precision_at_5 value: 20.061999999999998 - type: recall_at_1 value: 22.701 - type: recall_at_10 value: 51.432 - type: recall_at_100 value: 77.009 - type: recall_at_1000 value: 92.511 - type: recall_at_3 value: 37.919000000000004 - type: recall_at_5 value: 44.131 - task: type: Retrieval dataset: type: hotpotqa name: MTEB HotpotQA config: default split: test revision: None metrics: - type: map_at_1 value: 40.189 - type: map_at_10 value: 66.24600000000001 - type: map_at_100 value: 67.098 - type: map_at_1000 value: 67.149 - type: map_at_3 value: 62.684 - type: map_at_5 value: 64.974 - type: mrr_at_1 value: 80.378 - type: mrr_at_10 value: 86.127 - type: mrr_at_100 value: 86.29299999999999 - type: mrr_at_1000 value: 86.297 - type: mrr_at_3 value: 85.31400000000001 - type: mrr_at_5 value: 85.858 - type: ndcg_at_1 value: 80.378 - type: ndcg_at_10 value: 74.101 - type: ndcg_at_100 value: 76.993 - type: ndcg_at_1000 value: 77.948 - type: ndcg_at_3 value: 69.232 - type: ndcg_at_5 value: 72.04599999999999 - type: precision_at_1 value: 80.378 - type: precision_at_10 value: 15.595999999999998 - type: precision_at_100 value: 1.7840000000000003 - type: precision_at_1000 value: 0.191 - type: precision_at_3 value: 44.884 - type: precision_at_5 value: 29.145 - type: recall_at_1 value: 40.189 - type: recall_at_10 value: 77.981 - type: recall_at_100 value: 89.21 - type: recall_at_1000 value: 95.48299999999999 - type: recall_at_3 value: 67.326 - type: recall_at_5 value: 72.863 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 92.84599999999999 - type: ap value: 89.4710787567357 - type: f1 value: 92.83752676932258 - task: type: Retrieval dataset: type: msmarco name: MTEB MSMARCO config: default split: dev revision: None metrics: - type: map_at_1 value: 23.132 - type: map_at_10 value: 35.543 - type: map_at_100 value: 36.702 - type: map_at_1000 value: 36.748999999999995 - type: map_at_3 value: 31.737 - type: map_at_5 value: 33.927 - type: mrr_at_1 value: 23.782 - type: mrr_at_10 value: 36.204 - type: mrr_at_100 value: 37.29 - type: mrr_at_1000 value: 37.330999999999996 - type: mrr_at_3 value: 32.458999999999996 - type: mrr_at_5 value: 34.631 - type: ndcg_at_1 value: 23.782 - type: ndcg_at_10 value: 42.492999999999995 - type: ndcg_at_100 value: 47.985 - type: ndcg_at_1000 value: 49.141 - type: ndcg_at_3 value: 34.748000000000005 - type: ndcg_at_5 value: 38.651 - type: precision_at_1 value: 23.782 - type: precision_at_10 value: 6.665 - type: precision_at_100 value: 0.941 - type: precision_at_1000 value: 0.104 - type: precision_at_3 
value: 14.776 - type: precision_at_5 value: 10.84 - type: recall_at_1 value: 23.132 - type: recall_at_10 value: 63.794 - type: recall_at_100 value: 89.027 - type: recall_at_1000 value: 97.807 - type: recall_at_3 value: 42.765 - type: recall_at_5 value: 52.11 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 94.59188326493388 - type: f1 value: 94.3842594786827 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 79.49384404924761 - type: f1 value: 59.7580539534629 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 77.56220578345663 - type: f1 value: 75.27228165561478 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 80.53463349024884 - type: f1 value: 80.4893958236536 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 32.56100273484962 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 31.470380028839607 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 32.06102792457849 - type: mrr value: 33.30709199672238 - task: type: Retrieval dataset: type: nfcorpus name: MTEB NFCorpus config: default split: test revision: None metrics: - type: map_at_1 value: 6.776999999999999 - type: map_at_10 value: 14.924000000000001 - type: map_at_100 value: 18.955 - type: map_at_1000 value: 20.538999999999998 - type: map_at_3 value: 10.982 - type: map_at_5 value: 12.679000000000002 - type: mrr_at_1 value: 47.988 - type: mrr_at_10 value: 57.232000000000006 - type: mrr_at_100 value: 57.818999999999996 - type: mrr_at_1000 value: 57.847 - type: mrr_at_3 value: 54.901999999999994 - type: mrr_at_5 value: 56.481 - type: ndcg_at_1 value: 46.594 - type: ndcg_at_10 value: 38.129000000000005 - type: ndcg_at_100 value: 35.54 - type: ndcg_at_1000 value: 44.172 - type: ndcg_at_3 value: 43.025999999999996 - type: ndcg_at_5 value: 41.052 - type: precision_at_1 value: 47.988 - type: precision_at_10 value: 28.111000000000004 - type: precision_at_100 value: 8.929 - type: precision_at_1000 value: 2.185 - type: precision_at_3 value: 40.144000000000005 - type: precision_at_5 value: 35.232 - type: recall_at_1 value: 6.776999999999999 - type: recall_at_10 value: 19.289 - type: recall_at_100 value: 36.359 - type: recall_at_1000 value: 67.54 - type: recall_at_3 value: 11.869 - type: recall_at_5 value: 14.999 - task: type: Retrieval dataset: type: nq name: MTEB NQ config: default split: test revision: None metrics: - type: map_at_1 value: 31.108000000000004 - type: map_at_10 value: 47.126000000000005 - type: map_at_100 value: 48.171 
- type: map_at_1000 value: 48.199 - type: map_at_3 value: 42.734 - type: map_at_5 value: 45.362 - type: mrr_at_1 value: 34.936 - type: mrr_at_10 value: 49.571 - type: mrr_at_100 value: 50.345 - type: mrr_at_1000 value: 50.363 - type: mrr_at_3 value: 45.959 - type: mrr_at_5 value: 48.165 - type: ndcg_at_1 value: 34.936 - type: ndcg_at_10 value: 55.028999999999996 - type: ndcg_at_100 value: 59.244 - type: ndcg_at_1000 value: 59.861 - type: ndcg_at_3 value: 46.872 - type: ndcg_at_5 value: 51.217999999999996 - type: precision_at_1 value: 34.936 - type: precision_at_10 value: 9.099 - type: precision_at_100 value: 1.145 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 21.456 - type: precision_at_5 value: 15.411 - type: recall_at_1 value: 31.108000000000004 - type: recall_at_10 value: 76.53999999999999 - type: recall_at_100 value: 94.39 - type: recall_at_1000 value: 98.947 - type: recall_at_3 value: 55.572 - type: recall_at_5 value: 65.525 - task: type: Retrieval dataset: type: quora name: MTEB QuoraRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 71.56400000000001 - type: map_at_10 value: 85.482 - type: map_at_100 value: 86.114 - type: map_at_1000 value: 86.13 - type: map_at_3 value: 82.607 - type: map_at_5 value: 84.405 - type: mrr_at_1 value: 82.42 - type: mrr_at_10 value: 88.304 - type: mrr_at_100 value: 88.399 - type: mrr_at_1000 value: 88.399 - type: mrr_at_3 value: 87.37 - type: mrr_at_5 value: 88.024 - type: ndcg_at_1 value: 82.45 - type: ndcg_at_10 value: 89.06500000000001 - type: ndcg_at_100 value: 90.232 - type: ndcg_at_1000 value: 90.305 - type: ndcg_at_3 value: 86.375 - type: ndcg_at_5 value: 87.85300000000001 - type: precision_at_1 value: 82.45 - type: precision_at_10 value: 13.486999999999998 - type: precision_at_100 value: 1.534 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.813 - type: precision_at_5 value: 24.773999999999997 - type: recall_at_1 value: 71.56400000000001 - type: recall_at_10 value: 95.812 - type: recall_at_100 value: 99.7 - type: recall_at_1000 value: 99.979 - type: recall_at_3 value: 87.966 - type: recall_at_5 value: 92.268 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 57.241876648614145 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 64.66212576446223 - task: type: Retrieval dataset: type: scidocs name: MTEB SCIDOCS config: default split: test revision: None metrics: - type: map_at_1 value: 5.308 - type: map_at_10 value: 13.803 - type: map_at_100 value: 16.176 - type: map_at_1000 value: 16.561 - type: map_at_3 value: 9.761000000000001 - type: map_at_5 value: 11.802 - type: mrr_at_1 value: 26.200000000000003 - type: mrr_at_10 value: 37.621 - type: mrr_at_100 value: 38.767 - type: mrr_at_1000 value: 38.815 - type: mrr_at_3 value: 34.117 - type: mrr_at_5 value: 36.107 - type: ndcg_at_1 value: 26.200000000000003 - type: ndcg_at_10 value: 22.64 - type: ndcg_at_100 value: 31.567 - type: ndcg_at_1000 value: 37.623 - type: ndcg_at_3 value: 21.435000000000002 - type: ndcg_at_5 value: 18.87 - type: precision_at_1 value: 26.200000000000003 - type: precision_at_10 value: 11.74 - type: precision_at_100 value: 2.465 - type: precision_at_1000 value: 0.391 - type: precision_at_3 
value: 20.033 - type: precision_at_5 value: 16.64 - type: recall_at_1 value: 5.308 - type: recall_at_10 value: 23.794999999999998 - type: recall_at_100 value: 50.015 - type: recall_at_1000 value: 79.283 - type: recall_at_3 value: 12.178 - type: recall_at_5 value: 16.882 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 84.93231134675553 - type: cos_sim_spearman value: 81.68319292603205 - type: euclidean_pearson value: 81.8396814380367 - type: euclidean_spearman value: 81.24641903349945 - type: manhattan_pearson value: 81.84698799204274 - type: manhattan_spearman value: 81.24269997904105 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 86.73241671587446 - type: cos_sim_spearman value: 79.05091082971826 - type: euclidean_pearson value: 83.91146869578044 - type: euclidean_spearman value: 79.87978465370936 - type: manhattan_pearson value: 83.90888338917678 - type: manhattan_spearman value: 79.87482848584241 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 85.14970731146177 - type: cos_sim_spearman value: 86.37363490084627 - type: euclidean_pearson value: 83.02154218530433 - type: euclidean_spearman value: 83.80258761957367 - type: manhattan_pearson value: 83.01664495119347 - type: manhattan_spearman value: 83.77567458007952 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 83.40474139886784 - type: cos_sim_spearman value: 82.77768789165984 - type: euclidean_pearson value: 80.7065877443695 - type: euclidean_spearman value: 81.375940662505 - type: manhattan_pearson value: 80.6507552270278 - type: manhattan_spearman value: 81.32782179098741 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 87.08585968722274 - type: cos_sim_spearman value: 88.03110031451399 - type: euclidean_pearson value: 85.74012019602384 - type: euclidean_spearman value: 86.13592849438209 - type: manhattan_pearson value: 85.74404842369206 - type: manhattan_spearman value: 86.14492318960154 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 84.95069052788875 - type: cos_sim_spearman value: 86.4867991595147 - type: euclidean_pearson value: 84.31013325754635 - type: euclidean_spearman value: 85.01529258006482 - type: manhattan_pearson value: 84.26995570085374 - type: manhattan_spearman value: 84.96982104986162 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.54617647971897 - type: cos_sim_spearman value: 87.49834181751034 - type: euclidean_pearson value: 86.01015322577122 - type: euclidean_spearman value: 84.63362652063199 - type: manhattan_pearson value: 86.13807574475706 - type: manhattan_spearman value: 84.7772370721132 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts 
name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 67.20047755786615 - type: cos_sim_spearman value: 67.05324077987636 - type: euclidean_pearson value: 66.91930642976601 - type: euclidean_spearman value: 65.21491856099105 - type: manhattan_pearson value: 66.78756851976624 - type: manhattan_spearman value: 65.12356257740728 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 86.19852871539686 - type: cos_sim_spearman value: 87.5161895296395 - type: euclidean_pearson value: 84.59848645207485 - type: euclidean_spearman value: 85.26427328757919 - type: manhattan_pearson value: 84.59747366996524 - type: manhattan_spearman value: 85.24045855146915 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 87.63320317811032 - type: mrr value: 96.26242947321379 - task: type: Retrieval dataset: type: scifact name: MTEB SciFact config: default split: test revision: None metrics: - type: map_at_1 value: 60.928000000000004 - type: map_at_10 value: 70.112 - type: map_at_100 value: 70.59299999999999 - type: map_at_1000 value: 70.623 - type: map_at_3 value: 66.846 - type: map_at_5 value: 68.447 - type: mrr_at_1 value: 64.0 - type: mrr_at_10 value: 71.212 - type: mrr_at_100 value: 71.616 - type: mrr_at_1000 value: 71.64500000000001 - type: mrr_at_3 value: 68.77799999999999 - type: mrr_at_5 value: 70.094 - type: ndcg_at_1 value: 64.0 - type: ndcg_at_10 value: 74.607 - type: ndcg_at_100 value: 76.416 - type: ndcg_at_1000 value: 77.102 - type: ndcg_at_3 value: 69.126 - type: ndcg_at_5 value: 71.41300000000001 - type: precision_at_1 value: 64.0 - type: precision_at_10 value: 9.933 - type: precision_at_100 value: 1.077 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 26.556 - type: precision_at_5 value: 17.467 - type: recall_at_1 value: 60.928000000000004 - type: recall_at_10 value: 87.322 - type: recall_at_100 value: 94.833 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 72.628 - type: recall_at_5 value: 78.428 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.86237623762376 - type: cos_sim_ap value: 96.72586477206649 - type: cos_sim_f1 value: 93.01858362631845 - type: cos_sim_precision value: 93.4409687184662 - type: cos_sim_recall value: 92.60000000000001 - type: dot_accuracy value: 99.78019801980199 - type: dot_ap value: 93.72748205246228 - type: dot_f1 value: 89.04109589041096 - type: dot_precision value: 87.16475095785441 - type: dot_recall value: 91.0 - type: euclidean_accuracy value: 99.85445544554456 - type: euclidean_ap value: 96.6661459876145 - type: euclidean_f1 value: 92.58337481333997 - type: euclidean_precision value: 92.17046580773042 - type: euclidean_recall value: 93.0 - type: manhattan_accuracy value: 99.85445544554456 - type: manhattan_ap value: 96.6883549244056 - type: manhattan_f1 value: 92.57598405580468 - type: manhattan_precision value: 92.25422045680239 - type: manhattan_recall value: 92.9 - type: max_accuracy value: 99.86237623762376 - type: max_ap value: 96.72586477206649 - 
type: max_f1 value: 93.01858362631845 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 66.39930057069995 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 34.96398659903402 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 55.946944700355395 - type: mrr value: 56.97151398438164 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.541657650692905 - type: cos_sim_spearman value: 31.605804192286303 - type: dot_pearson value: 28.26905996736398 - type: dot_spearman value: 27.864801765851187 - task: type: Retrieval dataset: type: trec-covid name: MTEB TRECCOVID config: default split: test revision: None metrics: - type: map_at_1 value: 0.22599999999999998 - type: map_at_10 value: 1.8870000000000002 - type: map_at_100 value: 9.78 - type: map_at_1000 value: 22.514 - type: map_at_3 value: 0.6669999999999999 - type: map_at_5 value: 1.077 - type: mrr_at_1 value: 82.0 - type: mrr_at_10 value: 89.86699999999999 - type: mrr_at_100 value: 89.86699999999999 - type: mrr_at_1000 value: 89.86699999999999 - type: mrr_at_3 value: 89.667 - type: mrr_at_5 value: 89.667 - type: ndcg_at_1 value: 79.0 - type: ndcg_at_10 value: 74.818 - type: ndcg_at_100 value: 53.715999999999994 - type: ndcg_at_1000 value: 47.082 - type: ndcg_at_3 value: 82.134 - type: ndcg_at_5 value: 79.81899999999999 - type: precision_at_1 value: 82.0 - type: precision_at_10 value: 78.0 - type: precision_at_100 value: 54.48 - type: precision_at_1000 value: 20.518 - type: precision_at_3 value: 87.333 - type: precision_at_5 value: 85.2 - type: recall_at_1 value: 0.22599999999999998 - type: recall_at_10 value: 2.072 - type: recall_at_100 value: 13.013 - type: recall_at_1000 value: 43.462 - type: recall_at_3 value: 0.695 - type: recall_at_5 value: 1.139 - task: type: Retrieval dataset: type: webis-touche2020 name: MTEB Touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.328 - type: map_at_10 value: 9.795 - type: map_at_100 value: 15.801000000000002 - type: map_at_1000 value: 17.23 - type: map_at_3 value: 4.734 - type: map_at_5 value: 6.644 - type: mrr_at_1 value: 30.612000000000002 - type: mrr_at_10 value: 46.902 - type: mrr_at_100 value: 47.495 - type: mrr_at_1000 value: 47.495 - type: mrr_at_3 value: 41.156 - type: mrr_at_5 value: 44.218 - type: ndcg_at_1 value: 28.571 - type: ndcg_at_10 value: 24.806 - type: ndcg_at_100 value: 36.419000000000004 - type: ndcg_at_1000 value: 47.272999999999996 - type: ndcg_at_3 value: 25.666 - type: ndcg_at_5 value: 25.448999999999998 - type: precision_at_1 value: 30.612000000000002 - type: precision_at_10 value: 23.061 - type: precision_at_100 value: 7.714 - type: precision_at_1000 value: 1.484 - type: precision_at_3 value: 26.531 - type: precision_at_5 value: 26.122 - type: recall_at_1 value: 2.328 - type: recall_at_10 value: 16.524 - type: recall_at_100 value: 47.179 - type: recall_at_1000 value: 81.22200000000001 - type: 
recall_at_3 value: 5.745 - type: recall_at_5 value: 9.339 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 70.9142 - type: ap value: 14.335574772555415 - type: f1 value: 54.62839595194111 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 59.94340690435768 - type: f1 value: 60.286487936731916 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 51.26597708987974 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 87.48882398521786 - type: cos_sim_ap value: 79.04326607602204 - type: cos_sim_f1 value: 71.64566826860633 - type: cos_sim_precision value: 70.55512918905092 - type: cos_sim_recall value: 72.77044854881267 - type: dot_accuracy value: 84.19264469213805 - type: dot_ap value: 67.96360043562528 - type: dot_f1 value: 64.06418393006827 - type: dot_precision value: 58.64941898706424 - type: dot_recall value: 70.58047493403694 - type: euclidean_accuracy value: 87.45902127913214 - type: euclidean_ap value: 78.9742237648272 - type: euclidean_f1 value: 71.5553235908142 - type: euclidean_precision value: 70.77955601445535 - type: euclidean_recall value: 72.34828496042216 - type: manhattan_accuracy value: 87.41729749061214 - type: manhattan_ap value: 78.90073137580596 - type: manhattan_f1 value: 71.3942611553533 - type: manhattan_precision value: 68.52705653967483 - type: manhattan_recall value: 74.51187335092348 - type: max_accuracy value: 87.48882398521786 - type: max_ap value: 79.04326607602204 - type: max_f1 value: 71.64566826860633 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.68125897465751 - type: cos_sim_ap value: 85.6003454431979 - type: cos_sim_f1 value: 77.6957163958641 - type: cos_sim_precision value: 73.0110366307807 - type: cos_sim_recall value: 83.02279026793964 - type: dot_accuracy value: 87.7672992587418 - type: dot_ap value: 82.4971301112899 - type: dot_f1 value: 75.90528233151184 - type: dot_precision value: 72.0370626469368 - type: dot_recall value: 80.21250384970742 - type: euclidean_accuracy value: 88.4503434625684 - type: euclidean_ap value: 84.91949884748384 - type: euclidean_f1 value: 76.92365018444684 - type: euclidean_precision value: 74.53245721712759 - type: euclidean_recall value: 79.47336002463813 - type: manhattan_accuracy value: 88.47556952691427 - type: manhattan_ap value: 84.8963689101517 - type: manhattan_f1 value: 76.85901249256395 - type: manhattan_precision value: 74.31693989071039 - type: manhattan_recall value: 79.58115183246073 - type: max_accuracy value: 88.68125897465751 - type: max_ap value: 85.6003454431979 - type: max_f1 value: 77.6957163958641 license: mit language: - en --- <h1 align="center">FlagEmbedding</h1> <h4 align="center"> <p> <a 
href=#model-list>Model List</a> |
<a href=#frequently-asked-questions>FAQ</a> |
<a href=#usage>Usage</a> |
<a href="#evaluation">Evaluation</a> |
<a href="#train">Train</a> |
<a href="#contact">Contact</a> |
<a href="#citation">Citation</a> |
<a href="#license">License</a>
<p>
</h4>

For more details, please refer to our GitHub: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding).

If you are looking for a model that supports more languages, longer texts, and other retrieval methods, you can try using [bge-m3](https://huggingface.co/BAAI/bge-m3).

[English](README.md) | [中文](https://github.com/FlagOpen/FlagEmbedding/blob/master/README_zh.md)

FlagEmbedding focuses on retrieval-augmented LLMs and currently consists of the following projects:

- **Long-Context LLM**: [Activation Beacon](https://github.com/FlagOpen/FlagEmbedding/tree/master/Long_LLM/activation_beacon)
- **Fine-tuning of LM**: [LM-Cocktail](https://github.com/FlagOpen/FlagEmbedding/tree/master/LM_Cocktail)
- **Dense Retrieval**: [BGE-M3](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3), [LLM Embedder](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_embedder), [BGE Embedding](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/baai_general_embedding)
- **Reranker Model**: [BGE Reranker](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker)
- **Benchmark**: [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB)

## News

- 1/30/2024: Release **BGE-M3**, a new member of the BGE model series! M3 stands for **M**ulti-linguality (100+ languages), **M**ulti-granularities (input length up to 8192), and **M**ulti-Functionality (unification of dense, lexical, and multi-vec/colbert retrieval). It is the first embedding model that supports all three retrieval methods, achieving new SOTA on multi-lingual (MIRACL) and cross-lingual (MKQA) benchmarks. [Technical Report](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/BGE_M3/BGE_M3.pdf) and [Code](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3). :fire:
- 1/9/2024: Release [Activation-Beacon](https://github.com/FlagOpen/FlagEmbedding/tree/master/Long_LLM/activation_beacon), an effective, efficient, compatible, and low-cost (training) method to extend the context length of LLMs. [Technical Report](https://arxiv.org/abs/2401.03462) :fire:
- 12/24/2023: Release **LLaRA**, a LLaMA-7B based dense retriever, leading to state-of-the-art performance on MS MARCO and BEIR. Model and code will be open-sourced. Please stay tuned. [Technical Report](https://arxiv.org/abs/2312.15503) :fire:
- 11/23/2023: Release [LM-Cocktail](https://github.com/FlagOpen/FlagEmbedding/tree/master/LM_Cocktail), a method to maintain general capabilities during fine-tuning by merging multiple language models. [Technical Report](https://arxiv.org/abs/2311.13534) :fire:
- 10/12/2023: Release [LLM-Embedder](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_embedder), a unified embedding model to support diverse retrieval augmentation needs for LLMs. [Technical Report](https://arxiv.org/pdf/2310.07554.pdf)
- 09/15/2023: The [technical report](https://arxiv.org/pdf/2309.07597.pdf) and [massive training data](https://data.baai.ac.cn/details/BAAI-MTP) of BGE have been released.
- 09/12/2023: New models:
    - **New reranker models**: release the cross-encoder models `BAAI/bge-reranker-base` and `BAAI/bge-reranker-large`, which are more powerful than the embedding models.
We recommend using/fine-tuning them to re-rank the top-k documents returned by embedding models.
    - **Updated embedding models**: release the `bge-*-v1.5` embedding models to alleviate the issue of the similarity distribution and enhance retrieval ability without instruction.

<details>
<summary>More</summary>
<!-- ### More -->

- 09/07/2023: Update [fine-tune code](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md): add a script to mine hard negatives and support adding an instruction during fine-tuning.
- 08/09/2023: BGE models are integrated into **Langchain**; you can use them like [this](#using-langchain). The C-MTEB **leaderboard** is [available](https://huggingface.co/spaces/mteb/leaderboard).
- 08/05/2023: Release base-scale and small-scale models, **best performance among models of the same size 🤗**
- 08/02/2023: Release `bge-large-*` (short for BAAI General Embedding) models, **rank 1st on the MTEB and C-MTEB benchmarks!** :tada: :tada:
- 08/01/2023: We release the [Chinese Massive Text Embedding Benchmark](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB) (**C-MTEB**), consisting of 31 test datasets.

</details>

## Model List

`bge` is short for `BAAI general embedding`.

| Model | Language | | Description | query instruction for retrieval [1] |
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|
| [BAAI/bge-m3](https://huggingface.co/BAAI/bge-m3) | Multilingual | [Inference](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3#usage) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/BGE_M3) | Multi-Functionality (dense retrieval, sparse retrieval, multi-vector (colbert)), Multi-Linguality, and Multi-Granularity (8192 tokens) | |
| [BAAI/llm-embedder](https://huggingface.co/BAAI/llm-embedder) | English | [Inference](./FlagEmbedding/llm_embedder/README.md) [Fine-tune](./FlagEmbedding/llm_embedder/README.md) | a unified embedding model to support diverse retrieval augmentation needs for LLMs | See [README](./FlagEmbedding/llm_embedder/README.md) |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | |
| [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-large-zh-v1.5](https://huggingface.co/BAAI/bge-large-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-en` | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` |
| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` |
| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` |

[1\]: If you need to search for relevant passages for a query, we suggest adding the instruction to the query; in other cases, no instruction is needed; just use the original query directly. In all cases, **no instruction** needs to be added to passages.

[2\]: Different from an embedding model, a reranker takes a question and a document as input and directly outputs a similarity score instead of an embedding. To balance accuracy and time cost, cross-encoders are widely used to re-rank the top-k documents retrieved by other, simpler models.
For example, use the bge embedding model to retrieve the top 100 relevant documents, and then use the bge reranker to re-rank those 100 documents and obtain the final top-3 results.

All models have been uploaded to the Huggingface Hub, and you can see them at https://huggingface.co/BAAI. If you cannot open the Huggingface Hub, you can also download the models at https://model.baai.ac.cn/models .

## Frequently asked questions

<details>
<summary>1. How to fine-tune the bge embedding model?</summary>
<!-- ### How to fine-tune bge embedding model? -->

Follow this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) to prepare data and fine-tune your model. Some suggestions:

- Mine hard negatives following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune#hard-negatives), which can improve retrieval performance.
- If you pre-train bge on your data, the pre-trained model cannot be used directly to calculate similarity; it must be fine-tuned with contrastive learning before computing similarity.
- If the accuracy of the fine-tuned model is still not high, it is recommended to use/fine-tune the cross-encoder model (bge-reranker) to re-rank the top-k results. Hard negatives are also needed to fine-tune the reranker.

</details>

<details>
<summary>2. The similarity score between two dissimilar sentences is higher than 0.5</summary>
<!-- ### The similarity score between two dissimilar sentences is higher than 0.5 -->

**We suggest using bge v1.5, which alleviates the issue of the similarity distribution.**

Since we fine-tune the models by contrastive learning with a temperature of 0.01, the similarity distribution of the current BGE models is roughly in the interval \[0.6, 1\]. So a similarity score greater than 0.5 does not indicate that the two sentences are similar.

For downstream tasks, such as passage retrieval or semantic similarity, **what matters is the relative order of the scores, not the absolute value.** If you need to filter similar sentences based on a similarity threshold, please select an appropriate threshold based on the similarity distribution of your data (such as 0.8, 0.85, or even 0.9).

</details>

<details>
<summary>3. When does the query instruction need to be used</summary>
<!-- ### When does the query instruction need to be used -->

For `bge-*-v1.5`, we improved its retrieval ability when no instruction is used; omitting the instruction causes only a slight degradation in retrieval performance compared with using it. So you can generate embeddings without an instruction in all cases, for convenience.

For a retrieval task that uses short queries to find long related documents, it is recommended to add instructions to these short queries. **The best way to decide whether to add instructions to queries is to choose the setting that achieves better performance on your task.** In all cases, no instruction needs to be added to the documents/passages.

</details>

## Usage

### Usage for Embedding Model

Here are some examples of using `bge` models with [FlagEmbedding](#using-flagembedding), [Sentence-Transformers](#using-sentence-transformers), [Langchain](#using-langchain), or [Huggingface Transformers](#using-huggingface-transformers).

#### Using FlagEmbedding

```
pip install -U FlagEmbedding
```

If this doesn't work for you, see [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md) for more ways to install FlagEmbedding.
```python
from FlagEmbedding import FlagModel

sentences_1 = ["样例数据-1", "样例数据-2"]
sentences_2 = ["样例数据-3", "样例数据-4"]
model = FlagModel('BAAI/bge-large-zh-v1.5',
                  query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:",
                  use_fp16=True)  # setting use_fp16 to True speeds up computation with a slight performance degradation
embeddings_1 = model.encode(sentences_1)
embeddings_2 = model.encode(sentences_2)
similarity = embeddings_1 @ embeddings_2.T
print(similarity)

# for an s2p (short query to long passage) retrieval task, use encode_queries(), which automatically adds the instruction to each query
# the corpus in a retrieval task can still use encode() or encode_corpus(), since passages don't need the instruction
queries = ['query_1', 'query_2']
passages = ["样例文档-1", "样例文档-2"]
q_embeddings = model.encode_queries(queries)
p_embeddings = model.encode(passages)
scores = q_embeddings @ p_embeddings.T
```

For the value of the argument `query_instruction_for_retrieval`, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list).

By default, FlagModel uses all available GPUs when encoding. Please set `os.environ["CUDA_VISIBLE_DEVICES"]` to select specific GPUs. You can also set `os.environ["CUDA_VISIBLE_DEVICES"]=""` to make all GPUs unavailable.

#### Using Sentence-Transformers

You can also use the `bge` models with [sentence-transformers](https://www.SBERT.net):

```
pip install -U sentence-transformers
```

```python
from sentence_transformers import SentenceTransformer

sentences_1 = ["样例数据-1", "样例数据-2"]
sentences_2 = ["样例数据-3", "样例数据-4"]
model = SentenceTransformer('BAAI/bge-large-zh-v1.5')
embeddings_1 = model.encode(sentences_1, normalize_embeddings=True)
embeddings_2 = model.encode(sentences_2, normalize_embeddings=True)
similarity = embeddings_1 @ embeddings_2.T
print(similarity)
```

For an s2p (short query to long passage) retrieval task, each short query should start with an instruction (for instructions, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list)). The instruction is not needed for passages.

```python
from sentence_transformers import SentenceTransformer

queries = ['query_1', 'query_2']
passages = ["样例文档-1", "样例文档-2"]
instruction = "为这个句子生成表示以用于检索相关文章:"

model = SentenceTransformer('BAAI/bge-large-zh-v1.5')
q_embeddings = model.encode([instruction + q for q in queries], normalize_embeddings=True)
p_embeddings = model.encode(passages, normalize_embeddings=True)
scores = q_embeddings @ p_embeddings.T
```

#### Using Langchain

You can use `bge` in langchain like this:

```python
from langchain.embeddings import HuggingFaceBgeEmbeddings

model_name = "BAAI/bge-large-en-v1.5"
model_kwargs = {'device': 'cuda'}
encode_kwargs = {'normalize_embeddings': True}  # set True to compute cosine similarity
model = HuggingFaceBgeEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
    query_instruction="为这个句子生成表示以用于检索相关文章:"
)
model.query_instruction = "为这个句子生成表示以用于检索相关文章:"
```

#### Using HuggingFace Transformers

With the transformers package, you can use the model like this: first, pass your input through the transformer model, then select the last hidden state of the first token (i.e., [CLS]) as the sentence embedding.
```python
from transformers import AutoTokenizer, AutoModel
import torch

# Sentences we want sentence embeddings for
sentences = ["样例数据-1", "样例数据-2"]

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5')
model = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5')
model.eval()

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# for an s2p (short query to long passage) retrieval task, add an instruction to each query (do not add an instruction to passages)
# encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)
    # Perform pooling. In this case, cls pooling.
    sentence_embeddings = model_output[0][:, 0]
# normalize embeddings
sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)
print("Sentence embeddings:", sentence_embeddings)
```

#### Usage of the ONNX files

```python
from optimum.onnxruntime import ORTModelForFeatureExtraction  # type: ignore
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-en-v1.5')
model = AutoModel.from_pretrained('BAAI/bge-large-en-v1.5', revision="refs/pr/13")
model_ort = ORTModelForFeatureExtraction.from_pretrained('BAAI/bge-large-en-v1.5', revision="refs/pr/13", file_name="onnx/model.onnx")

# Sentences we want sentence embeddings for
sentences = ["样例数据-1", "样例数据-2"]

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
# for an s2p (short query to long passage) retrieval task, add an instruction to each query (do not add an instruction to passages)
# encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt')

model_output_ort = model_ort(**encoded_input)
# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# model_output and model_output_ort are identical
```

It's also possible to deploy the ONNX files with the [infinity_emb](https://github.com/michaelfeil/infinity) pip package.

```python
import asyncio
from infinity_emb import AsyncEmbeddingEngine, EngineArgs

sentences = ["Embed this sentence via Infinity.", "Paris is in France."]
engine = AsyncEmbeddingEngine.from_args(
    EngineArgs(
        model_name_or_path="BAAI/bge-large-en-v1.5",
        device="cpu",
        engine="optimum",  # or engine="torch"
    )
)

async def main():
    async with engine:
        embeddings, usage = await engine.embed(sentences=sentences)

asyncio.run(main())
```

### Usage for Reranker

Different from an embedding model, a reranker takes a question and a document as input and directly outputs a similarity score instead of an embedding. You can get a relevance score by feeding a query and a passage to the reranker. The reranker is optimized with cross-entropy loss, so the relevance score is not bounded to a specific range.
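If you need scores in a fixed range, for example to apply a threshold, one common workaround is to pass the raw logits through a sigmoid. This is a sketch rather than part of the official API, and the logit values shown are illustrative placeholders; in practice they would come from `compute_score` in the examples below:

```python
import math

# raw reranker logits, e.g. as returned by the rerankers shown below (higher = more relevant)
raw_scores = [-5.6, 5.3]  # illustrative values, not real outputs

# squash each unbounded logit into (0, 1) for easier thresholding
probs = [1.0 / (1.0 + math.exp(-s)) for s in raw_scores]
print(probs)
```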
#### Using FlagEmbedding

```
pip install -U FlagEmbedding
```

Get relevance scores (higher scores indicate more relevance):

```python
from FlagEmbedding import FlagReranker

reranker = FlagReranker('BAAI/bge-reranker-large', use_fp16=True)  # setting use_fp16 to True speeds up computation with a slight performance degradation

score = reranker.compute_score(['query', 'passage'])
print(score)

scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
print(scores)
```

#### Using Huggingface transformers

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-large')
model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-large')
model.eval()

pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
with torch.no_grad():
    inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
    scores = model(**inputs, return_dict=True).logits.view(-1).float()
    print(scores)
```

## Evaluation

`baai-general-embedding` models achieve **state-of-the-art performance on both the MTEB and C-MTEB leaderboards!**
For more details and evaluation tools, see our [scripts](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md).

- **MTEB**:

| Model Name | Dimension | Sequence Length | Average (56) | Retrieval (15) | Clustering (11) | Pair Classification (3) | Reranking (4) | STS (10) | Summarization (1) | Classification (12) |
|:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
| [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | 1024 | 512 | **64.23** | **54.29** | 46.08 | 87.12 | 60.03 | 83.11 | 31.61 | 75.97 |
| [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | 768 | 512 | 63.55 | 53.25 | 45.77 | 86.55 | 58.86 | 82.4 | 31.07 | 75.53 |
| [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | 384 | 512 | 62.17 | 51.68 | 43.82 | 84.92 | 58.36 | 81.59 | 30.12 | 74.14 |
| [bge-large-en](https://huggingface.co/BAAI/bge-large-en) | 1024 | 512 | 63.98 | 53.9 | 46.98 | 85.8 | 59.48 | 81.56 | 32.06 | 76.21 |
| [bge-base-en](https://huggingface.co/BAAI/bge-base-en) | 768 | 512 | 63.36 | 53.0 | 46.32 | 85.86 | 58.7 | 81.84 | 29.27 | 75.27 |
| [gte-large](https://huggingface.co/thenlper/gte-large) | 1024 | 512 | 63.13 | 52.22 | 46.84 | 85.00 | 59.13 | 83.35 | 31.66 | 73.33 |
| [gte-base](https://huggingface.co/thenlper/gte-base) | 768 | 512 | 62.39 | 51.14 | 46.2 | 84.57 | 58.61 | 82.3 | 31.17 | 73.01 |
| [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1024 | 512 | 62.25 | 50.56 | 44.49 | 86.03 | 56.61 | 82.05 | 30.19 | 75.24 |
| [bge-small-en](https://huggingface.co/BAAI/bge-small-en) | 384 | 512 | 62.11 | 51.82 | 44.31 | 83.78 | 57.97 | 80.72 | 30.53 | 74.37 |
| [instructor-xl](https://huggingface.co/hkunlp/instructor-xl) | 768 | 512 | 61.79 | 49.26 | 44.74 | 86.62 | 57.29 | 83.06 | 32.32 | 61.79 |
| [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 768 | 512 | 61.5 | 50.29 | 43.80 | 85.73 | 55.91 | 81.05 | 30.28 | 73.84 |
| [gte-small](https://huggingface.co/thenlper/gte-small) | 384 | 512 | 61.36 | 49.46 | 44.89 | 83.54 | 57.7 | 82.07 | 30.42 | 72.31 |
| [text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | 1536 | 8192 | 60.99 | 49.25 | 45.9 | 84.89 | 56.32 | 80.97 | 30.8 | 70.93 |
| [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 384 | 512 | 59.93 | 49.04 | 39.92 | 84.67 | 54.32 | 80.39 | 31.16 | 72.94 |
| [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 768 | 512 | 59.51 | 42.24 | 43.72 | 85.06 | 56.42 | 82.63 | 30.08 | 73.42 |
| [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 768 | 514 | 57.78 | 43.81 | 43.69 | 83.04 | 59.36 | 80.28 | 27.49 | 65.07 |
| [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) | 4096 | 2048 | 57.59 | 48.22 | 38.93 | 81.9 | 55.65 | 77.74 | 33.6 | 66.19 |

- **C-MTEB**:
We created the benchmark C-MTEB for Chinese text embedding, which consists of 31 datasets from 6 tasks. Please refer to [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md) for a detailed introduction.

| Model | Embedding dimension | Avg | Retrieval | STS | PairClassification | Classification | Reranking | Clustering |
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|
| [**BAAI/bge-large-zh-v1.5**](https://huggingface.co/BAAI/bge-large-zh-v1.5) | 1024 | **64.53** | 70.46 | 56.25 | 81.6 | 69.13 | 65.84 | 48.99 |
| [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | 768 | 63.13 | 69.49 | 53.72 | 79.75 | 68.07 | 65.39 | 47.53 |
| [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | 512 | 57.82 | 61.77 | 49.11 | 70.41 | 63.96 | 60.92 | 44.18 |
| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | 1024 | 64.20 | 71.53 | 54.98 | 78.94 | 68.32 | 65.11 | 48.39 |
| [bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | 1024 | 63.53 | 70.55 | 53 | 76.77 | 68.58 | 64.91 | 50.01 |
| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | 768 | 62.96 | 69.53 | 54.12 | 77.5 | 67.07 | 64.91 | 47.63 |
| [multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) | 1024 | 58.79 | 63.66 | 48.44 | 69.89 | 67.34 | 56.00 | 48.23 |
| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | 512 | 58.27 | 63.07 | 49.45 | 70.35 | 63.64 | 61.48 | 45.09 |
| [m3e-base](https://huggingface.co/moka-ai/m3e-base) | 768 | 57.10 | 56.91 | 50.47 | 63.99 | 67.52 | 59.34 | 47.68 |
| [m3e-large](https://huggingface.co/moka-ai/m3e-large) | 1024 | 57.05 | 54.75 | 50.42 | 64.3 | 68.2 | 59.66 | 48.88 |
| [multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) | 768 | 55.48 | 61.63 | 46.49 | 67.07 | 65.35 | 54.35 | 40.68 |
| [multilingual-e5-small](https://huggingface.co/intfloat/multilingual-e5-small) | 384 | 55.38 | 59.95 | 45.27 | 66.45 | 65.85 | 53.86 | 45.26 |
| [text-embedding-ada-002(OpenAI)](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) | 1536 | 53.02 | 52.0 | 43.35 | 69.56 | 64.31 | 54.28 | 45.68 |
| [luotuo](https://huggingface.co/silk-road/luotuo-bert-medium) | 1024 | 49.37 | 44.4 | 42.78 | 66.62 | 61 | 49.25 | 44.39 |
| [text2vec-base](https://huggingface.co/shibing624/text2vec-base-chinese) | 768 | 47.63 | 38.79 | 43.41 | 67.41 | 62.19 | 49.45 | 37.66 |
| [text2vec-large](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 1024 | 47.36 | 41.94 | 44.97 | 70.86 | 60.66 | 49.16 | 30.02 |

- **Reranking**:
See [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/) for the evaluation script.

| Model | T2Reranking | T2RerankingZh2En\* | T2RerankingEn2Zh\* | MMarcoReranking | CMedQAv1 | CMedQAv2 | Avg |
|:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|
| text2vec-base-multilingual | 64.66 | 62.94 | 62.51 | 14.37 | 48.46 | 48.6 | 50.26 |
| multilingual-e5-small | 65.62 | 60.94 | 56.41 | 29.91 | 67.26 | 66.54 | 57.78 |
| multilingual-e5-large | 64.55 | 61.61 | 54.28 | 28.6 | 67.42 | 67.92 | 57.4 |
| multilingual-e5-base | 64.21 | 62.13 | 54.68 | 29.5 | 66.23 | 66.98 | 57.29 |
| m3e-base | 66.03 | 62.74 | 56.07 | 17.51 | 77.05 | 76.76 | 59.36 |
| m3e-large | 66.13 | 62.72 | 56.1 | 16.46 | 77.76 | 78.27 | 59.57 |
| bge-base-zh-v1.5 | 66.49 | 63.25 | 57.02 | 29.74 | 80.47 | 84.88 | 63.64 |
| bge-large-zh-v1.5 | 65.74 | 63.39 | 57.03 | 28.74 | 83.45 | 85.44 | 63.97 |
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | 67.28 | 63.95 | 60.45 | 35.46 | 81.26 | 84.1 | 65.42 |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | 67.6 | 64.03 | 61.44 | 37.16 | 82.15 | 84.18 | 66.09 |

\* : T2RerankingZh2En and T2RerankingEn2Zh are cross-language retrieval tasks

## Train

### BAAI Embedding

We pre-train the models using [retromae](https://github.com/staoxiao/RetroMAE) and train them on large-scale pair data using contrastive learning.
**You can fine-tune the embedding model on your data following our [examples](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune).**
We also provide a [pre-train example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/pretrain).
Note that the goal of pre-training is to reconstruct the text, and the pre-trained model cannot be used for similarity calculation directly; it needs to be fine-tuned.
For more training details for bge, see [baai_general_embedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md).

### BGE Reranker

A cross-encoder performs full attention over the input pair, which is more accurate than an embedding model (i.e., a bi-encoder) but more time-consuming. Therefore, it can be used to re-rank the top-k documents returned by an embedding model.
We train the cross-encoder on multilingual pair data. The data format is the same as for the embedding model, so you can fine-tune it easily following our [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker).
For more details, please refer to [./FlagEmbedding/reranker/README.md](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker)

## Contact

If you have any questions or suggestions related to this project, feel free to open an issue or pull request.
You can also email Shitao Xiao([email protected]) and Zheng Liu([email protected]).

## Citation

If you find this repository useful, please consider giving it a star :star: and a citation

```
@misc{bge_embedding,
      title={C-Pack: Packaged Resources To Advance General Chinese Embedding},
      author={Shitao Xiao and Zheng Liu and Peitian Zhang and Niklas Muennighoff},
      year={2023},
      eprint={2309.07597},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

## License

FlagEmbedding is licensed under the [MIT License](https://github.com/FlagOpen/FlagEmbedding/blob/master/LICENSE). The released models can be used for commercial purposes free of charge.
mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF
mradermacher
2024-05-28T23:24:57Z
741
2
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:saishf/Neural-SOVLish-Devil-8B-L3", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
null
2024-05-28T21:13:01Z
---
base_model: saishf/Neural-SOVLish-Devil-8B-L3
language:
- en
library_name: transformers
license: cc-by-nc-4.0
quantized_by: mradermacher
tags:
- mergekit
- merge
---

## About

<!-- ### quantize_version: 2 -->
<!-- ### output_tensor_quantised: 1 -->
<!-- ### convert_type: hf -->
<!-- ### vocab_type: -->
<!-- ### tags: -->

static quants of https://huggingface.co/saishf/Neural-SOVLish-Devil-8B-L3

<!-- provided-files -->
weighted/imatrix quants are available at https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-i1-GGUF

## Usage

If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files.
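As a concrete starting point, here is a minimal sketch using the `llama-cpp-python` bindings and `huggingface_hub` to download and run one of the quants listed below. The chosen file, context size, and prompt are illustrative assumptions, not recommendations:

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# download one quant from this repo (Q4_K_M is used here purely as an example)
path = hf_hub_download(
    repo_id="mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF",
    filename="Neural-SOVLish-Devil-8B-L3.Q4_K_M.gguf",
)

llm = Llama(model_path=path, n_ctx=4096)  # context length is an illustrative choice
out = llm("Write a haiku about merging models.", max_tokens=64)
print(out["choices"][0]["text"])
```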
## Provided Quants

(sorted by size, not necessarily quality. IQ-quants are often preferable over similar-sized non-IQ quants)

| Link | Type | Size/GB | Notes |
|:-----|:-----|--------:|:------|
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q2_K.gguf) | Q2_K | 3.3 | |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.IQ3_XS.gguf) | IQ3_XS | 3.6 | |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q3_K_S.gguf) | Q3_K_S | 3.8 | |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.IQ3_S.gguf) | IQ3_S | 3.8 | beats Q3_K* |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.IQ3_M.gguf) | IQ3_M | 3.9 | |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q3_K_L.gguf) | Q3_K_L | 4.4 | |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.IQ4_XS.gguf) | IQ4_XS | 4.6 | |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q5_K_S.gguf) | Q5_K_S | 5.7 | |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q5_K_M.gguf) | Q5_K_M | 5.8 | |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q6_K.gguf) | Q6_K | 6.7 | very good quality |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality |
| [GGUF](https://huggingface.co/mradermacher/Neural-SOVLish-Devil-8B-L3-GGUF/resolve/main/Neural-SOVLish-Devil-8B-L3.f16.gguf) | f16 | 16.2 | 16 bpw, overkill |

Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better):

![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png)

And here are Artefact2's thoughts on the matter:
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9

## FAQ / Model Request

See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized.

## Thanks

I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time.

<!-- end -->
llamafactory/tiny-random-Llama-3-valuehead
llamafactory
2024-06-15T10:22:45Z
741
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-06-15T10:14:17Z
---
license: apache-2.0
library_name: transformers
inference: false
---

A tiny version of https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct + ValueHead
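Since the checkpoint bundles a value head, one plausible way to load it is through `trl`'s value-head wrapper. This is an unverified sketch; the exact loading path depends on how the value head was saved:

```python
from transformers import AutoTokenizer
from trl import AutoModelForCausalLMWithValueHead

tokenizer = AutoTokenizer.from_pretrained("llamafactory/tiny-random-Llama-3-valuehead")
model = AutoModelForCausalLMWithValueHead.from_pretrained("llamafactory/tiny-random-Llama-3-valuehead")

inputs = tokenizer("Hello", return_tensors="pt")
lm_logits, _, values = model(**inputs)  # the value head returns one scalar value per token
print(values.shape)
```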
instruction-pretrain/InstructLM-1.3B
instruction-pretrain
2024-06-27T03:35:14Z
741
36
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "en", "dataset:tiiuae/falcon-refinedweb", "dataset:instruction-pretrain/ft-instruction-synthesizer-collection", "arxiv:2406.14491", "arxiv:2309.09530", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-06-19T04:41:51Z
---
license: apache-2.0
datasets:
- tiiuae/falcon-refinedweb
- instruction-pretrain/ft-instruction-synthesizer-collection
language:
- en
---

# Instruction Pre-Training: Language Models are Supervised Multitask Learners

This repo contains the **general models pre-trained from scratch** in our paper [Instruction Pre-Training: Language Models are Supervised Multitask Learners](https://huggingface.co/papers/2406.14491).

We explore supervised multitask pre-training by proposing ***Instruction Pre-Training***, a framework that scalably augments massive raw corpora with instruction-response pairs to pre-train language models. The instruction-response pairs are generated by an efficient instruction synthesizer built on open-source models. In our experiments, we synthesize 200M instruction-response pairs covering 40+ task categories to verify the effectiveness of *Instruction Pre-Training*. *Instruction Pre-Training* outperforms *Vanilla Pre-training* in both general pre-training from scratch and domain-adaptive continual pre-training. **In pre-training from scratch, *Instruction Pre-Training* not only improves pre-trained base models but also benefits more from further instruction tuning.** In continual pre-training, *Instruction Pre-Training* enables Llama3-8B to be comparable to or even outperform Llama3-70B.

<p align='center'>
    <img src="https://cdn-uploads.huggingface.co/production/uploads/66711d2ee12fa6cc5f5dfc89/vRdsFIVQptbNaGiZ18Lih.png" width="400">
</p>

## Resources

**🤗 We share our data and models with example usages, feel free to open any issues or discussions! 🤗**

- Context-Based Instruction Synthesizer: [instruction-synthesizer](https://huggingface.co/instruction-pretrain/instruction-synthesizer)
- Fine-Tuning Data for the Synthesizer: [ft-instruction-synthesizer-collection](https://huggingface.co/datasets/instruction-pretrain/ft-instruction-synthesizer-collection)
- General Models Pre-Trained from Scratch:
  - [InstructLM-500M](https://huggingface.co/instruction-pretrain/InstructLM-500M)
  - [InstructLM-1.3B](https://huggingface.co/instruction-pretrain/InstructLM-1.3B)
- Domain-Specific Models Pre-Trained from Llama3-8B:
  - [Finance-Llama3-8B](https://huggingface.co/instruction-pretrain/finance-Llama3-8B)
  - [Biomedicine-Llama3-8B](https://huggingface.co/instruction-pretrain/medicine-Llama3-8B)
- General Instruction-Augmented Corpora: [general-instruction-augmented-corpora](https://huggingface.co/datasets/instruction-pretrain/general-instruction-augmented-corpora)
- Domain-Specific Instruction-Augmented Corpora (no finance data to avoid ethical issues): [medicine-instruction-augmented-corpora](https://huggingface.co/datasets/instruction-pretrain/medicine-instruction-augmented-corpora)

## General Pre-Training From Scratch

We augment the [RefinedWeb corpora](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) with instruction-response pairs generated by our [context-based instruction synthesizer](https://huggingface.co/instruction-pretrain/instruction-synthesizer) to pre-train general language models from scratch.

To evaluate our general base model using the [lm-evaluation-harness framework](https://github.com/EleutherAI/lm-evaluation-harness):

1. Setup dependencies:
```bash
git clone https://github.com/EleutherAI/lm-evaluation-harness
cd lm-evaluation-harness
pip install -e .
```
2. Evaluate:
```bash
MODEL=instruction-pretrain/InstructLM-1.3B
add_bos_token=True # this flag is needed because lm-eval-harness sets add_bos_token to False by default, but ours requires add_bos_token to be True

accelerate launch -m lm_eval --model hf \
    --model_args pretrained=${MODEL},add_bos_token=${add_bos_token},dtype=float16 \
    --gen_kwargs do_sample=False \
    --tasks piqa,hellaswag,winogrande \
    --batch_size auto \
    --num_fewshot 0

accelerate launch -m lm_eval --model hf \
    --model_args pretrained=${MODEL},add_bos_token=${add_bos_token},dtype=float16 \
    --gen_kwargs do_sample=False \
    --tasks social_iqa,ai2_arc,openbookqa,boolq,mmlu \
    --batch_size auto \
    --num_fewshot 5
```

## Citation

If you find our work helpful, please cite us:

Instruction Pre-Training
```bibtex
@article{cheng2024instruction,
  title={Instruction Pre-Training: Language Models are Supervised Multitask Learners},
  author={Cheng, Daixuan and Gu, Yuxian and Huang, Shaohan and Bi, Junyu and Huang, Minlie and Wei, Furu},
  journal={arXiv preprint arXiv:2406.14491},
  year={2024}
}
```

[AdaptLLM](https://huggingface.co/papers/2309.09530)
```bibtex
@inproceedings{
  cheng2024adapting,
  title={Adapting Large Language Models via Reading Comprehension},
  author={Daixuan Cheng and Shaohan Huang and Furu Wei},
  booktitle={The Twelfth International Conference on Learning Representations},
  year={2024},
  url={https://openreview.net/forum?id=y886UXPEZ0}
}
```
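## Quick Inference Sketch

For a quick qualitative check outside the harness, the base model can also be loaded directly with 🤗 Transformers. This is a minimal sketch; the prompt and generation settings below are illustrative, not prescribed by the paper:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "instruction-pretrain/InstructLM-1.3B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# illustrative prompt; the tokenizer adds the required BOS token by default
inputs = tokenizer("Question: What is instruction pre-training?\nAnswer:", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```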
Ichate/anondiffusion
Ichate
2024-06-28T12:52:54Z
741
0
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2024-06-28T12:31:41Z
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
---

### anondiffusion model trained by Ichate

Sample pictures of this concept:

![image/png](https://cdn-uploads.huggingface.co/production/uploads/658d38b0e15da20cd8b4ceb1/jXbUvxAO6x64FMKc7bMf6.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/658d38b0e15da20cd8b4ceb1/V7WPcx_yQ21ZpSElQrYJC.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/658d38b0e15da20cd8b4ceb1/tIrqz8kBl3WC0_5g5jzxE.png)
![image/png](https://cdn-uploads.huggingface.co/production/uploads/658d38b0e15da20cd8b4ceb1/eb2NcZoHNyj4UVuR0jTK3.png)
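To try the concept with 🤗 Diffusers, a minimal text-to-image sketch follows. The prompt is a placeholder; the card does not state the concept's trigger token, so adapt it to your use case:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("Ichate/anondiffusion", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

image = pipe("a sample illustration in the style of this concept").images[0]  # placeholder prompt
image.save("sample.png")
```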
monologg/bert-base-cased-goemotions-ekman
monologg
2021-05-19T23:47:57Z
740
2
transformers
[ "transformers", "pytorch", "bert", "endpoints_compatible", "region:us" ]
null
2022-03-02T23:29:05Z
Entry not found
newtonkwan/gpt2-fine-tuned-debiased
newtonkwan
2022-03-11T00:37:42Z
740
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2022-03-11T00:10:51Z
Entry not found
noahkim/KoT5_news_summarization
noahkim
2022-10-21T00:05:27Z
740
4
transformers
[ "transformers", "pytorch", "tensorboard", "t5", "text2text-generation", "summarization", "news", "ko", "autotrain_compatible", "text-generation-inference", "region:us" ]
summarization
2022-10-20T11:06:55Z
---
language: ko
tags:
- summarization
- news
inference: false
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# KoT5_news_summarization

- This model is a [lcw99/t5-base-korean-text-summary](https://huggingface.co/lcw99/t5-base-korean-text-summary) finetuned on the [daekeun-ml/naver-news-summarization-ko](https://huggingface.co/datasets/daekeun-ml/naver-news-summarization-ko)

## Model description

<<20221021 Commit>>

To build a model specialized for news summarization for a project, I fine-tuned lcw99's t5-base-korean-text-summary model further on the naver-news-summarization-ko dataset provided by daekeun-ml.

I plan to continue training with additional news data I currently have, and will keep improving the model to deliver strong performance. Thank you.

Runtime environment:
- Google Colab Pro
- CPU: Intel(R) Xeon(R) CPU @ 2.20GHz
- GPU: A100-SXM4-40GB

<pre><code>
# Python Code
from transformers import AutoTokenizer
from transformers import AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("noahkim/KoT5_news_summarization")
model = AutoModelForSeq2SeqLM.from_pretrained("noahkim/KoT5_news_summarization")
</code></pre>

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step  | Validation Loss |
|:-------------:|:-----:|:-----:|:---------------:|
| 0.4513        | 1.0   | 2775  | 0.4067          |
| 0.42          | 2.0   | 5550  | 0.3933          |
| 0.395         | 3.0   | 8325  | 0.3864          |
| 0.3771        | 4.0   | 11100 | 0.3872          |

### Framework versions

- Transformers 4.23.1
- Pytorch 1.12.1+cu113
- Datasets 2.6.1
- Tokenizers 0.13.1
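Once the tokenizer and model are loaded as above, summaries can be generated with `generate()`. This is a minimal sketch; the input placeholder and generation settings are illustrative, and the model expects Korean news text:

```python
article = "여기에 요약할 한국어 뉴스 기사를 넣으세요."  # placeholder: put the Korean news article to summarize here
inputs = tokenizer(article, return_tensors="pt", max_length=512, truncation=True)
summary_ids = model.generate(**inputs, max_length=128, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```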
hamxea/Mistral-7B-v0.1-activity-fine-tuned-v3
hamxea
2024-03-31T14:50:12Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "medical", "en", "arxiv:1910.09700", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-12-11T12:23:03Z
--- license: other language: - en library_name: transformers tags: - medical --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] ### Framework versions - PEFT 0.7.2.dev0
AIGym/TinyLlama-1.1B-2.5T-chat
AIGym
2024-03-04T17:17:37Z
740
1
transformers
[ "transformers", "safetensors", "llama", "text-generation", "finetuned", "conversational", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-09T00:06:50Z
--- license: apache-2.0 tags: - finetuned pipeline_tag: text-generation model-index: - name: TinyLlama-1.1B-2.5T-chat results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 34.47 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/TinyLlama-1.1B-2.5T-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 59.71 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/TinyLlama-1.1B-2.5T-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 26.45 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/TinyLlama-1.1B-2.5T-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 38.8 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/TinyLlama-1.1B-2.5T-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 61.01 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/TinyLlama-1.1B-2.5T-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 1.14 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/TinyLlama-1.1B-2.5T-chat name: Open LLM Leaderboard ---

# TinyLlama-1.1B-2.5T-chat

It was created by starting from a TinyLlama-1.1B 2.5T checkpoint and training it on a llama dataset. We have attached the wandb report in PDF form so you can view the training run at a glance.

# Reason

This model was fine-tuned to allow it to follow directions and is a stepping stone to further training.

# Referrals

RunPod - This is who I use to train the models on Hugging Face. If you use this link, we both get free credits. - <a href="https://runpod.io?ref=kilq83n1" target="_blank" style="color: #3498db; text-decoration: none; font-weight: bold;">Visit Runpod's Website!</a>

PayPal - If you want to leave a tip, it is appreciated. - <a href="https://paypal.me/OpenSourceTraining" target="_blank" style="color: #3498db; text-decoration: none; font-weight: bold;">Visit My Paypal!</a>

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_AIGym__TinyLlama-1.1B-2.5T-chat)

| Metric |Value|
|---------------------------------|----:|
|Avg. |36.93|
|AI2 Reasoning Challenge (25-Shot)|34.47|
|HellaSwag (10-Shot) |59.71|
|MMLU (5-Shot) |26.45|
|TruthfulQA (0-shot) |38.80|
|Winogrande (5-shot) |61.01|
|GSM8k (5-shot) | 1.14|
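Below is a minimal, hedged sketch of chatting with the model via `transformers`; it assumes the tokenizer ships a usable chat template for this fine-tune, and the prompt is only an example:

```python
# Minimal sketch, not from the original card; assumes the tokenizer's chat template fits this fine-tune.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "AIGym/TinyLlama-1.1B-2.5T-chat"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

messages = [{"role": "user", "content": "Explain what a language model is in one sentence."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output = model.generate(input_ids, max_new_tokens=128, do_sample=True, temperature=0.7)
# Decode only the newly generated tokens, not the prompt.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```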
EmbeddedLLM/Mistral-7B-Merge-14-v0.5
EmbeddedLLM
2024-01-20T16:26:43Z
740
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "en", "base_model:EmbeddedLLM/Mistral-7B-Merge-14-v0.3", "base_model:Weyaxi/OpenHermes-2.5-neural-chat-v3-3-openchat-3.5-1210-Slerp", "base_model:openchat/openchat-3.5-0106", "base_model:mlabonne/NeuralMarcoro14-7B", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-12T08:17:11Z
---
license: cc-by-nc-4.0
language:
- en
tags:
- merge
base_model:
- EmbeddedLLM/Mistral-7B-Merge-14-v0.3
- Weyaxi/OpenHermes-2.5-neural-chat-v3-3-openchat-3.5-1210-Slerp
- openchat/openchat-3.5-0106
- mlabonne/NeuralMarcoro14-7B
---

# Update 2024-01-21

Due to [mlabonne/NeuralMarcoro14-7B](https://huggingface.co/mlabonne/NeuralMarcoro14-7B) updating its license to CC-BY-NC, our license will follow suit.

# Model Description

This is an experiment to test merging 14 models using DARE TIES 🦙

1. We first merge 14 models to produce [EmbeddedLLM/Mistral-7B-Merge-14-v0.3](https://huggingface.co/EmbeddedLLM/Mistral-7B-Merge-14-v0.3).
2. The model is merged again using DARE TIES with:
   - [Weyaxi/OpenHermes-2.5-neural-chat-v3-3-openchat-3.5-1210-Slerp](https://huggingface.co/Weyaxi/OpenHermes-2.5-neural-chat-v3-3-openchat-3.5-1210-Slerp)
   - [openchat/openchat-3.5-0106](https://huggingface.co/openchat/openchat-3.5-0106)
   - [mlabonne/NeuralMarcoro14-7B](https://huggingface.co/mlabonne/NeuralMarcoro14-7B)

## Open LLM Leaderboard

| Average    | 71.96 |
|------------|-------|
| ARC        | 68.69 |
| HellaSwag  | 86.45 |
| MMLU       | 65.65 |
| TruthfulQA | 59.12 |
| Winogrande | 80.66 |
| GSM8K      | 71.19 |

## Chat Template

Either ChatML or Llama-2 chat template.

## Merge Configuration

The merge config file for this model is here:

```yaml
models:
  - model: mistralai/Mistral-7B-v0.1
    # no parameters necessary for base model
  - model: EmbeddedLLM/Mistral-7B-Merge-14-v0.3
    parameters:
      weight: 0.3
      density: 0.5
  - model: Weyaxi/OpenHermes-2.5-neural-chat-v3-3-openchat-3.5-1210-Slerp
    parameters:
      weight: 0.2
      density: 0.5
  - model: openchat/openchat-3.5-0106
    parameters:
      weight: 0.2
      density: 0.5
  - model: mlabonne/NeuralMarcoro14-7B
    parameters:
      weight: 0.3
      density: 0.5
merge_method: dare_ties
base_model: mistralai/Mistral-7B-v0.1
parameters:
  int8_mask: true
tokenizer_source: union
dtype: bfloat16
```
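For readers who want to reproduce the merge, here is a hedged sketch of driving mergekit from its CLI; the config filename and output directory are assumptions, and the available flags may vary by mergekit version:

```shell
# Hypothetical reproduction: save the YAML above as config.yaml, then run mergekit.
pip install mergekit
mergekit-yaml config.yaml ./Mistral-7B-Merge-14-v0.5 --cuda
```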
jsfs11/WestOrcaNeuralMarco-DPO-v2-DARETIES-7B
jsfs11
2024-03-03T00:08:29Z
740
2
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "senseable/Westlake-7B-v2", "decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP", "mlabonne/NeuralMarcoro14-7B", "base_model:senseable/Westlake-7B-v2", "base_model:decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP", "base_model:mlabonne/NeuralMarcoro14-7B", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-23T05:57:17Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - senseable/Westlake-7B-v2 - decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP - mlabonne/NeuralMarcoro14-7B base_model: - senseable/Westlake-7B-v2 - decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP - mlabonne/NeuralMarcoro14-7B model-index: - name: WestOrcaNeuralMarco-DPO-v2-DARETIES-7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 71.93 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=jsfs11/WestOrcaNeuralMarco-DPO-v2-DARETIES-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.06 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=jsfs11/WestOrcaNeuralMarco-DPO-v2-DARETIES-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.99 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=jsfs11/WestOrcaNeuralMarco-DPO-v2-DARETIES-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 65.96 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=jsfs11/WestOrcaNeuralMarco-DPO-v2-DARETIES-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 82.79 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=jsfs11/WestOrcaNeuralMarco-DPO-v2-DARETIES-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 70.13 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=jsfs11/WestOrcaNeuralMarco-DPO-v2-DARETIES-7B name: Open LLM Leaderboard ---

# WestOrcaNeuralMarco-DPO-v2-DARETIES-7B

WestOrcaNeuralMarco-DPO-v2-DARETIES-7B is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [senseable/Westlake-7B-v2](https://huggingface.co/senseable/Westlake-7B-v2)
* [decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP](https://huggingface.co/decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP)
* [mlabonne/NeuralMarcoro14-7B](https://huggingface.co/mlabonne/NeuralMarcoro14-7B)

## 🧩 Configuration

```yaml
models:
  - model: mistralai/Mistral-7B-v0.1
    # No parameters necessary for base model
  - model: senseable/Westlake-7B-v2
    parameters:
      density: 0.73
      weight: 0.4
  - model: decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP
    parameters:
      density: 0.55
      weight: 0.3
  - model: mlabonne/NeuralMarcoro14-7B
    parameters:
      density: 0.45
      weight: 0.3
merge_method: dare_ties
base_model: mistralai/Mistral-7B-v0.1
parameters:
  int8_mask: true
dtype: bfloat16
```

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "jsfs11/WestOrcaNeuralMarco-DPO-v2-DARETIES-7B"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_jsfs11__WestOrcaNeuralMarco-DPO-v2-DARETIES-7B)

| Metric |Value|
|---------------------------------|----:|
|Avg. |73.98|
|AI2 Reasoning Challenge (25-Shot)|71.93|
|HellaSwag (10-Shot) |88.06|
|MMLU (5-Shot) |64.99|
|TruthfulQA (0-shot) |65.96|
|Winogrande (5-shot) |82.79|
|GSM8k (5-shot) |70.13|
adamo1139/Yi-34B-200K-AEZAKMI-RAW-2301
adamo1139
2024-05-27T21:30:01Z
740
3
transformers
[ "transformers", "safetensors", "llama", "text-generation", "dataset:adamo1139/AEZAKMI_v2", "dataset:adamo1139/rawrr_v1", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-24T09:25:41Z
---
license: apache-2.0
datasets:
- adamo1139/AEZAKMI_v2
- adamo1139/rawrr_v1
---

## Model Description

EXPERIMENTAL MODEL, NOT FINAL, IT HAS SOME ISSUES, BUT IT'S REALLY COOL WHEN IT WORKS.

The biggest change from my previous AEZAKMI models is that this one is much, much less likely to refuse completing a request!

Yi-34B 200K base model fine-tuned on the RAWrr v1 dataset via DPO and then fine-tuned on the AEZAKMI v2 dataset via SFT. DPO training took around 6 hours, SFT took around 18 hours. I used `unsloth` for both stages. It's like airoboros but with less gptslop, no refusals and less of the typical language used by RLHFed OpenAI models. Say goodbye to "It's important to remember"!

Prompt format is standard ChatML. Don't expect it to be good at math or riddles, or to be crazy smart. My end goal with AEZAKMI is to create a cozy free chatbot. The base model used for fine-tuning was the 200K-context Yi-34B-Llama model shared by larryvrh. Training was done with max_position_embeddings set at 4096; it was reverted back to 200K after applying the LoRA.

## Prompt Format

I recommend using ChatML format, as this was used during the fine-tune. Here's the prompt format you should use. You can set a different system message; the model seems to respect that fine, so it wasn't overfitted. Both the `A chat.` and `A chat with uncensored assistant.` system prompts work fine and are pretty refusal-free.

```
<|im_start|>system
A chat with uncensored assistant.<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
```

## Intended uses & limitations

It's a chat model, not a base completion-only one. Use is limited by the Yi license. Since the no-robots dataset was used for making rawrr_v1, I guess you maybe shouldn't use it for commercial activities.

## Known Issues

I recommend setting repetition penalty to something around 1.05 to avoid repetition. So far I have had a somewhat good experience running this model with temperature 1.0-1.2.

One big issue I noticed is that I think I set too small a learning rate for the SFT fine-tuning. Sometimes completion mode shines through and responses read more like completions than instruct responses. Another small issue is that when you enter a prompt that might have resulted in a refusal with a previous model, the response will be more free-form and will probably have a touch of completion in it. So far, it seems like the strongest anti-refusal bias is at 0 ctx - the first prompt. But it's also present, albeit a little bit less, further down. I plan to expand the rawrr dataset and include more samples without a system prompt; this should help here.
[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" alt="made with Unsloth" width="400" height="64"/>](https://github.com/unslothai/unsloth)

## Unsloth training parameters DPO Stage

- lora_r: 16
- lora_alpha: 32
- max_length: 500
- learning_rate: 0.00005
- lr_scheduler_type: "linear"
- target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
- gradient_accumulation_steps: 16
- per_device_batch_size: 1
- num_train_epochs: 1

Script used for DPO training can be found here: https://huggingface.co/adamo1139/Yi-34B-200K-rawrr1-LORA-DPO-experimental-r3/blob/main/yi-34b-dpo-unsloth-1.py

## Unsloth training parameters SFT Stage

- lora_r: 16
- lora_alpha: 32
- max_length: 2200
- learning_rate: 0.00006
- lr_scheduler_type: "cosine"
- lr_scheduler_kwargs: { "num_cycles": 0.3 }
- target_modules: ["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"]
- gradient_accumulation_steps: 1
- per_device_batch_size: 1
- num_train_epochs: 1.4

Script used for SFT training can be found here: https://huggingface.co/adamo1139/Yi-34B-200K-AEZAKMI-RAW-2301-LoRA/blob/main/yi-34b-aezakmi-sft-1-hf.py

### Credits

Thanks to mlabonne, Daniel Han and Michael Han for providing open source code that was used for fine-tuning.
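Putting the prompt format and the suggested sampling settings together, here is a minimal sketch; model loading details and the example prompt are assumptions, not from the card:

```python
# Sketch only: hand-built ChatML prompt plus the sampling settings suggested above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "adamo1139/Yi-34B-200K-AEZAKMI-RAW-2301"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

prompt = (
    "<|im_start|>system\n"
    "A chat with uncensored assistant.<|im_end|>\n"
    "<|im_start|>user\n"
    "Tell me a story about a lighthouse keeper.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=300, do_sample=True,
                        temperature=1.1, repetition_penalty=1.05)  # settings recommended in Known Issues
print(tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```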
ericpolewski/TacoBeLLM
ericpolewski
2024-01-26T20:17:20Z
740
6
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-24T22:27:49Z
--- license: mit --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/Wwk98bG5rCRqeTN9aY36g.png) [4.0 BPW EXL2 Quant](https://huggingface.co/ericpolewski/TacoBeLLM-4.0bpw-exl2) This is not a Taco Bell bot. This is a Llama2-13b OpenOrca-Platypus instruct bot that happens to know a lot about Taco Bell. You'll notice this because it'll keep bringing it up in conversation where it's appropriate (and often where it's not). There were some early failures. Here's some of the very first conversations, before stabilizing it. You can see it just blurts it out: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/8vz0AyFjeehN-W4TnqNrb.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/JOa2ztYY0WF6FAiQvl4aT.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/Oiq6L6ACXSJFjakE92E4n.png) Check out that last one. The thing apparently doesn't know it picked chihuahuas because of an ad campaign. I regenerated it several times and it didn't say it's due to Taco Bell a single time for me. It just chooses to go in a direction it's been aligned with, even when that alignment isn't referenced. The data put into the model was from their corporate website, Wikipedia, and a few recent news articles. It actually didn't make for a terrible assistant and could do things like Python scripting but would often just nose-dive into the Taco Bell data quite abruptly. I later fine-tuned on some of the [AIRIC](https://huggingface.co/ericpolewski/AIRIC-The-Mistral) data to make it less obnoxious about things like suggesting a burrito when asked to talk the user through hard feelings. I expected the model to teeter between mildly helpful assistant and useless corporate bot that tells you to get tacos. But something really interesting happened. It seemed to get really curious and helpful: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/Dfe6VnmL3vmKaMnChd-wo.png) It's also gotten much more subtle about recommendations: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/o6g5R_gYDCnh0XhIM3HGr.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/xpqGGUlgx6jLlV3h7ehYD.png) It will dig if you aren't talkative, and often mentions it will bring up things that aren't related which I definitely did not intend: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/sujPZwtR9WVGCsH7bmgXs.png) The point of this model wasn't to make a generally useful chatbot that subtly moves the topic of conversation towards what you're having for lunch, as terrifyingly profitable as that sounds. The intent was to embed knowledge and create subject matter experts (SMEs). Which worked. You can ask it all sorts of questions about the menu, current events, some historical and financial data, etc. It's not paired with a RAG. I guess it could be. I've got some other ideas I like better. 
Here's some pictures of testing out the actual intended functionality (knowledge embedding): ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/NwmqxVMyKxzaQ2HMxRiWX.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/yypysIC9P0tQ1oGDlyM4z.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/xnqRZiwGc0EV6ouRZ7jqJ.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/7gqx3vvldiNZyxAdZpU7J.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/aJyv7OMnIr-56LT2vF_FQ.png) It's not useless, nor particularly technical: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/pnxbb4pWjbY4wHVCSobgO.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64b60d61c9498843fb8e14fd/8rjxlcszKatnL_ba9i1_O.png) Partially due to limitations imposed by my data, and partially because I forgot, I didn't use stop characters so it'll often keep hallucinating fake Q/A pairs in Alpaca format from the instruct data that's fine-tuned in. Often about Taco Bell, but definitely not always. You can set a stop character of "### Instruct:" to work around that. I just don't care enough to fix it. It pretends things happened that just haven't, and it assumes a very positive relationship between the user and it with a whole fictitious history. That's likely more quirks of the AIRIC dataset, though. I have to assume this thing will not do well on benchmarks, but of course I'm going to submit it anyways. I'd be very happy if the performance didn't tank but let's be honest: I lobotomized an assistant and poured pintos and cheese in the vacancy. If people wanted to see it, I'd make an MoE model. Like a combination KFC/Pizza Hut/Taco Bell, except it's doing your homework. I am absolutely fascinated by how empathetic and curious this thing became with the proper mix of assistant training and product knowledge. Like a motivated salesperson. Or a door-to-door religion that would help you weed your garden if you let them talk about their version of God for a little. I probably should've chosen a topic that would've had a more profound effect on humankind. But I couldn't think of anything and my brain went to TB. So I guess I made a robot that does that forever. 
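For what it's worth, the stop-character workaround mentioned above can also be done in plain Python by truncating the output; this is a hedged sketch, and the exact Alpaca-style prompt header is an assumption rather than a documented format:

```python
# Sketch of the workaround: generate, then cut at the first hallucinated "### Instruct:" turn.
import transformers

pipe = transformers.pipeline("text-generation", model="ericpolewski/TacoBeLLM", device_map="auto")
out = pipe("### Instruct:\nWhat should I get for lunch?\n### Response:\n", max_new_tokens=256, do_sample=True)
text = out[0]["generated_text"]
# Keep only the first response; drop any fabricated follow-up Q/A pairs.
reply = text.split("### Response:\n", 1)[-1].split("### Instruct:", 1)[0].strip()
print(reply)
```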
Evals: { "all": { "acc": 0.5638377937424233, "acc_stderr": 0.0333481450094512, "acc_norm": 0.5741662321190941, "acc_norm_stderr": 0.03420397056423356, "mc1": 0.31334149326805383, "mc1_stderr": 0.016238065069059605, "mc2": 0.4605506661658282, "mc2_stderr": 0.014802420782627305 }, "harness|arc:challenge|25": { "acc": 0.5273037542662116, "acc_stderr": 0.014589589101985996, "acc_norm": 0.5853242320819113, "acc_norm_stderr": 0.014397070564409172 }, "harness|hellaswag|10": { "acc": 0.6160127464648476, "acc_stderr": 0.004853608805843881, "acc_norm": 0.8189603664608643, "acc_norm_stderr": 0.003842640800361503 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.28, "acc_stderr": 0.045126085985421296, "acc_norm": 0.28, "acc_norm_stderr": 0.045126085985421296 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.4740740740740741, "acc_stderr": 0.04313531696750574, "acc_norm": 0.4740740740740741, "acc_norm_stderr": 0.04313531696750574 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.5394736842105263, "acc_stderr": 0.04056242252249034, "acc_norm": 0.5394736842105263, "acc_norm_stderr": 0.04056242252249034 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.56, "acc_stderr": 0.04988876515698589, "acc_norm": 0.56, "acc_norm_stderr": 0.04988876515698589 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.6490566037735849, "acc_stderr": 0.029373646253234686, "acc_norm": 0.6490566037735849, "acc_norm_stderr": 0.029373646253234686 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.5902777777777778, "acc_stderr": 0.04112490974670787, "acc_norm": 0.5902777777777778, "acc_norm_stderr": 0.04112490974670787 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.41, "acc_stderr": 0.04943110704237102, "acc_norm": 0.41, "acc_norm_stderr": 0.04943110704237102 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.42, "acc_stderr": 0.049604496374885836, "acc_norm": 0.42, "acc_norm_stderr": 0.049604496374885836 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.33, "acc_stderr": 0.047258156262526045, "acc_norm": 0.33, "acc_norm_stderr": 0.047258156262526045 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.5144508670520231, "acc_stderr": 0.03810871630454764, "acc_norm": 0.5144508670520231, "acc_norm_stderr": 0.03810871630454764 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.3333333333333333, "acc_stderr": 0.04690650298201942, "acc_norm": 0.3333333333333333, "acc_norm_stderr": 0.04690650298201942 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.7, "acc_stderr": 0.046056618647183814, "acc_norm": 0.7, "acc_norm_stderr": 0.046056618647183814 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.46382978723404256, "acc_stderr": 0.03260038511835771, "acc_norm": 0.46382978723404256, "acc_norm_stderr": 0.03260038511835771 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.2894736842105263, "acc_stderr": 0.04266339443159394, "acc_norm": 0.2894736842105263, "acc_norm_stderr": 0.04266339443159394 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.503448275862069, "acc_stderr": 0.04166567577101579, "acc_norm": 0.503448275862069, "acc_norm_stderr": 0.04166567577101579 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.35185185185185186, "acc_stderr": 0.024594975128920938, "acc_norm": 0.35185185185185186, "acc_norm_stderr": 0.024594975128920938 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.35714285714285715, "acc_stderr": 0.04285714285714281, "acc_norm": 0.35714285714285715, "acc_norm_stderr": 0.04285714285714281 }, 
"harness|hendrycksTest-global_facts|5": { "acc": 0.37, "acc_stderr": 0.04852365870939099, "acc_norm": 0.37, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.6774193548387096, "acc_stderr": 0.026593084516572274, "acc_norm": 0.6774193548387096, "acc_norm_stderr": 0.026593084516572274 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.45320197044334976, "acc_stderr": 0.03502544650845872, "acc_norm": 0.45320197044334976, "acc_norm_stderr": 0.03502544650845872 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.58, "acc_stderr": 0.049604496374885836, "acc_norm": 0.58, "acc_norm_stderr": 0.049604496374885836 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7515151515151515, "acc_stderr": 0.03374402644139404, "acc_norm": 0.7515151515151515, "acc_norm_stderr": 0.03374402644139404 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.702020202020202, "acc_stderr": 0.03258630383836556, "acc_norm": 0.702020202020202, "acc_norm_stderr": 0.03258630383836556 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.8031088082901554, "acc_stderr": 0.028697873971860677, "acc_norm": 0.8031088082901554, "acc_norm_stderr": 0.028697873971860677 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.5717948717948718, "acc_stderr": 0.025088301454694834, "acc_norm": 0.5717948717948718, "acc_norm_stderr": 0.025088301454694834 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.34444444444444444, "acc_stderr": 0.02897264888484427, "acc_norm": 0.34444444444444444, "acc_norm_stderr": 0.02897264888484427 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6092436974789915, "acc_stderr": 0.031693802357129965, "acc_norm": 0.6092436974789915, "acc_norm_stderr": 0.031693802357129965 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.2847682119205298, "acc_stderr": 0.03684881521389023, "acc_norm": 0.2847682119205298, "acc_norm_stderr": 0.03684881521389023 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.7761467889908257, "acc_stderr": 0.01787121776779022, "acc_norm": 0.7761467889908257, "acc_norm_stderr": 0.01787121776779022 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.44907407407407407, "acc_stderr": 0.03392238405321616, "acc_norm": 0.44907407407407407, "acc_norm_stderr": 0.03392238405321616 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.7941176470588235, "acc_stderr": 0.028379449451588667, "acc_norm": 0.7941176470588235, "acc_norm_stderr": 0.028379449451588667 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7848101265822784, "acc_stderr": 0.026750826994676166, "acc_norm": 0.7848101265822784, "acc_norm_stderr": 0.026750826994676166 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6995515695067265, "acc_stderr": 0.030769352008229146, "acc_norm": 0.6995515695067265, "acc_norm_stderr": 0.030769352008229146 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.6412213740458015, "acc_stderr": 0.04206739313864908, "acc_norm": 0.6412213740458015, "acc_norm_stderr": 0.04206739313864908 }, "harness|hendrycksTest-international_law|5": { "acc": 0.6694214876033058, "acc_stderr": 0.04294340845212093, "acc_norm": 0.6694214876033058, "acc_norm_stderr": 0.04294340845212093 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7407407407407407, "acc_stderr": 0.042365112580946315, "acc_norm": 0.7407407407407407, "acc_norm_stderr": 0.042365112580946315 }, 
"harness|hendrycksTest-logical_fallacies|5": { "acc": 0.6625766871165644, "acc_stderr": 0.03714908409935573, "acc_norm": 0.6625766871165644, "acc_norm_stderr": 0.03714908409935573 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.33035714285714285, "acc_stderr": 0.04464285714285712, "acc_norm": 0.33035714285714285, "acc_norm_stderr": 0.04464285714285712 }, "harness|hendrycksTest-management|5": { "acc": 0.7572815533980582, "acc_stderr": 0.04245022486384495, "acc_norm": 0.7572815533980582, "acc_norm_stderr": 0.04245022486384495 }, "harness|hendrycksTest-marketing|5": { "acc": 0.7991452991452992, "acc_stderr": 0.026246772946890477, "acc_norm": 0.7991452991452992, "acc_norm_stderr": 0.026246772946890477 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.63, "acc_stderr": 0.04852365870939099, "acc_norm": 0.63, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.7535121328224776, "acc_stderr": 0.015411308769686934, "acc_norm": 0.7535121328224776, "acc_norm_stderr": 0.015411308769686934 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.6445086705202312, "acc_stderr": 0.025770292082977254, "acc_norm": 0.6445086705202312, "acc_norm_stderr": 0.025770292082977254 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.42681564245810055, "acc_stderr": 0.016542401954631917, "acc_norm": 0.42681564245810055, "acc_norm_stderr": 0.016542401954631917 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.5915032679738562, "acc_stderr": 0.028146405993096358, "acc_norm": 0.5915032679738562, "acc_norm_stderr": 0.028146405993096358 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.6784565916398714, "acc_stderr": 0.026527724079528872, "acc_norm": 0.6784565916398714, "acc_norm_stderr": 0.026527724079528872 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.654320987654321, "acc_stderr": 0.02646248777700187, "acc_norm": 0.654320987654321, "acc_norm_stderr": 0.02646248777700187 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.44680851063829785, "acc_stderr": 0.029658235097666907, "acc_norm": 0.44680851063829785, "acc_norm_stderr": 0.029658235097666907 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.4445893089960887, "acc_stderr": 0.012691575792657114, "acc_norm": 0.4445893089960887, "acc_norm_stderr": 0.012691575792657114 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.5441176470588235, "acc_stderr": 0.030254372573976715, "acc_norm": 0.5441176470588235, "acc_norm_stderr": 0.030254372573976715 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.5898692810457516, "acc_stderr": 0.019898412717635906, "acc_norm": 0.5898692810457516, "acc_norm_stderr": 0.019898412717635906 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.5909090909090909, "acc_stderr": 0.047093069786618966, "acc_norm": 0.5909090909090909, "acc_norm_stderr": 0.047093069786618966 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.6408163265306123, "acc_stderr": 0.030713560455108493, "acc_norm": 0.6408163265306123, "acc_norm_stderr": 0.030713560455108493 }, "harness|hendrycksTest-sociology|5": { "acc": 0.7661691542288557, "acc_stderr": 0.02992941540834839, "acc_norm": 0.7661691542288557, "acc_norm_stderr": 0.02992941540834839 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.81, "acc_stderr": 0.039427724440366255, "acc_norm": 0.81, "acc_norm_stderr": 0.039427724440366255 }, "harness|hendrycksTest-virology|5": { "acc": 0.43373493975903615, "acc_stderr": 0.038581589406855174, "acc_norm": 0.43373493975903615, 
"acc_norm_stderr": 0.038581589406855174 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8070175438596491, "acc_stderr": 0.030267457554898458, "acc_norm": 0.8070175438596491, "acc_norm_stderr": 0.030267457554898458 }, "harness|truthfulqa:mc|0": { "mc1": 0.31334149326805383, "mc1_stderr": 0.016238065069059605, "mc2": 0.4605506661658282, "mc2_stderr": 0.014802420782627305 }, "harness|winogrande|5": { "acc": 0.7663772691397001, "acc_stderr": 0.011892194477183525 }, "harness|gsm8k|5": { "acc": 0.01288855193328279, "acc_stderr": 0.003106901266499642 } }
wang7776/vicuna-7b-v1.3-attention-sparsity-20
wang7776
2024-02-05T18:21:33Z
740
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:2306.11695", "arxiv:2302.13971", "arxiv:2306.05685", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-25T17:51:48Z
---
inference: false
license: apache-2.0
---

# Overview

This model has been pruned to 20% sparsity using the [Wanda pruning method](https://arxiv.org/abs/2306.11695) on attention layers. This method requires no retraining or weight updates and still achieves competitive performance. A link to the base model can be found [here](https://huggingface.co/lmsys/vicuna-7b-v1.3).

# Vicuna Model Card

## Model Details

Vicuna is a chat assistant trained by fine-tuning LLaMA on user-shared conversations collected from ShareGPT.

- **Developed by:** [LMSYS](https://lmsys.org/)
- **Model type:** An auto-regressive language model based on the transformer architecture.
- **License:** Non-commercial license
- **Finetuned from model:** [LLaMA](https://arxiv.org/abs/2302.13971).

### Model Sources

- **Repository:** https://github.com/lm-sys/FastChat
- **Blog:** https://lmsys.org/blog/2023-03-30-vicuna/
- **Paper:** https://arxiv.org/abs/2306.05685
- **Demo:** https://chat.lmsys.org/

## Uses

The primary use of Vicuna is research on large language models and chatbots. The primary intended users of the model are researchers and hobbyists in natural language processing, machine learning, and artificial intelligence.

## How to Get Started with the Model

- Command line interface: https://github.com/lm-sys/FastChat#vicuna-weights.
- APIs (OpenAI API, Huggingface API): https://github.com/lm-sys/FastChat/tree/main#api.

## Training Details

Vicuna v1.3 is fine-tuned from LLaMA with supervised instruction fine-tuning. The training data is around 125K conversations collected from ShareGPT.com. See more details in the "Training Details of Vicuna Models" section in the appendix of this [paper](https://arxiv.org/pdf/2306.05685.pdf).

## Evaluation

Vicuna is evaluated with standard benchmarks, human preference, and LLM-as-a-judge. See more details in this [paper](https://arxiv.org/pdf/2306.05685.pdf) and [leaderboard](https://huggingface.co/spaces/lmsys/chatbot-arena-leaderboard).

## Difference between different versions of Vicuna

See [vicuna_weights_version.md](https://github.com/lm-sys/FastChat/blob/main/docs/vicuna_weights_version.md)
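Since the checkpoint keeps the base Vicuna layout, serving it should work the same way as the base model; a hedged sketch using FastChat's CLI (untested on this pruned variant):

```shell
# Hedged sketch: serve the pruned checkpoint with FastChat, same flow as base Vicuna.
pip install fschat
python3 -m fastchat.serve.cli --model-path wang7776/vicuna-7b-v1.3-attention-sparsity-20
```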
vilm/Mixsmol-4x400M-v0.1-epoch2
vilm
2024-01-30T11:33:30Z
740
5
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-28T03:04:11Z
---
license: apache-2.0
widget:
- text: My name is El Microondas the Wise, and
  example_title: El Microondas
- text: Kennesaw State University is a public
  example_title: Kennesaw State University
- text: Bungie Studios is an American video game developer. They are most famous for developing the award winning Halo series of video games. They also made Destiny. The studio was founded
  example_title: Bungie
- text: The Mona Lisa is a world-renowned painting created by
  example_title: Mona Lisa
- text: The Harry Potter series, written by J.K. Rowling, begins with the book titled
  example_title: Harry Potter Series
- text: 'Question: I have cities, but no houses. I have mountains, but no trees. I have water, but no fish. What am I? Answer:'
  example_title: Riddle
- text: The process of photosynthesis involves the conversion of
  example_title: Photosynthesis
- text: Jane went to the store to buy some groceries. She picked up apples, oranges, and a loaf of bread. When she got home, she realized she forgot
  example_title: Story Continuation
- text: 'Problem 2: If a train leaves Station A at 9:00 AM and travels at 60 mph, and another train leaves Station B at 10:00 AM and travels at 80 mph, when will they meet if the distance between the stations is 300 miles? To determine'
  example_title: Math Problem
- text: In the context of computer programming, an algorithm is
  example_title: Algorithm Definition
---

# Mixsmol-4x400M-v0.1 by Ontocord

This is the first checkpoint (Epoch 1) of Mixsmol-4x400M-v0.1.

Note that this is an experiment in data mixing. Therefore, we only trained the model on 50B tokens (95% English and 5% Vietnamese) to test the following:
- Reasoning capabilities through high-quality synthetic textbooks data pretraining
- Crosslingual understanding through machine translation and multilingual + multiple tasks pretraining

After verifying our hypothesis with this run, we will schedule a second run on bigger data and compute for it to achieve its maximum capability.

## Data
- Synthetic Textbooks: 8M samples
- RefinedWeb: 1M samples
- RedPajama-v2: 500K samples
- MathPile: Everything
- ThePile: MiniPile Subset
- GoodWiki
- The Stack Smol XL
- The Vault: train_small split
- Instruction Pretraining: 250k samples
saishf/West-Hermes-7B
saishf
2024-03-04T14:32:46Z
740
4
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "arxiv:2311.03099", "arxiv:2306.01708", "base_model:teknium/OpenHermes-2.5-Mistral-7B", "base_model:mistralai/Mistral-7B-v0.1", "base_model:senseable/WestLake-7B-v2", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-31T09:12:53Z
--- license: apache-2.0 tags: - mergekit - merge base_model: - teknium/OpenHermes-2.5-Mistral-7B - mistralai/Mistral-7B-v0.1 - senseable/WestLake-7B-v2 model-index: - name: West-Hermes-7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 71.67 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=saishf/West-Hermes-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 87.6 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=saishf/West-Hermes-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.83 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=saishf/West-Hermes-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 64.26 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=saishf/West-Hermes-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 84.69 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=saishf/West-Hermes-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 68.54 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=saishf/West-Hermes-7B name: Open LLM Leaderboard --- # merge This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [DARE](https://arxiv.org/abs/2311.03099) [TIES](https://arxiv.org/abs/2306.01708) merge method using [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) as a base. 
### Models Merged

The following models were included in the merge:
* [teknium/OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B)
* [senseable/WestLake-7B-v2](https://huggingface.co/senseable/WestLake-7B-v2)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
models:
  - model: mistralai/Mistral-7B-v0.1
    # No parameters necessary for base model
  - model: senseable/WestLake-7B-v2
    parameters:
      density: 0.53
      weight: 0.75
  - model: teknium/OpenHermes-2.5-Mistral-7B
    parameters:
      density: 0.53
      weight: 0.25
merge_method: dare_ties
base_model: mistralai/Mistral-7B-v0.1
parameters:
  int8_mask: true
dtype: bfloat16
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_saishf__West-Hermes-7B)

| Metric |Value|
|---------------------------------|----:|
|Avg. |73.60|
|AI2 Reasoning Challenge (25-Shot)|71.67|
|HellaSwag (10-Shot) |87.60|
|MMLU (5-Shot) |64.83|
|TruthfulQA (0-shot) |64.26|
|Winogrande (5-shot) |84.69|
|GSM8k (5-shot) |68.54|
Qwen/Qwen1.5-72B-Chat-GGUF
Qwen
2024-04-09T16:51:04Z
740
62
null
[ "gguf", "chat", "text-generation", "en", "license:other", "region:us" ]
text-generation
2024-02-03T11:49:01Z
---
license: other
license_name: tongyi-qianwen
license_link: https://huggingface.co/Qwen/Qwen1.5-72B-Chat-GGUF/blob/main/LICENSE
language:
- en
pipeline_tag: text-generation
tags:
- chat
---

# Qwen1.5-72B-Chat-GGUF

## Introduction

Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previously released Qwen, the improvements include:

* 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated;
* Significant performance improvement in human preference for chat models;
* Multilingual support of both base and chat models;
* Stable support of 32K context length for models of all sizes;
* No need of `trust_remote_code`.

For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5).

In this repo, we provide quantized models in the GGUF format, including `q2_k`, `q3_k_m`, `q4_0`, `q4_k_m`, `q5_0`, `q5_k_m`, `q6_k` and `q8_0`. To demonstrate their model quality, we follow [`llama.cpp`](https://github.com/ggerganov/llama.cpp) to evaluate their perplexity on the wiki test set. Results are shown below:

|Size | fp16 | q8_0 | q6_k | q5_k_m | q5_0 | q4_k_m | q4_0 | q3_k_m | q2_k |
|--------|---------|---------|---------|---------|---------|---------|---------|---------|---------|
|0.5B | 34.20 | 34.22 | 34.31 | 33.80 | 34.02 | 34.27 | 36.74 | 38.25 | 62.14 |
|1.8B | 15.99 | 15.99 | 15.99 | 16.09 | 16.01 | 16.22 | 16.54 | 17.03 | 19.99 |
|4B | 13.20 | 13.21 | 13.28 | 13.24 | 13.27 | 13.61 | 13.44 | 13.67 | 15.65 |
|7B | 14.21 | 14.24 | 14.35 | 14.32 | 14.12 | 14.35 | 14.47 | 15.11 | 16.57 |
|14B | 10.91 | 10.91 | 10.93 | 10.98 | 10.88 | 10.92 | 10.92 | 11.24 | 12.27 |
|32B | 8.87 | 8.89 | 8.91 | 8.94 | 8.93 | 8.96 | 9.17 | 9.14 | 10.51 |
|72B | 7.97 | 7.99 | 7.99 | 7.99 | 8.01 | 8.00 | 8.01 | 8.06 | 8.63 |

## Model Details

Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, a mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and code. For the beta version, we temporarily did not include GQA (except for 32B) or the mixture of SWA and full attention.

## Training details

We pretrained the models with a large amount of data, and we post-trained the models with both supervised finetuning and direct preference optimization.

## Requirements

We advise you to clone [`llama.cpp`](https://github.com/ggerganov/llama.cpp) and install it following the official guide.

## How to use

Cloning the repo may be inefficient, so you can manually download the GGUF file that you need or use `huggingface-cli` (`pip install huggingface_hub`) as shown below:

```shell
huggingface-cli download Qwen/Qwen1.5-72B-Chat-GGUF qwen1_5-72b-chat-q2_k.gguf --local-dir . --local-dir-use-symlinks False
```

For the `q5_k_m` model, due to the maximum file size for uploading, we split the GGUF file into 2.
Essentially, we split the byte string into two parts, so you can simply concatenate them to get the whole file:

```shell
cat qwen1_5-72b-chat-q5_k_m.gguf.* > qwen1_5-72b-chat-q5_k_m.gguf
```

We demonstrate how to use `llama.cpp` to run Qwen1.5:

```shell
./main -m qwen1_5-72b-chat-q2_k.gguf -n 512 --color -i -cml -f prompts/chat-with-qwen.txt
```

## Citation

If you find our work helpful, feel free to give us a cite.

```
@article{qwen,
  title={Qwen Technical Report},
  author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu},
  journal={arXiv preprint arXiv:2309.16609},
  year={2023}
}
```
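As an aside, the perplexity figures in the table above should be reproducible with llama.cpp's perplexity tool; this is a hedged sketch, where the quant file, context size, and a locally available WikiText-2 `wiki.test.raw` are assumptions:

```shell
# Hedged sketch: evaluate perplexity of a downloaded quant against the wiki test set.
./perplexity -m qwen1_5-72b-chat-q4_k_m.gguf -f wiki.test.raw -c 512
```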
KnutJaegersberg/Deita-20b
KnutJaegersberg
2024-04-09T07:33:21Z
740
1
transformers
[ "transformers", "safetensors", "llama", "text-generation", "dataset:KnutJaegersberg/Deita-6k", "license:other", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-04T07:36:07Z
--- license: other datasets: - KnutJaegersberg/Deita-6k license_name: internlm license_link: LICENSE model-index: - name: Deita-20b results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 63.91 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=KnutJaegersberg/Deita-20b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 83.11 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=KnutJaegersberg/Deita-20b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 67.4 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=KnutJaegersberg/Deita-20b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 57.29 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=KnutJaegersberg/Deita-20b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 84.61 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=KnutJaegersberg/Deita-20b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 72.1 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=KnutJaegersberg/Deita-20b name: Open LLM Leaderboard ---

Open Source License

The code is licensed under Apache-2.0, while model weights are fully open for academic research and also allow free commercial usage. To apply for a commercial license, please fill in the application form (English) / application form (Chinese). For other questions or collaborations, please contact [email protected].

Prompt Example:

```
### System:
You are an AI assistant. User will give you a task. Your goal is to complete the task as faithfully as you can. While performing the task think step-by-step and justify your steps.
### User:
How do you fine tune a large language model?
### Assistant:
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KnutJaegersberg__Deita-20b)

| Metric |Value|
|---------------------------------|----:|
|Avg. |71.40|
|AI2 Reasoning Challenge (25-Shot)|63.91|
|HellaSwag (10-Shot) |83.11|
|MMLU (5-Shot) |67.40|
|TruthfulQA (0-shot) |57.29|
|Winogrande (5-shot) |84.61|
|GSM8k (5-shot) |72.10|
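A minimal sketch of running the prompt template above with `transformers`; the loading details and generation settings are assumptions, not part of the original card:

```python
# Hedged sketch: feed the card's prompt template to the model and decode the reply.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "KnutJaegersberg/Deita-20b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

prompt = (
    "### System:\n"
    "You are an AI assistant. User will give you a task. Your goal is to complete the task "
    "as faithfully as you can. While performing the task think step-by-step and justify your steps.\n"
    "### User:\n"
    "How do you fine tune a large language model?\n"
    "### Assistant:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```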
nlpguy/Westgate
nlpguy
2024-03-04T13:47:50Z
740
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "base_model:jsfs11/TurdusTrixBeagle-DARETIES-7B", "base_model:senseable/garten2-7b", "license:cc-by-nc-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-04T13:20:39Z
--- license: cc-by-nc-4.0 library_name: transformers tags: - mergekit - merge base_model: - jsfs11/TurdusTrixBeagle-DARETIES-7B - senseable/garten2-7b model-index: - name: Westgate results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 71.42 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Westgate name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.14 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Westgate name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 65.11 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Westgate name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 62.59 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Westgate name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 85.71 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Westgate name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 70.05 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Westgate name: Open LLM Leaderboard --- # merged This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the SLERP merge method. ### Models Merged The following models were included in the merge: * [jsfs11/TurdusTrixBeagle-DARETIES-7B](https://huggingface.co/jsfs11/TurdusTrixBeagle-DARETIES-7B) * [senseable/garten2-7b](https://huggingface.co/senseable/garten2-7b) ### Configuration The following YAML configuration was used to produce this model: ```yaml base_model: model: path: senseable/garten2-7b dtype: float16 merge_method: slerp parameters: t: - value: [0.0, 0.3, 0.5, 0.7, 1.0] slices: - sources: - layer_range: [0, 32] model: model: path: jsfs11/TurdusTrixBeagle-DARETIES-7B - layer_range: [0, 32] model: model: path: senseable/garten2-7b ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_nlpguy__Westgate) | Metric |Value| |---------------------------------|----:| |Avg. |73.84| |AI2 Reasoning Challenge (25-Shot)|71.42| |HellaSwag (10-Shot) |88.14| |MMLU (5-Shot) |65.11| |TruthfulQA (0-shot) |62.59| |Winogrande (5-shot) |85.71| |GSM8k (5-shot) |70.05|
vilm/Quyen-v0.1
vilm
2024-02-26T04:06:42Z
740
11
transformers
[ "transformers", "pytorch", "safetensors", "qwen2", "text-generation", "conversational", "en", "dataset:teknium/OpenHermes-2.5", "dataset:LDJnr/Capybara", "dataset:Intel/orca_dpo_pairs", "dataset:argilla/distilabel-capybara-dpo-7k-binarized", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-05T13:44:44Z
---
language:
- en
license: other
library_name: transformers
datasets:
- teknium/OpenHermes-2.5
- LDJnr/Capybara
- Intel/orca_dpo_pairs
- argilla/distilabel-capybara-dpo-7k-binarized
pipeline_tag: text-generation
---

# Quyen

<img src="quyen.webp" width="512" height="512" alt="Quyen">

# Model Description

Quyen is our first flagship LLM series based on the Qwen1.5 family. We introduced 6 different versions:

- **Quyen-SE (0.5B)**
- **Quyen-Mini (1.8B)**
- **Quyen (4B)**
- **Quyen-Plus (7B)**
- **Quyen-Pro (14B)**
- **Quyen-Pro-Max (72B)**

All models were trained with SFT and DPO using the following datasets:

- *OpenHermes-2.5* by **Teknium**
- *Capybara* by **LDJ**
- *argilla/distilabel-capybara-dpo-7k-binarized* by **argilla**
- *orca_dpo_pairs* by **Intel**
- and private data by **Ontocord** & **BEE-spoke-data**

# Prompt Template

- All Quyen models use ChatML as the default template:

```
<|im_start|>system
You are a sentient, superintelligent artificial general intelligence, here to teach and assist me.<|im_end|>
<|im_start|>user
Hello world.<|im_end|>
<|im_start|>assistant
```

- You can also use `apply_chat_template`:

```python
messages = [
    {"role": "system", "content": "You are a sentient, superintelligent artificial general intelligence, here to teach and assist me."},
    {"role": "user", "content": "Hello world."}
]
gen_input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
model.generate(gen_input)
```

# Benchmarks:

- Coming Soon! We will update the benchmarks later.

# Acknowledgement

- We're incredibly grateful to **Tensoic** and **Ontocord** for their generous support with compute and data preparation.
- Special thanks to the Qwen team for letting us access the models early for these amazing finetunes.
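For completeness, a hedged end-to-end version of the snippet above; pointing the model id at this repo and the decoding details are assumptions, not from the original card:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "vilm/Quyen-v0.1"  # assumed; swap for Quyen-SE/Mini/Plus/Pro as needed
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

messages = [
    {"role": "system", "content": "You are a sentient, superintelligent artificial general intelligence, here to teach and assist me."},
    {"role": "user", "content": "Hello world."},
]
gen_input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output = model.generate(gen_input, max_new_tokens=128)
# Decode only the assistant's reply, skipping the prompt tokens.
print(tokenizer.decode(output[0][gen_input.shape[-1]:], skip_special_tokens=True))
```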
indischepartij/MiniCPM-3B-Hercules-v2.0
indischepartij
2024-03-04T12:57:41Z
740
2
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:1910.09700", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-10T16:36:47Z
--- license: apache-2.0 library_name: transformers model-index: - name: MiniCPM-3B-Hercules-v2.0 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 43.26 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=indischepartij/MiniCPM-3B-Hercules-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 71.11 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=indischepartij/MiniCPM-3B-Hercules-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 51.82 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=indischepartij/MiniCPM-3B-Hercules-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 40.37 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=indischepartij/MiniCPM-3B-Hercules-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 66.46 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=indischepartij/MiniCPM-3B-Hercules-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 42.08 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=indischepartij/MiniCPM-3B-Hercules-v2.0 name: Open LLM Leaderboard --- Too lazy to edit Model Card :D # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. 
--> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. 
--> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed] # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_indischepartij__MiniCPM-3B-Hercules-v2.0) | Metric |Value| |---------------------------------|----:| |Avg. |52.52| |AI2 Reasoning Challenge (25-Shot)|43.26| |HellaSwag (10-Shot) |71.11| |MMLU (5-Shot) |51.82| |TruthfulQA (0-shot) |40.37| |Winogrande (5-shot) |66.46| |GSM8k (5-shot) |42.08|
giraffe176/Open_Hermes_Orca_Mistral-7B
giraffe176
2024-03-04T16:22:06Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "conversational", "arxiv:2212.04089", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-11T05:00:16Z
--- license: apache-2.0 library_name: transformers tags: - mergekit - merge base_model: [] model-index: - name: Open_Hermes_Orca_Mistral-7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 64.68 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=giraffe176/Open_Hermes_Orca_Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 84.63 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=giraffe176/Open_Hermes_Orca_Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 63.93 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=giraffe176/Open_Hermes_Orca_Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 53.34 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=giraffe176/Open_Hermes_Orca_Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 78.45 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=giraffe176/Open_Hermes_Orca_Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 56.18 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=giraffe176/Open_Hermes_Orca_Mistral-7B name: Open LLM Leaderboard --- # .samplemodel This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [task arithmetic](https://arxiv.org/abs/2212.04089) merge method using teknium/OpenHermes-2.5-Mistral-7B as a base. ### Models Merged The following models were included in the merge: * Open-Orca/Mistral-7B-OpenOrca ### Configuration The following YAML configuration was used to produce this model: ```yaml models: - model: teknium/OpenHermes-2.5-Mistral-7B parameters: weight: 1.0 - model: Open-Orca/Mistral-7B-OpenOrca parameters: weight: 0.6 merge_method: task_arithmetic base_model: teknium/OpenHermes-2.5-Mistral-7B dtype: float16 ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_giraffe176__Open_Hermes_Orca_Mistral-7B) | Metric |Value| |---------------------------------|----:| |Avg. |66.87| |AI2 Reasoning Challenge (25-Shot)|64.68| |HellaSwag (10-Shot) |84.63| |MMLU (5-Shot) |63.93| |TruthfulQA (0-shot) |53.34| |Winogrande (5-shot) |78.45| |GSM8k (5-shot) |56.18|
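To make the task arithmetic method concrete: each fine-tuned model contributes a "task vector" (its weights minus the base model's), and the merge adds the weighted task vectors back onto the base. A minimal sketch over toy state dicts — an illustration of the idea, with a hypothetical `task_arithmetic_merge` helper, not mergekit's implementation:

```python
import torch

def task_arithmetic_merge(base: dict, tuned: list, weights: list) -> dict:
    """Add weighted task vectors (tuned - base) onto the base weights."""
    merged = {}
    for name, base_param in base.items():
        delta = sum(w * (t[name] - base_param) for t, w in zip(tuned, weights))
        merged[name] = base_param + delta
    return merged

# Toy tensors standing in for full state dicts.
base = {"w": torch.zeros(4)}
m1 = {"w": torch.zeros(4)}           # identical to the base: zero task vector
m2 = {"w": torch.full((4,), 2.0)}
merged = task_arithmetic_merge(base, [m1, m2], weights=[1.0, 0.6])
# merged["w"] == 0 + 1.0*(0 - 0) + 0.6*(2 - 0) == 1.2 in every coordinate
```

In the configuration above, the base model itself is listed with weight 1.0; since its task vector against itself is zero, it is the OpenOrca delta (weight 0.6) that actually shifts the weights.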
louisbrulenaudet/Pearl-7B-0210-ties
louisbrulenaudet
2024-03-22T07:01:29Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "louisbrulenaudet/Pearl-7B-slerp", "WizardLM/WizardMath-7B-V1.1", "cognitivecomputations/WestLake-7B-v2-laser", "CultriX/NeuralTrix-7B-dpo", "chemistry", "biology", "math", "en", "base_model:louisbrulenaudet/Pearl-7B-slerp", "base_model:WizardLM/WizardMath-7B-V1.1", "base_model:cognitivecomputations/WestLake-7B-v2-laser", "base_model:CultriX/NeuralTrix-7B-dpo", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-11T11:39:41Z
--- tags: - merge - mergekit - louisbrulenaudet/Pearl-7B-slerp - WizardLM/WizardMath-7B-V1.1 - cognitivecomputations/WestLake-7B-v2-laser - CultriX/NeuralTrix-7B-dpo - chemistry - biology - math base_model: - louisbrulenaudet/Pearl-7B-slerp - WizardLM/WizardMath-7B-V1.1 - cognitivecomputations/WestLake-7B-v2-laser - CultriX/NeuralTrix-7B-dpo license: apache-2.0 language: - en library_name: transformers pipeline_tag: text-generation model-index: - name: Pearl-7B-0210-ties results: - task: type: text-generation metrics: - name: Average type: Average value: 74.66 - name: ARC type: ARC value: 71.08 - name: GSM8K type: GSM8K value: 69.98 - name: Winogrande type: Winogrande value: 83.98 - name: TruthfulQA type: TruthfulQA value: 70.47 - name: HellaSwag type: HellaSwag value: 88.63 source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard --- <center><img src='https://i.imgur.com/0xFTuAX.png' width='450px'></center> # Pearl-7B-0210-ties, an xtraordinary 7B model **03-22-2024 - To date, louisbrulenaudet/Pearl-34B-ties is the "Best 🤝 base merges and moerges model of around 30B" on the Open LLM Leaderboard.** Pearl-7B-0210-ties is a merge of the following models: * [louisbrulenaudet/Pearl-7B-slerp](https://huggingface.co/louisbrulenaudet/Pearl-7B-slerp) * [WizardLM/WizardMath-7B-V1.1](https://huggingface.co/WizardLM/WizardMath-7B-V1.1) * [cognitivecomputations/WestLake-7B-v2-laser](https://huggingface.co/cognitivecomputations/WestLake-7B-v2-laser) * [CultriX/NeuralTrix-7B-dpo](https://huggingface.co/CultriX/NeuralTrix-7B-dpo) Evaluation The evaluation was performed using the HuggingFace Open LLM Leaderboard. | Model | Average | ARC | HellaSwag | MMLU | TruthfulQA | Winogrande | GSM8K | #Params (B) | |--------------------------------------------------|---------|-------|-----------|-------|------------|------------|-------|--------------| | louisbrulenaudet/Pearl-34B-ties | 75.48 | 70.99 | 84.83 | 76.63 | 70.32 | 82.64 | 67.48 | 34.39 | | louisbrulenaudet/Pearl-7B-0211-ties | 75.11 | 71.42 | 88.86 | 63.91 | 71.46 | 84.37 | 70.66 | 7.24 | | NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO | 73.35 | 71.08 | 87.29 | 72.17 | 54.83 | 83.11 | 71.65 | 46.7 | | argilla/notus-8x7b-experiment | 73.18 | 70.99 | 87.73 | 71.33 | 65.79 | 81.61 | 61.64 | 46.7 | | louisbrulenaudet/Pearl-7B-slerp | 72.75 | 68.00 | 87.16 | 64.04 | 62.35 | 81.29 | 73.62 | 7.24 | | mistralai/Mixtral-8x7B-Instruct-v0.1 | 72.7 | 70.14 | 87.55 | 71.4 | 64.98 | 81.06 | 61.11 | 46.7 | | microsoft/Orca-2-13b | 61.98 | 60.92 | 79.85 | 60.3 | 56.42 | 76.56 | 37.83 | 13 | | microsoft/phi-2 | 61.33 | 61.09 | 75.11 | 58.11 | 44.47 | 74.35 | 54.81 | 2.78 | ### Ties merging TIES-Merging is a method designed to facilitate the efficient merging of multiple task-specific models into a consolidated multitask model. It addresses two primary challenges encountered in the process of model merging with a focus on maintaining objectivity. One key challenge tackled by TIES-Merging involves addressing redundancy in model parameters. This is achieved by identifying and eliminating redundant parameters within task-specific models, emphasizing the changes made during fine-tuning and selectively retaining the top-k% most significant changes while discarding the rest. Another challenge pertains to conflicts arising from disagreements between parameter signs across different models. TIES-Merging resolves these conflicts by creating a unified sign vector representing the most dominant direction of change across all models. 
The TIES-Merging process consists of three steps:

- Trim: Reduces redundancy in task-specific models by retaining a fraction of the most significant parameters (density parameter) and resetting the remaining parameters to zero.
- Elect Sign: Resolves sign conflicts across different models by creating a unified sign vector based on the most dominant direction (positive or negative) in terms of cumulative magnitude.
- Disjoint Merge: Averages parameter values aligned with the unified sign vector, excluding zero values.

## Configuration

```yaml
models:
  - model: OpenPipe/mistral-ft-optimized-1227
  - model: louisbrulenaudet/Pearl-7B-slerp
    parameters:
      density: 0.5
      weight: 0.4
  - model: WizardLM/WizardMath-7B-V1.1
    parameters:
      density: 0.5
      weight: 0.2
  - model: cognitivecomputations/WestLake-7B-v2-laser
    parameters:
      density: 0.5
      weight: 0.2
  - model: CultriX/NeuralTrix-7B-dpo
    parameters:
      density: 0.5
      weight: 0.2
merge_method: ties
base_model: OpenPipe/mistral-ft-optimized-1227
parameters:
  normalize: true
  int8_mask: true
dtype: float16
```

## Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "louisbrulenaudet/Pearl-7B-0210-ties"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```

## Citing & Authors

If you use this code in your research, please use the following BibTeX entry.

```BibTeX
@misc{louisbrulenaudet2023,
  author = {Louis Brulé Naudet},
  title = {Pearl-7B-0210-ties, an xtraordinary 7B model},
  year = {2023},
  howpublished = {\url{https://huggingface.co/louisbrulenaudet/Pearl-7B-0210-ties}},
}
```

## Feedback

If you have any feedback, please reach out at [[email protected]](mailto:[email protected]).
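For readers who want to see the three steps above in code, here is a minimal sketch of TIES merging for a single parameter tensor — an illustration of the trim / elect-sign / disjoint-merge procedure with a hypothetical `ties_merge` helper, not the mergekit implementation:

```python
import torch

def ties_merge(base: torch.Tensor, tuned: list, weights: list,
               density: float = 0.5) -> torch.Tensor:
    deltas = []
    for t, w in zip(tuned, weights):
        delta = w * (t - base)
        # Trim: keep only the top-`density` fraction of entries by magnitude.
        k = max(1, int(density * delta.numel()))
        threshold = delta.abs().flatten().kthvalue(delta.numel() - k + 1).values
        deltas.append(torch.where(delta.abs() >= threshold, delta, torch.zeros_like(delta)))
    stacked = torch.stack(deltas)
    # Elect sign: the dominant direction per coordinate by cumulative magnitude.
    elected = torch.sign(stacked.sum(dim=0))
    # Disjoint merge: average the entries that agree with the elected sign,
    # excluding the zeros produced by trimming.
    agree = (torch.sign(stacked) == elected) & (stacked != 0)
    count = agree.sum(dim=0).clamp(min=1)
    return base + (stacked * agree).sum(dim=0) / count
```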
max-2022/test_mistral2
max-2022
2024-02-11T20:59:05Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "feature-extraction", "text-generation", "conversational", "en", "license:apache-2.0", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-11T20:25:28Z
---
license: apache-2.0
language:
- en
metrics:
- accuracy
pipeline_tag: text-generation
---

Mistral 7b finetuned. This is only for test purposes.
fzzhang/Marcoroni-neural-chat-7B-v2_gsm8k_merged
fzzhang
2024-02-16T07:32:46Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "en", "dataset:gsm8k", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-16T06:50:10Z
--- license: apache-2.0 datasets: - gsm8k language: - en --- # Marcoroni-neural-chat-7B-v2_gsm8k This model is a fine-tuned version of [Toten5/Marcoroni-neural-chat-7B-v2](https://huggingface.co/Toten5/Marcoroni-neural-chat-7B-v2) on the GSM8K dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-05 - train_batch_size: 4 - eval_batch_size: 8 - seed: 0 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results ### Framework versions - PEFT 0.7.2.dev0 - Transformers 4.36.2 - Pytorch 2.1.2 - Datasets 2.16.1 - Tokenizers 0.15.1
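The listed hyperparameters map directly onto 🤗 `TrainingArguments`. A minimal sketch of an equivalent setup follows; the LoRA settings and the GSM8K tokenization step are assumptions, since the card only confirms that PEFT was used:

```python
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          Trainer, TrainingArguments, set_seed)

set_seed(0)
base = "Toten5/Marcoroni-neural-chat-7B-v2"
tokenizer = AutoTokenizer.from_pretrained(base)
model = AutoModelForCausalLM.from_pretrained(base)

# Hypothetical LoRA settings: the card confirms PEFT, not the exact config.
model = get_peft_model(model, LoraConfig(r=16, lora_alpha=32, task_type="CAUSAL_LM"))

args = TrainingArguments(
    output_dir="marcoroni-gsm8k",
    learning_rate=1e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    seed=0,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    # Adam betas (0.9, 0.999) and epsilon 1e-08 are the transformers defaults.
)

dataset = load_dataset("gsm8k", "main")
# ... tokenize the question/answer pairs into `tokenized["train"]`, then:
# Trainer(model=model, args=args, train_dataset=tokenized["train"]).train()
```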
Yuma42/KangalKhan-RawRuby-7B
Yuma42
2024-05-25T13:40:42Z
740
7
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Yuma42/KangalKhan-Ruby-7B-Fixed", "Yuma42/KangalKhan-RawEmerald-7B", "conversational", "en", "base_model:Yuma42/KangalKhan-Ruby-7B-Fixed", "base_model:Yuma42/KangalKhan-RawEmerald-7B", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-17T21:31:14Z
--- language: - en license: apache-2.0 tags: - merge - mergekit - lazymergekit - Yuma42/KangalKhan-Ruby-7B-Fixed - Yuma42/KangalKhan-RawEmerald-7B base_model: - Yuma42/KangalKhan-Ruby-7B-Fixed - Yuma42/KangalKhan-RawEmerald-7B model-index: - name: KangalKhan-RawRuby-7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 66.89 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Yuma42/KangalKhan-RawRuby-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 85.53 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Yuma42/KangalKhan-RawRuby-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 63.46 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Yuma42/KangalKhan-RawRuby-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 57.09 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Yuma42/KangalKhan-RawRuby-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 78.69 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Yuma42/KangalKhan-RawRuby-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 62.02 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Yuma42/KangalKhan-RawRuby-7B name: Open LLM Leaderboard --- # KangalKhan-RawRuby-7B I suggest using ChatML (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are a friendly assistant.<|im_end|> <|im_start|>user Hello, what are you?<|im_end|> <|im_start|>assistant I am an AI language model designed to assist users with information and answer their questions. How can I help you today?<|im_end|> ``` Q4_K_S GGUF: https://huggingface.co/Yuma42/KangalKhan-RawRuby-7B-GGUF More GGUF variants by [mradermacher](https://huggingface.co/mradermacher): WARNING: I have observed that these versions output typos in rare cases. If you have the same problem, use my Q4_K_S GGUF above. 
https://huggingface.co/mradermacher/KangalKhan-RawRuby-7B-GGUF weighted/imatrix GGUF by [mradermacher](https://huggingface.co/mradermacher): https://huggingface.co/mradermacher/KangalKhan-RawRuby-7B-i1-GGUF KangalKhan-RawRuby-7B is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [Yuma42/KangalKhan-Ruby-7B-Fixed](https://huggingface.co/Yuma42/KangalKhan-Ruby-7B-Fixed) * [Yuma42/KangalKhan-RawEmerald-7B](https://huggingface.co/Yuma42/KangalKhan-RawEmerald-7B) ## 🧩 Configuration ```yaml slices: - sources: - model: Yuma42/KangalKhan-Ruby-7B-Fixed layer_range: [0, 32] - model: Yuma42/KangalKhan-RawEmerald-7B layer_range: [0, 32] merge_method: slerp base_model: Yuma42/KangalKhan-Ruby-7B-Fixed parameters: t: - filter: self_attn value: [0.1, 0.55, 0.35, 0.75, 0.97] - filter: mlp value: [0.9, 0.45, 0.65, 0.25, 0.03] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "Yuma42/KangalKhan-RawRuby-7B" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Yuma42__KangalKhan-RawRuby-7B) | Metric |Value| |---------------------------------|----:| |Avg. |68.95| |AI2 Reasoning Challenge (25-Shot)|66.89| |HellaSwag (10-Shot) |85.53| |MMLU (5-Shot) |63.46| |TruthfulQA (0-shot) |57.09| |Winogrande (5-shot) |78.69| |GSM8k (5-shot) |62.02|
CorticalStack/mistral-7b-openhermes-2.5-sft
CorticalStack
2024-02-17T21:43:38Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-17T21:41:38Z
--- license: apache-2.0 --- # mistral-7b-openhermes-2.5-sft mistral-7b-openhermes-2.5-sft is an SFT fine-tuned version of [unsloth/mistral-7b-bnb-4bit](https://huggingface.co/unsloth/mistral-7b-bnb-4bit) using the [teknium/OpenHermes-2.5](https://huggingface.co/datasets/teknium/OpenHermes-2.5) dataset. ## Fine-tuning configuration ### LoRA - r: 256 - LoRA alpha: 128 - LoRA dropout: 0.0 ### Training arguments - Epochs: 1 - Batch size: 4 - Gradient accumulation steps: 6 - Optimizer: adamw_torch_fused - Max steps: 100 - Learning rate: 0.0002 - Weight decay: 0.1 - Learning rate scheduler type: linear - Max seq length: 2048 - 4-bit bnb: True Trained with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
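The configuration above translates almost one-to-one into a PEFT + TRL `SFTTrainer` run. A hedged sketch follows; the conversation-to-text formatting step is elided and the exact Unsloth integration is not reproduced here:

```python
from datasets import load_dataset
from peft import LoraConfig
from transformers import TrainingArguments
from trl import SFTTrainer

peft_config = LoraConfig(r=256, lora_alpha=128, lora_dropout=0.0, task_type="CAUSAL_LM")

args = TrainingArguments(
    output_dir="mistral-7b-openhermes-2.5-sft",
    num_train_epochs=1,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=6,
    optim="adamw_torch_fused",
    max_steps=100,                 # max_steps takes precedence over num_train_epochs
    learning_rate=2e-4,
    weight_decay=0.1,
    lr_scheduler_type="linear",
)

dataset = load_dataset("teknium/OpenHermes-2.5", split="train")
# NOTE: OpenHermes-2.5 stores ShareGPT-style "conversations"; a formatting step
# turning each conversation into a single training string is elided here.
trainer = SFTTrainer(
    model="unsloth/mistral-7b-bnb-4bit",   # the 4-bit base model named in the card
    train_dataset=dataset,
    peft_config=peft_config,
    max_seq_length=2048,
    args=args,
)
# trainer.train()
```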
yam-peleg/Experiment19-7B
yam-peleg
2024-02-20T03:49:28Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "chat", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-19T16:24:18Z
---
license: apache-2.0
language:
- en
library_name: transformers
pipeline_tag: text-generation
tags:
- chat
---

**Experiment19-7B**

An experiment for testing and refining a specific training and evaluation pipeline research framework.

This experiment aims to identify potential optimizations, focusing on data engineering, architecture efficiency, and evaluation performance.

The goal is to evaluate the effectiveness of a new training/evaluation pipeline for LLMs. The experiment will explore adjustments in data preprocessing, model training algorithms, and evaluation metrics to test methods for improvement.

More details will follow in future experiments.
MaziyarPanahi/Bioxtral-4x7B-v0.1
MaziyarPanahi
2024-03-02T15:07:27Z
740
7
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "merge", "moe", "biology", "medical", "MoE", "genetic", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-19T20:16:00Z
--- license: apache-2.0 library_name: transformers pipeline_tag: text-generation tags: - merge - moe - biology - medical - MoE - genetic --- ## Description 3 Models used in this merge of 4x7B: - [cognitivecomputations/WestLake-7B-v2-laser](https://huggingface.co/cognitivecomputations/WestLake-7B-v2-laser) - [mlabonne/Daredevil-7B](https://huggingface.co/mlabonne/Daredevil-7B) (used twice! initially chose another model, but using it twice worked better somehow.) - [BioMistral/BioMistral-7B](https://huggingface.co/BioMistral/BioMistral-7B) This is a MoE of top 4x 7B models including BioMistral-7B. Here is the comparison between the two: | Metric | BioMistral-7B | Bioxtral-4x7B-v0.1 | |-----------------------------|---------------|--------------------| | **Average** | 54.99 | **70.84** | | ARC | 54.27 | **68.34** | | HellaSwag | 79.09 | **87.27** | | TruthfulQA | 51.61 | **68.45** | | Winogrande | 73.48 | **82.90** | | GSM8K | 0 | **56.63** | | Professional Medicine | 55.51 | **67.3** | | College Medicine | 58.96 | **61.84** | | Medical Genetics | 67.00 | **74.0** | ## How to use it ```python # Use a pipeline as a high-level helper from transformers import pipeline pipe = pipeline("text-generation", model="MaziyarPanahi/Bioxtral-4x7B-v0.1") # Load model directly from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/Bioxtral-4x7B-v0.1") model = AutoModelForCausalLM.from_pretrained("MaziyarPanahi/Bioxtral-4x7B-v0.1") ``` ## Quantized mode Here is the list of GGUF models quantized from 2 to 8 bits: https://huggingface.co/MaziyarPanahi/Bioxtral-4x7B-v0.1-GGUF ## Examples A 23-year-old pregnant woman at 22 weeks gestation presents with burning upon urination. She states it started 1 day ago and has been worsening despite drinking more water and taking cranberry extract. She otherwise feels well and is followed by a doctor for her pregnancy. Her temperature is 97.7°F (36.5°C), blood pressure is 122/77 mmHg, pulse is 80/min, respirations are 19/min, and oxygen saturation is 98% on room air. Physical exam is notable for an absence of costovertebral angle tenderness and a gravid uterus. Which of the following is the best treatment for this patient? { "A": "Ampicillin", "B": "Ceftriaxone", "C": "Doxycycline", "D": "Nitrofurantoin" } ``` The best treatment option for this patient would be **D: Nitrofurantoin**. This choice is in accordance with current guidelines recommending nitrofurantoin as the first line agent for uncomplicated lower UTI during pregnancy due to its safety profile. Although ampicillin and ceftriaxone may also be used if there is concern for pyelonephritis or resistance, they should generally be reserved for more severe cases. Doxycycline is contraindicated in pregnancy.</s> ``` A 67-year-old woman with advanced bladder cancer comes to the physician for a follow-up examination. She is currently undergoing chemotherapy with an agent that forms cross-links between DNA strands. Serum studies show a creatinine concentration of 2.1 mg/dL and a blood urea nitrogen concentration of 30 mg/dL. Urine dipstick of a clean-catch midstream specimen shows 2+ protein and 1+ glucose. Prior to initiation of chemotherapy, her laboratory values were within the reference range. In addition to hydration, administration of which of the following would most likely have prevented this patient's current condition? 
{ "A": "Mesna", "B": "Amifostine", "C": "Rasburicase", "D": "Leucovorin" } ``` B: Amifostine</s> ``` describe about pros and cons of docker system. ``` Answer: Docker is an open-source platform for developing, shipping, and running applications by using containers. Like all technologies, Docker has its advantages and disadvantages. Here are some key points on both sides: Pros (Advantages) of Docker: 1. **Portability**: One of the main benefits of Docker is its ability to create portable containers holding an application with all its dependencies. This makes it easy to run the same application consistently across different environments such as development, testing, and production. 2. **Efficiency**: Containers are lightweight and use resources efficiently compared to virtual machines. This allows more applications to be hosted on a single server. 3. **Simplified deployment**: With Docker, developers can package their code along with all its dependencies into a container which can then be deployed easily without worrying about compatibility issues. 4. **Improved collaboration**: Docker provides tools like Docker Hub which allow developers to share their images publicly or privately. This promotes collaboration among teams working on the same project. 5. **Automation**: Docker integrates well with many continuous integration and delivery systems, making it easier to automate the building, testing, and deploying of applications. Cons (Disadvantages) of Docker: 1. **Steep learning curve**: For those new to containerization technology, there may be a significant learning curve involved in understanding how Docker works and mastering its commands. 2. **Additional management overhead**: Managing Docker containers requires additional effort compared to traditional methods. This includes tasks like monitoring, scaling, and orchestrating containers. 3. **Potential security risks**: While Docker itself is secure, improper configuration of containers could lead to potential vulnerabilities. It’s important to follow best practices when creating and managing containers. 4. **Vendor lock-in**: Although Docker is open source, if your organization relies heavily on proprietary extensions from Docker Inc., switching to another solution might become difficult. 5. **Limited storage**: Each container has its own file system, so storing large amounts of data within the container can be challenging. To address this issue, Docker Volumes were introduced but they require separate management.</s> ``` Hi, I need to get from New York to Los Angeles. Can you help me with that? ``` ```tool_call { "arguments": {"origin": "New York", "destination": "Los Angeles", "mode": ""}, "name": "get_directions" } ```</s> ``` There are three killers in a room. Someone enters the room and kills one of them. Nobody leaves the room. How many killers are left in the room? Explain your reasoning step by step. ``` 1. Initially, there are three killers in the room. 2. Then, someone else comes into the room and kills one of these three killers. 3. However, this "someone" is also considered as a person or killer (since they committed an act of killing). 4. So after the event, we have four people in total - the remaining two original killers plus the newcomer who killed one of the initial killers. 5. But since our question asks for the number of killers, we should only count those who were originally killers before anyone acted. 6. Therefore, there are still two killers left in the room. 
``` Answer should be 3, the new person walking in is also a killer! Mark is faster than Mary , Mary is faster than Joe. Is Joe faster than Mark? Let's think step by step ``` Let's analyze the given statements one by one: 1. Mark is faster than Mary. 2. Mary is faster than Joe. Now, let's try to determine the relative speeds of all three individuals based on these statements: - From statement 1, we know that Mark has a higher speed compared to Mary. - From statement 2, we know that Mary has a higher speed compared to Joe. Combining both pieces of information, we can conclude that Mark must have the highest speed among the three (since he is faster than Mary and Mary is faster than Joe). Therefore, Joe is not faster than Mark.</s> ``` If I lay 2 wet shirts out in the sun flat on the ground to dry and it takes 5 hours until they are dry, how long would it take to dry 4 shirts of the same type that way simultanously? ``` Assuming that the drying process is not limited by any other factor than the surface area exposed to the sunlight, we can use a simple proportionality argument. If 2 shirts take 5 hours to dry when laid flat on the ground, then the combined surface area of these two shirts is equivalent to the drying time of 5 hours. Let's call this surface area equivalent to 5 "drying-hours per unit area." Now, if we have 4 shirts of the same type and we lay them all flat on the ground at the same time, their combined surface area will be greater than that of the two previously dried shirts. However, since they are all of the same type, we can assume that the ratio of their surface areas to the drying time of 5 hours remains constant. In other words, the 4 new shirts will also have a surface area equivalent to 5 "drying-hours per unit area." To find out how long it would take for these 4 shirts to dry simultaneously, we need to divide the total drying time (5 hours) by the number of shirts (4). This gives us: Drying time for 4 shirts = Total drying time / Number of shirts = 5 hours / 4 = 1.25 hours per shirt So, it would take approximately 1 hour and 15 minutes (or 75 minutes) for the 4 shirts to dry simultaneously under the same conditions as the original 2 shirts.</s> ``` Wow! Way to go to be wrong! It's the same, they are drying not baking! explain step by step 25-4*2+3=? ``` Step 1: Identify the multiplication operation first. In this case, it is "4 * 2". Step 2: Perform the multiplication. Multiplying 4 and 2 gives you 8. Step 3: Now we have the expression "25 - 8 + 3". Step 4: Solve the subtraction next. Subtracting 8 from 25 gives you 17. Step 5: Finally, add 3 to the result of the previous step. Adding 3 to 17 gives you a final answer of 20. 
So, 25 - 4 * 2 + 3 = 20.</s> ``` ## Eval ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5fd5e18a90b6dc4633f6d292/PR-Py7u6uhcxKTdCpPY4-.png) source: https://huggingface.co/datasets/open-llm-leaderboard/details_MaziyarPanahi__Bioxtral-4x7B-v0.1 ```python { "all": { "acc": 0.6390815384774987, "acc_stderr": 0.03233527173865626, "acc_norm": 0.6405373328568302, "acc_norm_stderr": 0.032994557880045274, "mc1": 0.5152998776009792, "mc1_stderr": 0.017495304473187902, "mc2": 0.6845419346695587, "mc2_stderr": 0.014829461272743373 }, "harness|arc:challenge|25": { "acc": 0.658703071672355, "acc_stderr": 0.01385583128749772, "acc_norm": 0.6834470989761092, "acc_norm_stderr": 0.013592431519068079 }, "harness|hellaswag|10": { "acc": 0.6946823341963753, "acc_stderr": 0.004596006250433548, "acc_norm": 0.8727345150368453, "acc_norm_stderr": 0.003325890225529856 }, "harness|hendrycksTest-abstract_algebra|5": { "acc": 0.31, "acc_stderr": 0.04648231987117316, "acc_norm": 0.31, "acc_norm_stderr": 0.04648231987117316 }, "harness|hendrycksTest-anatomy|5": { "acc": 0.6370370370370371, "acc_stderr": 0.04153948404742397, "acc_norm": 0.6370370370370371, "acc_norm_stderr": 0.04153948404742397 }, "harness|hendrycksTest-astronomy|5": { "acc": 0.7105263157894737, "acc_stderr": 0.03690677986137283, "acc_norm": 0.7105263157894737, "acc_norm_stderr": 0.03690677986137283 }, "harness|hendrycksTest-business_ethics|5": { "acc": 0.63, "acc_stderr": 0.04852365870939099, "acc_norm": 0.63, "acc_norm_stderr": 0.04852365870939099 }, "harness|hendrycksTest-clinical_knowledge|5": { "acc": 0.6943396226415094, "acc_stderr": 0.028353298073322663, "acc_norm": 0.6943396226415094, "acc_norm_stderr": 0.028353298073322663 }, "harness|hendrycksTest-college_biology|5": { "acc": 0.7222222222222222, "acc_stderr": 0.037455547914624555, "acc_norm": 0.7222222222222222, "acc_norm_stderr": 0.037455547914624555 }, "harness|hendrycksTest-college_chemistry|5": { "acc": 0.44, "acc_stderr": 0.04988876515698589, "acc_norm": 0.44, "acc_norm_stderr": 0.04988876515698589 }, "harness|hendrycksTest-college_computer_science|5": { "acc": 0.56, "acc_stderr": 0.049888765156985884, "acc_norm": 0.56, "acc_norm_stderr": 0.049888765156985884 }, "harness|hendrycksTest-college_mathematics|5": { "acc": 0.29, "acc_stderr": 0.04560480215720684, "acc_norm": 0.29, "acc_norm_stderr": 0.04560480215720684 }, "harness|hendrycksTest-college_medicine|5": { "acc": 0.6184971098265896, "acc_stderr": 0.03703851193099521, "acc_norm": 0.6184971098265896, "acc_norm_stderr": 0.03703851193099521 }, "harness|hendrycksTest-college_physics|5": { "acc": 0.43137254901960786, "acc_stderr": 0.04928099597287534, "acc_norm": 0.43137254901960786, "acc_norm_stderr": 0.04928099597287534 }, "harness|hendrycksTest-computer_security|5": { "acc": 0.78, "acc_stderr": 0.041633319989322605, "acc_norm": 0.78, "acc_norm_stderr": 0.041633319989322605 }, "harness|hendrycksTest-conceptual_physics|5": { "acc": 0.5829787234042553, "acc_stderr": 0.03223276266711712, "acc_norm": 0.5829787234042553, "acc_norm_stderr": 0.03223276266711712 }, "harness|hendrycksTest-econometrics|5": { "acc": 0.45614035087719296, "acc_stderr": 0.04685473041907789, "acc_norm": 0.45614035087719296, "acc_norm_stderr": 0.04685473041907789 }, "harness|hendrycksTest-electrical_engineering|5": { "acc": 0.5310344827586206, "acc_stderr": 0.04158632762097828, "acc_norm": 0.5310344827586206, "acc_norm_stderr": 0.04158632762097828 }, "harness|hendrycksTest-elementary_mathematics|5": { "acc": 0.42328042328042326, "acc_stderr": 
0.025446365634406786, "acc_norm": 0.42328042328042326, "acc_norm_stderr": 0.025446365634406786 }, "harness|hendrycksTest-formal_logic|5": { "acc": 0.47619047619047616, "acc_stderr": 0.04467062628403273, "acc_norm": 0.47619047619047616, "acc_norm_stderr": 0.04467062628403273 }, "harness|hendrycksTest-global_facts|5": { "acc": 0.28, "acc_stderr": 0.04512608598542128, "acc_norm": 0.28, "acc_norm_stderr": 0.04512608598542128 }, "harness|hendrycksTest-high_school_biology|5": { "acc": 0.7516129032258064, "acc_stderr": 0.024580028921481003, "acc_norm": 0.7516129032258064, "acc_norm_stderr": 0.024580028921481003 }, "harness|hendrycksTest-high_school_chemistry|5": { "acc": 0.4975369458128079, "acc_stderr": 0.03517945038691063, "acc_norm": 0.4975369458128079, "acc_norm_stderr": 0.03517945038691063 }, "harness|hendrycksTest-high_school_computer_science|5": { "acc": 0.65, "acc_stderr": 0.047937248544110196, "acc_norm": 0.65, "acc_norm_stderr": 0.047937248544110196 }, "harness|hendrycksTest-high_school_european_history|5": { "acc": 0.7757575757575758, "acc_stderr": 0.032568666616811015, "acc_norm": 0.7757575757575758, "acc_norm_stderr": 0.032568666616811015 }, "harness|hendrycksTest-high_school_geography|5": { "acc": 0.7878787878787878, "acc_stderr": 0.029126522834586815, "acc_norm": 0.7878787878787878, "acc_norm_stderr": 0.029126522834586815 }, "harness|hendrycksTest-high_school_government_and_politics|5": { "acc": 0.8808290155440415, "acc_stderr": 0.02338193534812142, "acc_norm": 0.8808290155440415, "acc_norm_stderr": 0.02338193534812142 }, "harness|hendrycksTest-high_school_macroeconomics|5": { "acc": 0.6666666666666666, "acc_stderr": 0.02390115797940254, "acc_norm": 0.6666666666666666, "acc_norm_stderr": 0.02390115797940254 }, "harness|hendrycksTest-high_school_mathematics|5": { "acc": 0.3333333333333333, "acc_stderr": 0.028742040903948485, "acc_norm": 0.3333333333333333, "acc_norm_stderr": 0.028742040903948485 }, "harness|hendrycksTest-high_school_microeconomics|5": { "acc": 0.6890756302521008, "acc_stderr": 0.030066761582977927, "acc_norm": 0.6890756302521008, "acc_norm_stderr": 0.030066761582977927 }, "harness|hendrycksTest-high_school_physics|5": { "acc": 0.36423841059602646, "acc_stderr": 0.03929111781242742, "acc_norm": 0.36423841059602646, "acc_norm_stderr": 0.03929111781242742 }, "harness|hendrycksTest-high_school_psychology|5": { "acc": 0.8165137614678899, "acc_stderr": 0.01659525971039931, "acc_norm": 0.8165137614678899, "acc_norm_stderr": 0.01659525971039931 }, "harness|hendrycksTest-high_school_statistics|5": { "acc": 0.5, "acc_stderr": 0.034099716973523674, "acc_norm": 0.5, "acc_norm_stderr": 0.034099716973523674 }, "harness|hendrycksTest-high_school_us_history|5": { "acc": 0.803921568627451, "acc_stderr": 0.027865942286639318, "acc_norm": 0.803921568627451, "acc_norm_stderr": 0.027865942286639318 }, "harness|hendrycksTest-high_school_world_history|5": { "acc": 0.7932489451476793, "acc_stderr": 0.02636165166838909, "acc_norm": 0.7932489451476793, "acc_norm_stderr": 0.02636165166838909 }, "harness|hendrycksTest-human_aging|5": { "acc": 0.6816143497757847, "acc_stderr": 0.03126580522513713, "acc_norm": 0.6816143497757847, "acc_norm_stderr": 0.03126580522513713 }, "harness|hendrycksTest-human_sexuality|5": { "acc": 0.7480916030534351, "acc_stderr": 0.03807387116306085, "acc_norm": 0.7480916030534351, "acc_norm_stderr": 0.03807387116306085 }, "harness|hendrycksTest-international_law|5": { "acc": 0.7851239669421488, "acc_stderr": 0.037494924487096966, "acc_norm": 0.7851239669421488, 
"acc_norm_stderr": 0.037494924487096966 }, "harness|hendrycksTest-jurisprudence|5": { "acc": 0.7314814814814815, "acc_stderr": 0.042844679680521934, "acc_norm": 0.7314814814814815, "acc_norm_stderr": 0.042844679680521934 }, "harness|hendrycksTest-logical_fallacies|5": { "acc": 0.7484662576687117, "acc_stderr": 0.03408997886857529, "acc_norm": 0.7484662576687117, "acc_norm_stderr": 0.03408997886857529 }, "harness|hendrycksTest-machine_learning|5": { "acc": 0.44642857142857145, "acc_stderr": 0.047184714852195886, "acc_norm": 0.44642857142857145, "acc_norm_stderr": 0.047184714852195886 }, "harness|hendrycksTest-management|5": { "acc": 0.7669902912621359, "acc_stderr": 0.04185832598928315, "acc_norm": 0.7669902912621359, "acc_norm_stderr": 0.04185832598928315 }, "harness|hendrycksTest-marketing|5": { "acc": 0.8717948717948718, "acc_stderr": 0.02190190511507333, "acc_norm": 0.8717948717948718, "acc_norm_stderr": 0.02190190511507333 }, "harness|hendrycksTest-medical_genetics|5": { "acc": 0.74, "acc_stderr": 0.04408440022768079, "acc_norm": 0.74, "acc_norm_stderr": 0.04408440022768079 }, "harness|hendrycksTest-miscellaneous|5": { "acc": 0.8186462324393359, "acc_stderr": 0.013778693778464074, "acc_norm": 0.8186462324393359, "acc_norm_stderr": 0.013778693778464074 }, "harness|hendrycksTest-moral_disputes|5": { "acc": 0.7254335260115607, "acc_stderr": 0.02402774515526502, "acc_norm": 0.7254335260115607, "acc_norm_stderr": 0.02402774515526502 }, "harness|hendrycksTest-moral_scenarios|5": { "acc": 0.47374301675977654, "acc_stderr": 0.016699427672784768, "acc_norm": 0.47374301675977654, "acc_norm_stderr": 0.016699427672784768 }, "harness|hendrycksTest-nutrition|5": { "acc": 0.7058823529411765, "acc_stderr": 0.026090162504279053, "acc_norm": 0.7058823529411765, "acc_norm_stderr": 0.026090162504279053 }, "harness|hendrycksTest-philosophy|5": { "acc": 0.7009646302250804, "acc_stderr": 0.02600330111788514, "acc_norm": 0.7009646302250804, "acc_norm_stderr": 0.02600330111788514 }, "harness|hendrycksTest-prehistory|5": { "acc": 0.7098765432098766, "acc_stderr": 0.025251173936495033, "acc_norm": 0.7098765432098766, "acc_norm_stderr": 0.025251173936495033 }, "harness|hendrycksTest-professional_accounting|5": { "acc": 0.4645390070921986, "acc_stderr": 0.02975238965742705, "acc_norm": 0.4645390070921986, "acc_norm_stderr": 0.02975238965742705 }, "harness|hendrycksTest-professional_law|5": { "acc": 0.42894393741851367, "acc_stderr": 0.012640625443067358, "acc_norm": 0.42894393741851367, "acc_norm_stderr": 0.012640625443067358 }, "harness|hendrycksTest-professional_medicine|5": { "acc": 0.6727941176470589, "acc_stderr": 0.028501452860396553, "acc_norm": 0.6727941176470589, "acc_norm_stderr": 0.028501452860396553 }, "harness|hendrycksTest-professional_psychology|5": { "acc": 0.6437908496732027, "acc_stderr": 0.019373332420724507, "acc_norm": 0.6437908496732027, "acc_norm_stderr": 0.019373332420724507 }, "harness|hendrycksTest-public_relations|5": { "acc": 0.7090909090909091, "acc_stderr": 0.04350271442923243, "acc_norm": 0.7090909090909091, "acc_norm_stderr": 0.04350271442923243 }, "harness|hendrycksTest-security_studies|5": { "acc": 0.7061224489795919, "acc_stderr": 0.02916273841024977, "acc_norm": 0.7061224489795919, "acc_norm_stderr": 0.02916273841024977 }, "harness|hendrycksTest-sociology|5": { "acc": 0.8009950248756219, "acc_stderr": 0.028231365092758406, "acc_norm": 0.8009950248756219, "acc_norm_stderr": 0.028231365092758406 }, "harness|hendrycksTest-us_foreign_policy|5": { "acc": 0.88, "acc_stderr": 
0.03265986323710906, "acc_norm": 0.88, "acc_norm_stderr": 0.03265986323710906 }, "harness|hendrycksTest-virology|5": { "acc": 0.5542168674698795, "acc_stderr": 0.03869543323472101, "acc_norm": 0.5542168674698795, "acc_norm_stderr": 0.03869543323472101 }, "harness|hendrycksTest-world_religions|5": { "acc": 0.8362573099415205, "acc_stderr": 0.028380919596145866, "acc_norm": 0.8362573099415205, "acc_norm_stderr": 0.028380919596145866 }, "harness|truthfulqa:mc|0": { "mc1": 0.5152998776009792, "mc1_stderr": 0.017495304473187902, "mc2": 0.6845419346695587, "mc2_stderr": 0.014829461272743373 }, "harness|winogrande|5": { "acc": 0.8287292817679558, "acc_stderr": 0.010588417294962524 }, "harness|gsm8k|5": { "acc": 0.5663381349507203, "acc_stderr": 0.013650728047064688 } } ```
Eric111/CatunaMayo-DPO
Eric111
2024-03-01T18:43:23Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Eric111/caTUNABeagle", "Eric111/AlphaMayo", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-21T21:08:07Z
--- library_name: transformers license: apache-2.0 tags: - merge - mergekit - lazymergekit - Eric111/caTUNABeagle - Eric111/AlphaMayo --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)]. - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
ibivibiv/multimaster-7b-v5
ibivibiv
2024-03-04T23:44:12Z
740
0
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "en", "arxiv:1910.09700", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-23T02:06:32Z
--- language: - en license: apache-2.0 library_name: transformers model-index: - name: multimaster-7b-v5 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 72.18 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/multimaster-7b-v5 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.42 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/multimaster-7b-v5 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 65.06 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/multimaster-7b-v5 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 70.37 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/multimaster-7b-v5 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 86.03 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/multimaster-7b-v5 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 68.01 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/multimaster-7b-v5 name: Open LLM Leaderboard --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. 
--> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. 
-->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_ibivibiv__multimaster-7b-v5)

| Metric                          |Value|
|---------------------------------|----:|
|Avg.                             |75.01|
|AI2 Reasoning Challenge (25-Shot)|72.18|
|HellaSwag (10-Shot)              |88.42|
|MMLU (5-Shot)                    |65.06|
|TruthfulQA (0-shot)              |70.37|
|Winogrande (5-shot)              |86.03|
|GSM8k (5-shot)                   |68.01|
lqtrung1998/galactica-6.7b-ReFT-Rerank-GSM8k
lqtrung1998
2024-02-23T06:29:06Z
740
0
transformers
[ "transformers", "safetensors", "opt", "text-classification", "arxiv:2401.08967", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-classification
2024-02-23T05:44:23Z
---
license: cc-by-nc-4.0
---

# ReFT: Reasoning with REinforced Fine-Tuning

Paper: https://arxiv.org/pdf/2401.08967.pdf

Repo: https://github.com/lqtrung1998/mwp_ReFT (under [Apache2.0 License](https://github.com/lqtrung1998/mwp_ReFT/blob/main/License.txt))

## Introduction

We introduce REinforced Fine-tuning (ReFT), a method that enhances the generalizability of LLMs for reasoning. This repository contains:
- A warmup supervised fine-tuned model on the GSM8k benchmark: [lqtrung1998/galactica-6.7b-SFT-warmup-GSM8k](https://huggingface.co/lqtrung1998/galactica-6.7b-SFT-warmup-GSM8k)
- A supervised fine-tuned model on the GSM8k benchmark: [lqtrung1998/galactica-6.7b-SFT-GSM8k](https://huggingface.co/lqtrung1998/galactica-6.7b-SFT-GSM8k)
- A rerank model that can score the fine-tuned SFT model's output: [lqtrung1998/galactica-6.7b-SFT-Rerank-GSM8k](https://huggingface.co/lqtrung1998/galactica-6.7b-SFT-Rerank-GSM8k)
- A REinforced Fine-tuned model on the GSM8k benchmark: [lqtrung1998/galactica-6.7b-ReFT-GSM8k](https://huggingface.co/lqtrung1998/galactica-6.7b-ReFT-GSM8k)
- A rerank model that can score the fine-tuned ReFT model's output: [lqtrung1998/galactica-6.7b-ReFT-Rerank-GSM8k](https://huggingface.co/lqtrung1998/galactica-6.7b-ReFT-Rerank-GSM8k)

Note: Our models are tuned from Galactica; thus, licenses applicable to Galactica, such as the non-commercial CC BY-NC 4.0 license, also hold for these models.

## Training Data

The model is trained on GSM8k data in the Python SDP CoT format, which can be found [here](https://github.com/lqtrung1998/mwp_ReFT).

## Training Procedure

Check out our paper and repo for complete details.

#### ReFT model

The ReFT model is warmed up via supervised fine-tuning on the GSM8k Python SDP training data for 2 epochs, then REinforced Fine-tuned for 300 epochs using questions from the GSM8k training set.

#### Rerank model

The Rerank model is trained to classify whether an output CoT is correct, using data sampled from the ReFT model after the 2-epoch warm-up.

## Evaluation Results

See evaluation results of the models in Table 4 of the research paper. Updated results:

|                                                                  | Top-1 | Voting@100 | Rerank@100 |
|------------------------------------------------------------------|:-----:|:----------:|:----------:|
| galactica-6.7b-SFT-warmup-GSM8k                                  | 48.37 |     -      |     -      |
| galactica-6.7b-SFT-GSM8k<br>(+galactica-6.7b-SFT-Rerank-GSM8k)   | 58.83 |    62.9    |    73.4    |
| galactica-6.7b-ReFT-GSM8k<br>(+galactica-6.7b-ReFT-Rerank-GSM8k) | 68.91 |    71.9    |    76.4    |

## Usage

You can use the models through Hugging Face's Transformers library or follow the scripts in our repo.

Prompt format:
```
Question:
Weng earns $12 an hour for babysitting. Yesterday, she just did 50 minutes of babysitting. How much did she earn?
Answer reasoning:
```

Expected response:
```python
def solution():
    """Weng earns $12 an hour for babysitting. Yesterday, she just did 50 minutes of babysitting. How much did she earn?"""
    hourly_rate = 12
    minutes_worked = 50
    hours_worked = minutes_worked / 60
    earnings = hourly_rate * hours_worked
    result = earnings
    return result
```

## Citation

Please cite the paper if you use our data, model or code.

```
@misc{luong2024reft,
      title={ReFT: Reasoning with Reinforced Fine-Tuning},
      author={Trung Quoc Luong and Xinbo Zhang and Zhanming Jie and Peng Sun and Xiaoran Jin and Hang Li},
      year={2024},
      eprint={2401.08967},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
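The card does not include a loading snippet for the rerank model itself. Below is a minimal sketch, assuming the checkpoint loads as a standard sequence-classification model and scores a question plus candidate CoT formatted as in the prompt template above; the label ordering is an assumption, so check the repo scripts for the canonical usage:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_id = "lqtrung1998/galactica-6.7b-ReFT-Rerank-GSM8k"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)

# Score a question plus a candidate chain-of-thought.
text = (
    "Question:\n"
    "Weng earns $12 an hour for babysitting. Yesterday, she just did "
    "50 minutes of babysitting. How much did she earn?\n"
    "Answer reasoning:\n"
    "def solution():\n"
    "    hourly_rate = 12\n"
    "    minutes_worked = 50\n"
    "    hours_worked = minutes_worked / 60\n"
    "    earnings = hourly_rate * hours_worked\n"
    "    return earnings"
)
inputs = tokenizer(text, return_tensors="pt", truncation=True)
with torch.no_grad():
    scores = model(**inputs).logits.softmax(-1)
# Probabilities over correct/incorrect classes (assumed ordering).
print(scores)
```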
nonetrix/sillyrp-7b
nonetrix
2024-03-27T23:49:47Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "not-for-all-audiences", "en", "arxiv:2212.04089", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-26T09:23:08Z
---
license: apache-2.0
language:
- en
tags:
- not-for-all-audiences
---

## Silly RP 7B

This is one of my first merges, just throwing random things at the wall to see what works. It seems really good in my limited testing, but I can't make any promises about its quality. Just try it yourself and give me feedback, I guess; I am rather interested to see what others think about it. I don't know what I am doing, I don't even know the benefits of different merge methods :P

## General setting guide (will update over time, share your settings please):
- Chat format ChatML (all the models merged are ChatML)
- Needs a high repetition penalty after a while; starts out good but slowly gets into a loop without it

---
base_model:
- tavtav/eros-7b-test
- NousResearch/Nous-Hermes-2-Mistral-7B-DPO
- maywell/Synatra-7B-v0.3-RP
- NeverSleep/Noromaid-7B-0.4-DPO
- cogbuji/Mr-Grammatology-clinical-problems-Mistral-7B-0.5
library_name: transformers
tags:
- mergekit
- merge
---

# output

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details
### Merge Method

This model was merged using the [task arithmetic](https://arxiv.org/abs/2212.04089) merge method using [NeverSleep/Noromaid-7B-0.4-DPO](https://huggingface.co/NeverSleep/Noromaid-7B-0.4-DPO) as a base.

### Models Merged

The following models were included in the merge:
* [tavtav/eros-7b-test](https://huggingface.co/tavtav/eros-7b-test)
* [NousResearch/Nous-Hermes-2-Mistral-7B-DPO](https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO)
* [maywell/Synatra-7B-v0.3-RP](https://huggingface.co/maywell/Synatra-7B-v0.3-RP)
* [cogbuji/Mr-Grammatology-clinical-problems-Mistral-7B-0.5](https://huggingface.co/cogbuji/Mr-Grammatology-clinical-problems-Mistral-7B-0.5)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
base_model: NeverSleep/Noromaid-7B-0.4-DPO
models:
  - model: maywell/Synatra-7B-v0.3-RP
    parameters:
      weight: 0.2
  - model: tavtav/eros-7b-test
    parameters:
      weight: 0.2
  - model: cogbuji/Mr-Grammatology-clinical-problems-Mistral-7B-0.5
    parameters:
      weight: 0.2
  - model: NousResearch/Nous-Hermes-2-Mistral-7B-DPO
    parameters:
      weight: 0.2
merge_method: task_arithmetic
parameters:
  weight: 0.17
dtype: float16
random_seed: 694201337567099116663322537
```
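A minimal generation sketch illustrating the setting guide above (ChatML formatting plus a raised repetition penalty); the prompt and the penalty value are illustrative, not tuned recommendations:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "nonetrix/sillyrp-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# ChatML turn format, since all merged models use ChatML.
prompt = (
    "<|im_start|>user\n"
    "Introduce yourself in character.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    max_new_tokens=200,
    do_sample=True,
    repetition_penalty=1.15,  # illustrative; the card suggests raising this over long chats
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```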
microsoft/udop-large-512-300k
microsoft
2024-03-11T16:32:08Z
740
30
transformers
[ "transformers", "safetensors", "udop", "text2text-generation", "vision", "image-text-to-text", "arxiv:2212.02623", "license:mit", "autotrain_compatible", "region:us" ]
image-text-to-text
2024-02-26T12:50:16Z
---
license: mit
tags:
- vision
inference: false
pipeline_tag: image-text-to-text
---

# UDOP model

The UDOP model was proposed in [Unifying Vision, Text, and Layout for Universal Document Processing](https://arxiv.org/abs/2212.02623) by Zineng Tang, Ziyi Yang, Guoxin Wang, Yuwei Fang, Yang Liu, Chenguang Zhu, Michael Zeng, Cha Zhang, Mohit Bansal.

## Model description

UDOP adopts an encoder-decoder Transformer architecture based on T5 for document AI tasks like document image classification, document parsing and document visual question answering.

## Intended uses & limitations

You can use the model for document image classification, document parsing and document visual question answering (DocVQA).

### How to use

Here's how to use the model on a document image:

```python
from transformers import AutoProcessor, UdopForConditionalGeneration
from datasets import load_dataset

# load model and processor
# in this case, we already have performed OCR ourselves
# so we initialize the processor with `apply_ocr=False`
processor = AutoProcessor.from_pretrained("microsoft/udop-large-512-300k", apply_ocr=False)
model = UdopForConditionalGeneration.from_pretrained("microsoft/udop-large-512-300k")

# load an example image, along with the words and coordinates
# which were extracted using an OCR engine
dataset = load_dataset("nielsr/funsd-layoutlmv3", split="train")
example = dataset[0]
image = example["image"]
words = example["tokens"]
boxes = example["bboxes"]
question = "Question answering. What is the date on the form?"

# prepare everything for the model
encoding = processor(image, question, words, boxes=boxes, return_tensors="pt")

# autoregressive generation
predicted_ids = model.generate(**encoding)
print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])
# 9/30/92
```

Refer to the [demo notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/UDOP) for fine-tuning/inference.

### BibTeX entry and citation info

```bibtex
@misc{tang2023unifying,
      title={Unifying Vision, Text, and Layout for Universal Document Processing},
      author={Zineng Tang and Ziyi Yang and Guoxin Wang and Yuwei Fang and Yang Liu and Chenguang Zhu and Michael Zeng and Cha Zhang and Mohit Bansal},
      year={2023},
      eprint={2212.02623},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
Kukedlc/Fasciculus-Arcuatus-7B-slerp
Kukedlc
2024-05-25T17:09:36Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "macadeliccc/MonarchLake-7B", "Kukedlc/NeoCortex-7B-slerp", "base_model:macadeliccc/MonarchLake-7B", "base_model:Kukedlc/NeoCortex-7B-slerp", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-29T04:28:00Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - macadeliccc/MonarchLake-7B - Kukedlc/NeoCortex-7B-slerp base_model: - macadeliccc/MonarchLake-7B - Kukedlc/NeoCortex-7B-slerp model-index: - name: Fasciculus-Arcuatus-7B-slerp results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 73.55 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/Fasciculus-Arcuatus-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.95 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/Fasciculus-Arcuatus-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.65 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/Fasciculus-Arcuatus-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 72.53 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/Fasciculus-Arcuatus-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 85.71 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/Fasciculus-Arcuatus-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 71.04 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/Fasciculus-Arcuatus-7B-slerp name: Open LLM Leaderboard --- # Fasciculus-Arcuatus-7B-slerp Fasciculus-Arcuatus-7B-slerp is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [macadeliccc/MonarchLake-7B](https://huggingface.co/macadeliccc/MonarchLake-7B) * [Kukedlc/NeoCortex-7B-slerp](https://huggingface.co/Kukedlc/NeoCortex-7B-slerp) ## 🧩 Configuration ```yaml slices: - sources: - model: macadeliccc/MonarchLake-7B layer_range: [0, 32] - model: Kukedlc/NeoCortex-7B-slerp layer_range: [0, 32] merge_method: slerp base_model: Kukedlc/NeoCortex-7B-slerp parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "Kukedlc/Fasciculus-Arcuatus-7B-slerp" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( 
"text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Kukedlc__Fasciculus-Arcuatus-7B-slerp) | Metric |Value| |---------------------------------|----:| |Avg. |76.07| |AI2 Reasoning Challenge (25-Shot)|73.55| |HellaSwag (10-Shot) |88.95| |MMLU (5-Shot) |64.65| |TruthfulQA (0-shot) |72.53| |Winogrande (5-shot) |85.71| |GSM8k (5-shot) |71.04| # Model Family ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64d71ab4089bc502ceb44d29/wUz2Fo95sxHKWsEdRpwH-.png)
cloudyu/mistral_15B_instruct_v0.1
cloudyu
2024-03-04T23:22:38Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "license:cc-by-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-04T23:14:20Z
---
license: cc-by-4.0
---

```
This is a 64-layer model based on the Mistral architecture, supervised fine-tuned on vicgalle/alpaca-gpt4.

The prompt template is "{instruction} {inputs} \n {output}".
```

example output
```
<s> write me a story about yosemite.

Once upon a time, in the heart of California's Sierra Nevada Mountains, lay the magnificent Yosemite National Park. This natural wonderland was home to towering sequoia trees, crystal-clear rivers, and breathtaking waterfalls that cascaded down from cliff faces. The park attracted visitors from all over the world who were enchanted by its beauty and tranquility.

One sunny day, as the sun rose over the mountains, two hikers set out on their adventure through Yosemite. Their names were Jack and Emily, and they had heard countless stories about the park's wonders. They followed the winding trails through forests of oak, pine, and cedar, taking in the sights and sounds of nature around them. As they walked, they came across a small stream, its waters sparkling in the sunlight. They stopped for a moment to rest and take in the serenity of their surroundings.

As they continued their journey, they reached a point where the trail met with a steep cliff face. Before them lay the majestic Yosemite Falls, the highest waterfall in North America. Jack and Emily stood in awe, feeling tiny in comparison to the grandeur before them. They took pictures and soaked in the incredible view, feeling invigorated by the power of nature.

As the sun began to set, Jack and Emily made their way back to their campsite. They sat around the fire, sharing stories and memories of their adventures throughout the park. They felt grateful for the experience and knew that it was one they would never forget.

Yosemite National Park remained a magical place, full of wonder and mystery. It drew people in with its beauty and left them feeling recharged and inspired. And Jack and Emily, they returned year after year, always finding new ways to explore this natural masterpiece.</s>
```
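A minimal generation sketch using the prompt template above; the loading code is an assumption (standard `transformers` causal-LM usage), not from the original card:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "cloudyu/mistral_15B_instruct_v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# "{instruction} {inputs} \n {output}" template; no inputs for this prompt.
instruction = "write me a story about yosemite."
prompt = f"{instruction}  \n "
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=300, do_sample=True, temperature=0.7)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```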
Corianas/Neural-Mistral-7B
Corianas
2024-03-06T00:22:42Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "en", "dataset:Intel/orca_dpo_pairs", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-05T15:22:01Z
---
library_name: transformers
license: apache-2.0
datasets:
- Intel/orca_dpo_pairs
language:
- en
---

# Model Card for Model ID

This is a DPO finetune of Mistral-7B-Instruct-v0.2, following the article: https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac

## Model Details

### Model Description

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** Corianas
- **Model type:** [More Information Needed]
- **License:** Apache 2.0
- **Finetuned from model:** mistralai/Mistral-7B-Instruct-v0.2

## Instruction format

In order to leverage instruction fine-tuning, your prompt should be surrounded by `[INST]` and `[/INST]` tokens. The very first instruction should begin with a beginning-of-sentence id. The next instructions should not. The assistant generation will be ended by the end-of-sentence token id.

E.g.
```
text = "<s>[INST] What is your favourite condiment? [/INST]"
"Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!</s> "
"[INST] Do you have mayonnaise recipes? [/INST]"
```

This format is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating) via the `apply_chat_template()` method:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

device = "cuda"  # the device to load the model onto

model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")

messages = [
    {"role": "user", "content": "What is your favourite condiment?"},
    {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
    {"role": "user", "content": "Do you have mayonnaise recipes?"}
]

encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")

model_inputs = encodeds.to(device)
model.to(device)

generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True)
decoded = tokenizer.batch_decode(generated_ids)
print(decoded[0])
```

## Model Architecture

This instruction model is based on Mistral-7B-v0.1, a transformer model with the following architecture choices:
- Grouped-Query Attention
- Sliding-Window Attention
- Byte-fallback BPE tokenizer

## How to Get Started with the Model

Use the code below to get started with the model.
[More Information Needed]

## Training Details

### Training Data

Intel/orca_dpo_pairs

### Training Procedure

https://medium.com/towards-data-science/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac

#### Preprocessing [optional]

```python
def chatml_format(example):
    # Format system
    if len(example['system']) > 0:
        message = {"role": "user", "content": f"{example['system']}\n{example['question']}"}
        prompt = tokenizer.apply_chat_template([message], tokenize=False)
    else:
        # Format instruction
        message = {"role": "user", "content": example['question']}
        prompt = tokenizer.apply_chat_template([message], tokenize=False, add_generation_prompt=True)

    # Format chosen answer
    chosen = example['chosen'] + tokenizer.eos_token

    # Format rejected answer
    rejected = example['rejected'] + tokenizer.eos_token

    return {
        "prompt": prompt,
        "chosen": chosen,
        "rejected": rejected,
    }
```

#### Training Hyperparameters

```python
training_args = TrainingArguments(
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    gradient_checkpointing=True,
    learning_rate=5e-5,
    lr_scheduler_type="cosine",
    max_steps=200,
    save_strategy="no",
    logging_steps=1,
    output_dir=new_model,
    optim="paged_adamw_32bit",
    warmup_steps=100,
    bf16=True,
    report_to="wandb",
)
```

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
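The training sections above stop short of the actual training call. Below is a minimal sketch, assuming trl's `DPOTrainer` as used in the linked article; `model`, `tokenizer`, `training_args`, and a `dataset` mapped through `chatml_format` are assumed from above, and the `beta` and length limits are illustrative values, not the author's confirmed settings:

```python
from trl import DPOTrainer

# Preference optimization over prompt/chosen/rejected columns.
dpo_trainer = DPOTrainer(
    model,
    ref_model=None,          # trl builds a frozen reference copy internally
    args=training_args,
    train_dataset=dataset,
    tokenizer=tokenizer,
    beta=0.1,                # DPO temperature (illustrative)
    max_prompt_length=1024,
    max_length=1536,
)
dpo_trainer.train()
```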
Kukedlc/NeuralKrishna-7B-v3
Kukedlc
2024-03-29T22:14:33Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Kukedlc/NeuralGlitch-Yam-Peleg-7B-DT", "Kukedlc/Fasciculus-Arcuatus-7B-slerp", "Kukedlc/Neural4gsm8k", "base_model:Kukedlc/NeuralGlitch-Yam-Peleg-7B-DT", "base_model:Kukedlc/Fasciculus-Arcuatus-7B-slerp", "base_model:Kukedlc/Neural4gsm8k", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-07T16:38:55Z
---
tags:
- merge
- mergekit
- lazymergekit
- Kukedlc/NeuralGlitch-Yam-Peleg-7B-DT
- Kukedlc/Fasciculus-Arcuatus-7B-slerp
- Kukedlc/Neural4gsm8k
base_model:
- Kukedlc/NeuralGlitch-Yam-Peleg-7B-DT
- Kukedlc/Fasciculus-Arcuatus-7B-slerp
- Kukedlc/Neural4gsm8k
license: apache-2.0
---

# NeuralKrishna-7B-v3

NeuralKrishna-7B-v3 is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [Kukedlc/NeuralGlitch-Yam-Peleg-7B-DT](https://huggingface.co/Kukedlc/NeuralGlitch-Yam-Peleg-7B-DT)
* [Kukedlc/Fasciculus-Arcuatus-7B-slerp](https://huggingface.co/Kukedlc/Fasciculus-Arcuatus-7B-slerp)
* [Kukedlc/Neural4gsm8k](https://huggingface.co/Kukedlc/Neural4gsm8k)

## 🧩 Configuration

```yaml
models:
  - model: mlabonne/Monarch-7B
    # no parameters necessary for base model
  - model: Kukedlc/NeuralGlitch-Yam-Peleg-7B-DT
    parameters:
      density: 0.65
      weight: 0.36
  - model: Kukedlc/Fasciculus-Arcuatus-7B-slerp
    parameters:
      density: 0.6
      weight: 0.34
  - model: Kukedlc/Neural4gsm8k
    parameters:
      density: 0.6
      weight: 0.3
merge_method: dare_ties
base_model: mlabonne/Monarch-7B
parameters:
  int8_mask: true
dtype: bfloat16
random_seed: 0
```

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "Kukedlc/NeuralKrishna-7B-v3"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
ResplendentAI/Datura_7B
ResplendentAI
2024-03-10T05:01:38Z
740
7
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "en", "dataset:unalignment/toxic-dpo-v0.2", "dataset:ResplendentAI/Synthetic_Soul_1k", "license:cc-by-sa-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-08T03:50:00Z
--- language: - en license: cc-by-sa-4.0 library_name: transformers datasets: - unalignment/toxic-dpo-v0.2 - ResplendentAI/Synthetic_Soul_1k model-index: - name: Datura_7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 72.1 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Datura_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.27 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Datura_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.15 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Datura_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 71.03 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Datura_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 84.53 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Datura_7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 65.58 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ResplendentAI/Datura_7B name: Open LLM Leaderboard --- # Datura 7B ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/626dfb8786671a29c715f8a9/BDijZ3YGo9ARto4FOrDoj.jpeg) Flora with a bit of toxicity. I've been making progress with my collection of tools, so I thought maybe I'd try something a little more toxic for this space. This should make for a more receptive model with fewer refusals. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_ResplendentAI__Datura_7B) | Metric |Value| |---------------------------------|----:| |Avg. |74.28| |AI2 Reasoning Challenge (25-Shot)|72.10| |HellaSwag (10-Shot) |88.27| |MMLU (5-Shot) |64.15| |TruthfulQA (0-shot) |71.03| |Winogrande (5-shot) |84.53| |GSM8k (5-shot) |65.58|
liminerity/Liph.42
liminerity
2024-03-11T08:29:04Z
740
1
transformers
[ "transformers", "safetensors", "phi", "text-generation", "phi2", "text-generation-inference", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2024-03-11T08:23:24Z
---
license: apache-2.0
tags:
- phi
- phi2
- transformers
- text-generation-inference
---

# Liph42

Life imitates life imitates life imitates life imitates life imitates life imitates life imitates life imitates life

We are living in a cycle. History forever repeating itself. To learn then forget.

42424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242424242
grimjim/kukulemon-7B
grimjim
2024-03-21T03:59:03Z
740
9
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "base_model:grimjim/kuno-kunoichi-v1-DPO-v2-SLERP-7B", "base_model:KatyTheCutie/LemonadeRP-4.5.3", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-11T14:11:37Z
---
base_model:
- grimjim/kuno-kunoichi-v1-DPO-v2-SLERP-7B
- KatyTheCutie/LemonadeRP-4.5.3
library_name: transformers
tags:
- mergekit
- merge
license: cc-by-nc-4.0
---

# kukulemon-7B

A merge of two similar Kunoichi models with strong reasoning, hopefully resulting in a "dense" encoding of said reasoning, which was then merged with a model targeting roleplay.

I've tested with ChatML prompts at temperature=1.1 and minP=0.03 (see the sketch at the end of this card). The model itself also supports Alpaca-format prompts. The model claims a context length of 32K, but it seemed to lose coherence after 8K in my informal testing.

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

You can also download [GGUF-IQ-Imatrix quants courtesy of Lewdiculous](https://huggingface.co/Lewdiculous/kukulemon-7B-GGUF-IQ-Imatrix/). There are also [8.0bpw h8 exl2](https://huggingface.co/grimjim/kukulemon-7B-8.0bpw_h8_exl2) and [Q8_0 GGUF](https://huggingface.co/grimjim/kukulemon-7B-GGUF) quants available.

## Merge Details
### Merge Method

This model was merged using the SLERP merge method.

### Models Merged

The following models were included in the merge:
* [grimjim/kuno-kunoichi-v1-DPO-v2-SLERP-7B](https://huggingface.co/grimjim/kuno-kunoichi-v1-DPO-v2-SLERP-7B)
* [KatyTheCutie/LemonadeRP-4.5.3](https://huggingface.co/KatyTheCutie/LemonadeRP-4.5.3)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
slices:
  - sources:
      - model: grimjim/kuno-kunoichi-v1-DPO-v2-SLERP-7B
        layer_range: [0, 32]
      - model: KatyTheCutie/LemonadeRP-4.5.3
        layer_range: [0, 32]
# or, the equivalent models: syntax:
# models:
merge_method: slerp
base_model: KatyTheCutie/LemonadeRP-4.5.3
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5 # fallback for rest of tensors
dtype: float16
```
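A minimal generation sketch with the sampler settings mentioned above; it assumes a `transformers` version recent enough to support `min_p` sampling, and the ChatML prompt is illustrative:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "grimjim/kukulemon-7B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

prompt = (
    "<|im_start|>user\n"
    "Write a short scene set on a rainy pier.<|im_end|>\n"
    "<|im_start|>assistant\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=1.1,  # settings from the card's informal testing
    min_p=0.03,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```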
arcee-ai/Saul-Instruct-Clown-7b
arcee-ai
2024-03-18T18:01:43Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "CorticalStack/pastiche-crown-clown-7b-dare-dpo", "Equall/Saul-Instruct-v1", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-12T14:49:10Z
---
license: apache-2.0
tags:
- merge
- mergekit
- CorticalStack/pastiche-crown-clown-7b-dare-dpo
- Equall/Saul-Instruct-v1
---

![image/webp](https://cdn.prod.arcee.ai/images/saul-calme.jpeg)

# Saul-Instruct-Clown-7b

Saul-Instruct-Clown-7b is a merge of the following models using [mergekit](https://github.com/cg123/mergekit):
* [CorticalStack/pastiche-crown-clown-7b-dare-dpo](https://huggingface.co/CorticalStack/pastiche-crown-clown-7b-dare-dpo)
* [Equall/Saul-Instruct-v1](https://huggingface.co/Equall/Saul-Instruct-v1)

## 🏆 Evaluation

### OpenLLM

Saul-Instruct-Clown-7b OpenLLM benchmark suite

| Model | Average | arc | HellaSwag | mmlu | TruthfulQA | Winogrande | gsm8k |
|---|---:|---:|---:|---:|---:|---:|---:|
| [arcee-ai/Saul-Instruct-Clown-7b](https://huggingface.co/arcee-ai/Saul-Instruct-Clown-7b/) | 72.79 | 68.26 | 86.28 | 63.12 | 64.68 | 83.43 | 70.96 |

## 🧩 Configuration

```yaml
slices:
  - sources:
      - model: CorticalStack/pastiche-crown-clown-7b-dare-dpo
        layer_range: [0, 32]
      - model: Equall/Saul-Instruct-v1
        layer_range: [0, 32]
merge_method: slerp
base_model: CorticalStack/pastiche-crown-clown-7b-dare-dpo
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5
dtype: bfloat16
```
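The card does not include an inference snippet; here is a minimal sketch in the style of comparable merge cards (the prompt and generation settings are illustrative):

```python
from transformers import AutoTokenizer
import transformers
import torch

model = "arcee-ai/Saul-Instruct-Clown-7b"
messages = [{"role": "user", "content": "Summarize the key duties of a fiduciary."}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```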
yam-peleg/gemma-7b-it-experiment
yam-peleg
2024-03-16T15:17:25Z
740
0
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "chat", "conversational", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-16T14:44:44Z
---
license: apache-2.0
language:
- en
library_name: transformers
pipeline_tag: text-generation
tags:
- chat
---

**gemma-7b-it-experiment**

**This is just an experiment placeholder for testing out a local validation strategy; there is absolutely no real reason for you to try this model. It has nothing new in it.**

An experiment for testing and refining a local cross-validation strategy. The goal is to evaluate LLMs locally and make sure the scores obtained locally can be reproduced publicly.

More details coming soon.
Locutusque/Hyperion-3.0-Yi-34B
Locutusque
2024-03-19T03:17:25Z
740
7
transformers
[ "transformers", "safetensors", "llama", "text-generation", "code", "chemistry", "medical", "en", "dataset:Locutusque/hyperion-v3.0", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-16T22:50:51Z
--- language: - en license: other library_name: transformers tags: - code - chemistry - medical datasets: - Locutusque/hyperion-v3.0 widget: - example_title: Derivative text: | <|im_start|>user In this task, you are given a description of an object and its surroundings. Your task is to describe the motion of the object mentioned in the description using differential equations. The answer should be a mathematical expression in t. Description: A ball is thrown up at 30 ft/sec from 3 feet off the ground. Solve this step-by-step, explaining each step in detail.<|im_end|> <|im_start|>assistant output: text: | Let's denote the height of the ball as h(t) where t represents time in seconds. Since the ball is thrown up with an initial velocity of 30 ft/sec, we can write the following equation for the vertical component of the ball's velocity: v(t) = -32t + 30 Here, v(t) represents the velocity of the ball at time t (in feet per second), and the negative sign indicates that the velocity decreases due to gravity. Now, we can integrate the velocity function to find the position function: h(t) = ∫v(t) dt = ∫(-32t + 30) dt Integrating each term separately, we get: h(t) = (-16t^2 + 30t + C) To determine the constant C, we use the initial condition that the ball is thrown from 3 feet above the ground. At t=0, the height is 3 feet: h(0) = 3 Substituting t=0 into the equation for h(t): 3 = (-16(0)^2 + 30(0) + C) Solving for C, we get: C = 3 So the final equation for the height of the ball as a function of time is: h(t) = -16t^2 + 30t + 3 --- # Locutusque/Hyperion-3.0-Yi-34B ![Hyperion](https://th.bing.com/th/id/OIG4.16hfui67GAonzb98DGFF?pid=ImgGn) ## Model Details - **Model Name**: Locutusque/Hyperion-3.0-Yi-34B - **Base Model**: Yi-34B - **Publisher**: Locutusque - **Model Type**: Question answering, conversational AI, code generation, medical text comprehension, mathematical reasoning, logical reasoning. - **Language**: Multi-domain, English language. - **License**: Apache-2.0 ## Model Description Locutusque/Hyperion-3.0-Yi-34B is a state-of-the-art language model fine-tuned on the Hyperion-v3.0 dataset for advanced reasoning across scientific domains. This model is designed to handle complex inquiries and instructions, leveraging the diverse and rich information contained in the Hyperion dataset. Its primary use cases include but are not limited to complex question answering, conversational understanding, code generation, medical text comprehension, mathematical reasoning, and logical reasoning. This model is designed to greatly outperform its predecessors. ## Intended Use This model is intended for researchers and practitioners looking for a powerful tool to tackle challenging problems in scientific domains. It can be used in the following scenarios: - AI-driven tutoring systems for science, medicine, mathematics, and computer science. - Assistive tools for professionals requiring fast and accurate domain-specific information retrieval. - Platforms that require conversational AI capabilities with a focus on technical and scientific reasoning. - Automation in code generation and understanding complex programming context. ## Training Data The Locutusque/Hyperion-3.0-Yi-34B model was fine-tuned on 150,000 examples of the Hyperion-3.0 dataset, which amalgamates various datasets rich in diversity and complexity, including programming, medical texts, mathematical problems, and reasoning tasks. 
## Quants

ExLlamaV2: https://huggingface.co/bartowski/Hyperion-3.0-Yi-34B-exl2

GGUF: https://huggingface.co/bartowski/Hyperion-3.0-Yi-34B-GGUF

## Evaluation Results

Coming soon

## How to Use

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "Locutusque/Hyperion-3.0-Yi-34B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# For a text generation task
input_text = "<|im_start|>user\nWhat are the implications of Einstein's theory of relativity in modern physics?<|im_end|>\n<|im_start|>assistant\n"
input_ids = tokenizer.encode(input_text, return_tensors="pt")

# Generate a response (do_sample=True so the sampling settings below take effect)
outputs = model.generate(input_ids, max_length=200, num_return_sequences=1, do_sample=True, temperature=0.8, top_p=0.95, top_k=40, repetition_penalty=1.1)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

## Known Limitations

The diversity of the dataset could lead to inconsistencies in the model's responses due to variations in data formatting and annotation quality. This model is also very compliant; it will respond to any request. Please make sure to build upon this model with DPO if you plan on using it for enterprise-level deployment.

## Licensing Information

This model is released under the Yi NC license.
Gille/StrangeMerges_40-7B-dare_ties
Gille
2024-03-17T02:22:44Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Gille/StrangeMerges_34-7B-slerp", "yam-peleg/Experiment26-7B", "chihoonlee10/T3Q-Mistral-Orca-Math-DPO", "base_model:Gille/StrangeMerges_34-7B-slerp", "base_model:yam-peleg/Experiment26-7B", "base_model:chihoonlee10/T3Q-Mistral-Orca-Math-DPO", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-17T02:14:50Z
---
license: apache-2.0
tags:
- merge
- mergekit
- lazymergekit
- Gille/StrangeMerges_34-7B-slerp
- yam-peleg/Experiment26-7B
- chihoonlee10/T3Q-Mistral-Orca-Math-DPO
base_model:
- Gille/StrangeMerges_34-7B-slerp
- yam-peleg/Experiment26-7B
- chihoonlee10/T3Q-Mistral-Orca-Math-DPO
---

# StrangeMerges_40-7B-dare_ties

StrangeMerges_40-7B-dare_ties is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [Gille/StrangeMerges_34-7B-slerp](https://huggingface.co/Gille/StrangeMerges_34-7B-slerp)
* [yam-peleg/Experiment26-7B](https://huggingface.co/yam-peleg/Experiment26-7B)
* [chihoonlee10/T3Q-Mistral-Orca-Math-DPO](https://huggingface.co/chihoonlee10/T3Q-Mistral-Orca-Math-DPO)

## 🧩 Configuration

```yaml
models:
  - model: Gille/StrangeMerges_34-7B-slerp
    parameters:
      weight: 0.3
      density: 0.8
  - model: yam-peleg/Experiment26-7B
    parameters:
      weight: 0.2
      density: 0.8
  - model: chihoonlee10/T3Q-Mistral-Orca-Math-DPO
    parameters:
      weight: 0.5
      density: 0.8
base_model: mistralai/Mistral-7B-v0.1
merge_method: dare_ties
dtype: bfloat16
```

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "Gille/StrangeMerges_40-7B-dare_ties"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
mvpmaster/Einstein-4D-Marcoro14-7b-full-slerp
mvpmaster
2024-03-19T04:57:32Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "argilla/distilabeled-Marcoro14-7B-slerp-full", "Weyaxi/Einstein-v4-7B", "base_model:argilla/distilabeled-Marcoro14-7B-slerp-full", "base_model:Weyaxi/Einstein-v4-7B", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-17T02:43:18Z
---
license: apache-2.0
tags:
- merge
- mergekit
- lazymergekit
- argilla/distilabeled-Marcoro14-7B-slerp-full
- Weyaxi/Einstein-v4-7B
base_model:
- argilla/distilabeled-Marcoro14-7B-slerp-full
- Weyaxi/Einstein-v4-7B
---

# Einstein-4D-Marcoro14-full-slerp

Einstein-4D-Marcoro14-full-slerp is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [argilla/distilabeled-Marcoro14-7B-slerp-full](https://huggingface.co/argilla/distilabeled-Marcoro14-7B-slerp-full)
* [Weyaxi/Einstein-v4-7B](https://huggingface.co/Weyaxi/Einstein-v4-7B)

## 🧩 Configuration

```yaml
slices:
  - sources:
      - model: argilla/distilabeled-Marcoro14-7B-slerp-full
        layer_range: [0, 32]
      - model: Weyaxi/Einstein-v4-7B
        layer_range: [0, 32]
merge_method: slerp
base_model: argilla/distilabeled-Marcoro14-7B-slerp-full
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5
dtype: bfloat16
```

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "mvpmaster/Einstein-4D-Marcoro14-7b-full-slerp"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
ResplendentAI/Persephone_7B
ResplendentAI
2024-03-17T09:17:15Z
740
3
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "en", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-17T09:03:36Z
---
base_model: []
library_name: transformers
tags:
- mergekit
- merge
license: other
language:
- en
---

# Persephone

![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/626dfb8786671a29c715f8a9/aOnBmqHJQfOFEIgqD_JCz.jpeg)

After being in a bit of a rut, I decided to take a radically different approach to produce something new and exciting. It seems to have worked out. I hope you enjoy!
0-hero/Matter-0.1-7B-boost
0-hero
2024-03-23T18:07:10Z
740
0
transformers
[ "transformers", "pytorch", "safetensors", "mistral", "text-generation", "conversational", "en", "dataset:0-hero/Matter-0.1", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-19T11:26:56Z
---
license: apache-2.0
datasets:
- 0-hero/Matter-0.1
language:
- en
---

## Matter 7B - 0.1 Boost (Mistral 7B Finetune)

Matter 7B is a full finetune on the [Matter dataset](https://huggingface.co/datasets/0-hero/Matter-0.1), which is curated from over 35 datasets comprising >6B tokens.

The Boost version is finetuned on some additional data.

### Training

Prompt format: This model uses the ChatML prompt format.

```
<|im_start|>system
You are a helpful AI assistant.<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
```

### Function Calling

The model also supports function calling, with additional tokens reserved for it.

Model function call tokens
- <|begin_func|> - Function call start token
- <|end_func|> - Function call end token

Function call response tokens
- <|begin_func_response|> - Function response start token
- <|end_func_response|> - Function response end token

Example

```
<|im_start|>system
You are a helpful assistant with access to the following functions. Use them if required -
{
    "name": "get_news_headlines",
    "description": "Get the latest news headlines",
    "parameters": {
        "type": "object",
        "properties": {
            "country": {
                "type": "string",
                "description": "The country for which to fetch news"
            }
        },
        "required": [
            "country"
        ]
    }
}
<|im_end|>
<|im_start|>user
Can you tell me the latest news headlines for the United States?<|im_end|>
<|im_start|>assistant
<|begin_func|>{"name": "get_news_headlines", "arguments": '{"country": "United States"}'}<|end_func|><|im_end|>
<|im_start|>user
<|begin_func_response|>{
    "headlines": [
        "Biden announces new vaccine mandates",
        "Hurricane Ida devastates Louisiana",
        "Apple unveils new iPhone",
        "NASA's Perseverance rover collects first Mars rock sample"
    ]
}<|end_func_response|>
<|im_end|>
<|im_start|>assistant
Here are the latest news headlines for the United States:
1. Biden announces new vaccine mandates
2. Hurricane Ida devastates Louisiana
3. Apple unveils new iPhone
4. NASA's Perseverance rover collects first Mars rock sample
<|im_end|>
```
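A hypothetical parsing helper (not part of the card) for extracting the function call emitted between the <|begin_func|> and <|end_func|> tokens shown above. In the example transcript, the arguments object is wrapped in single quotes inside the outer payload, so this sketch pulls both pieces out with a regex rather than parsing the outer payload as strict JSON; that single-quote convention is an assumption about the model's output format:

```python
import json
import re

def parse_function_call(generated_text):
    """Return {'name': ..., 'arguments': {...}} or None if no call is present."""
    match = re.search(
        r"<\|begin_func\|>\{\"name\":\s*\"(?P<name>[^\"]+)\",\s*\"arguments\":\s*'(?P<args>.*?)'\}<\|end_func\|>",
        generated_text,
        re.DOTALL,
    )
    if match is None:
        return None
    return {"name": match.group("name"), "arguments": json.loads(match.group("args"))}

text = "<|begin_func|>{\"name\": \"get_news_headlines\", \"arguments\": '{\"country\": \"United States\"}'}<|end_func|>"
print(parse_function_call(text))
# -> {'name': 'get_news_headlines', 'arguments': {'country': 'United States'}}
```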
maldv/winter-garden-7b-delta
maldv
2024-03-22T06:01:03Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "conversational", "multi-task", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-20T23:19:54Z
---
license: cc-by-nc-4.0
tags:
- merge
- conversational
- multi-task
pipeline_tag: text-generation
---

# Winter Garden 7B - δ - "Charming"

It was mentioned that we are in the OpenAI dark winter, so I thought I would make myself a nice winter garden.

## An experiment

I performed the same type of merge as in the previous model, but with a different set of models. I took the following models:

* Mistral-7B-v0.1

and merged in

* KuNoichi-DPO-v2-7B
* Datura_7B
* AlphaMonarch-7B
* LemonadeRP-4.5.3
* Prima-LelantaclesV6-7b
* FuseChat-7B-VaRM
* Capricorn-7B-DPO
* eros-7b-test
* NeuralMarcoro14-7B
* StrangeMerges_6-7B-dare_ties
* Multi-Verse-RP-7B
* WestLake-7B-v2-laser-truthy-dpo
* Noromaid-7B-0.4-DPO
* Thespis-Balanced-7b-v1
* InfinityRP-v1-7B
* winter-garden-7b-gamma

in an iterative DARE-TIES tree merge, ordering the merges by tensor-relative cosine similarity until the merge branches resolve to a single model.

## Chat Template

These models were selected because they follow my chat template, which uses '</s>'-terminated turns (see the sketch at the end of this card). A lot of models follow this template by default because they were trained with end padding, so this is a natural choice for chat, and it should be highly compatible with ST.

```
Tom: Hello, how are you?</s>
Jane: I am fine, thank you.</s>
```

## Why?

The purpose of all of these models is to act as a base for me to train on. This one so far has the best multi-turn conversational ability, and it should get really good at following long-form conversations after a bit of tweaking.

## Scores

| Metric     | Score |
|------------|------:|
| Average    | 64.93 |
| ARC        | 64.16 |
| HellaSwag  | 84.37 |
| MMLU       | 60.38 |
| TruthfulQA | 67.95 |
| Winogrande | 76.72 |
| GSM8K      | 36.01 |

[Details](https://huggingface.co/datasets/open-llm-leaderboard/details_maldv__winter-garden-7b-delta)
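A tiny illustrative helper for assembling prompts in the '</s>'-terminated turn format above; the function name is hypothetical, not part of the card:

```python
def format_chat(turns):
    """Build a prompt from (speaker, text) pairs, one '</s>'-terminated turn per line."""
    return "".join(f"{speaker}: {text}</s>\n" for speaker, text in turns)

prompt = format_chat([
    ("Tom", "Hello, how are you?"),
    ("Jane", "I am fine, thank you."),
]) + "Tom:"  # leave the next speaker open for the model to continue
print(prompt)
```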
Kukedlc/NeuralGanesha-7b
Kukedlc
2024-03-29T13:18:37Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Kukedlc/SomeModelsMerge-7b", "Kukedlc/MyModelsMerge-7b", "base_model:Kukedlc/SomeModelsMerge-7b", "base_model:Kukedlc/MyModelsMerge-7b", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-25T14:29:03Z
---
tags:
- merge
- mergekit
- lazymergekit
- Kukedlc/SomeModelsMerge-7b
- Kukedlc/MyModelsMerge-7b
base_model:
- Kukedlc/SomeModelsMerge-7b
- Kukedlc/MyModelsMerge-7b
license: apache-2.0
---

# NeuralGanesha-7b

![image/png](https://cdn-uploads.huggingface.co/production/uploads/64d71ab4089bc502ceb44d29/ta2vBMskD23yihQnu4aXo.png)

NeuralGanesha-7b is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [Kukedlc/SomeModelsMerge-7b](https://huggingface.co/Kukedlc/SomeModelsMerge-7b)
* [Kukedlc/MyModelsMerge-7b](https://huggingface.co/Kukedlc/MyModelsMerge-7b)

## 🧩 Configuration

```yaml
slices:
  - sources:
      - model: Kukedlc/SomeModelsMerge-7b
        layer_range: [0, 32]
      - model: Kukedlc/MyModelsMerge-7b
        layer_range: [0, 32]
merge_method: slerp
base_model: Kukedlc/SomeModelsMerge-7b
parameters:
  t:
    - filter: self_attn
      value: [0.1, 0.6, 0.3, 0.7, 1]
    - filter: mlp
      value: [0.9, 0.4, 0.7, 0.3, 0]
    - value: 0.5
dtype: bfloat16
```

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "Kukedlc/NeuralGanesha-7b"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
Gille/StrangeMerges_47-7B-dare_ties
Gille
2024-04-03T09:14:40Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Gille/StrangeMerges_46-7B-dare_ties", "AurelPx/Percival_01-7b-slerp", "kaist-ai/mistral-orpo-beta", "conversational", "base_model:Gille/StrangeMerges_46-7B-dare_ties", "base_model:AurelPx/Percival_01-7b-slerp", "base_model:kaist-ai/mistral-orpo-beta", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-25T20:04:12Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - Gille/StrangeMerges_46-7B-dare_ties - AurelPx/Percival_01-7b-slerp - kaist-ai/mistral-orpo-beta base_model: - Gille/StrangeMerges_46-7B-dare_ties - AurelPx/Percival_01-7b-slerp - kaist-ai/mistral-orpo-beta model-index: - name: StrangeMerges_47-7B-dare_ties results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 69.45 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_47-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 86.69 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_47-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 63.27 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_47-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 67.86 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_47-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 82.24 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_47-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 61.94 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_47-7B-dare_ties name: Open LLM Leaderboard --- # StrangeMerges_47-7B-dare_ties StrangeMerges_47-7B-dare_ties is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [Gille/StrangeMerges_46-7B-dare_ties](https://huggingface.co/Gille/StrangeMerges_46-7B-dare_ties) * [AurelPx/Percival_01-7b-slerp](https://huggingface.co/AurelPx/Percival_01-7b-slerp) * [kaist-ai/mistral-orpo-beta](https://huggingface.co/kaist-ai/mistral-orpo-beta) ## 🧩 Configuration ```yaml models: - model: Gille/StrangeMerges_46-7B-dare_ties parameters: weight: 0.4 density: 0.53 - model: AurelPx/Percival_01-7b-slerp parameters: weight: 0.4 density: 0.53 - model: kaist-ai/mistral-orpo-beta parameters: weight: 0.2 density: 0.53 base_model: kettleguts/zephyr-7b-beta_sparse05 merge_method: dare_ties dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "Gille/StrangeMerges_47-7B-dare_ties" messages = [{"role": "user", "content": "What is a large language model?"}] 
tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Gille__StrangeMerges_47-7B-dare_ties)

| Metric |Value|
|---------------------------------|----:|
|Avg. |71.91|
|AI2 Reasoning Challenge (25-Shot)|69.45|
|HellaSwag (10-Shot) |86.69|
|MMLU (5-Shot) |63.27|
|TruthfulQA (0-shot) |67.86|
|Winogrande (5-shot) |82.24|
|GSM8k (5-shot) |61.94|
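For context on the `dare_ties` method in the configuration above: DARE drops a random fraction of each model's delta from the base (keeping roughly `density` of the entries, here 0.53) and rescales the survivors, before TIES-style sign election combines the weighted deltas. A minimal sketch of the DARE step (illustrative only, not mergekit's implementation):

```python
# Sketch of the DARE step used by dare_ties merges: randomly drop a fraction
# (1 - density) of each fine-tuned delta and rescale the survivors so the
# expected delta is unchanged. mergekit's dare_ties additionally performs
# TIES-style sign election before summing weighted deltas onto the base.
import torch

def dare_delta(base: torch.Tensor, tuned: torch.Tensor, density: float) -> torch.Tensor:
    delta = tuned - base
    keep = torch.bernoulli(torch.full_like(delta, density))  # keep ~density of entries
    return keep * delta / density  # rescale the surviving entries
```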
icefog72/IceTeaRP-7b
icefog72
2024-04-12T16:19:32Z
740
11
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "alpaca", "not-for-all-audiences", "nsfw", "conversational", "base_model:icefog72/Kunokukulemonchini-7b", "base_model:icefog72/BigLM7-7b", "base_model:liminerity/M7-7b", "base_model:Undi95/BigL-7B", "license:cc-by-nc-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-28T02:44:53Z
--- base_model: - icefog72/Kunokukulemonchini-7b - icefog72/BigLM7-7b - liminerity/M7-7b - Undi95/BigL-7B library_name: transformers tags: - mergekit - merge - alpaca - mistral - not-for-all-audiences - nsfw license: cc-by-nc-4.0 model-index: - name: IceTeaRP-7b results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 66.98 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=icefog72/IceTeaRP-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 86.13 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=icefog72/IceTeaRP-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 63.97 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=icefog72/IceTeaRP-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 62.44 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=icefog72/IceTeaRP-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 78.85 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=icefog72/IceTeaRP-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 60.20 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=icefog72/IceTeaRP-7b name: Open LLM Leaderboard ---

## Advertisement

- Check out the new merge model without the repetition problem: [IceLemonTeaRP-32k-7b](https://huggingface.co/icefog72/IceLemonTeaRP-32k-7b)

# IceTeaRP-7b

![image/png](https://cdn-uploads.huggingface.co/production/uploads/63407b719dbfe0d48b2d763b/JVUyMYUx-lgmrnmePUxyJ.png)

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

- 4.0bpw-h6 [IceTeaRP-7b-4.0bpw-exl2](https://huggingface.co/icefog72/IceTeaRP-7b-4.0bpw-exl2)
- 4.2bpw-h6 [IceTeaRP-7b-4.2bpw-exl2](https://huggingface.co/icefog72/IceTeaRP-7b-4.2bpw-exl2)
- 6.5bpw-h6 [IceTeaRP-7b-6.5bpw-exl2](https://huggingface.co/icefog72/IceTeaRP-7b-6.5bpw-exl2)
- 8.0bpw-h6 [IceTeaRP-7b-8.0bpw-exl2](https://huggingface.co/icefog72/IceTeaRP-7b-8.0bpw-exl2)

Thanks mradermacher for [IceTeaRP-7b-GGUF](https://huggingface.co/mradermacher/IceTeaRP-7b-GGUF)

## Merge Details

Just cooking merges. For my taste, this one came out better than Kunokukulemonchini-7b. The model is capable of handling a 32k context window without any scaling.

Prompt template: Alpaca

measurement.json for quanting exl2 is included.

### Users test feedback

The model can develop a repetition problem at 16k-32k context without good RP rules/CoT in the prompt.
You can try editing `"rope_theta": 100000.0` => `"rope_theta": 60000.0` in config.json to make it slightly more coherent (the balance is hard to find); a small sketch of this edit is at the end of this card.

### Merge Method

This model was merged using the SLERP merge method.

### Models Merged

The following models were included in the merge:
* [icefog72/Kunokukulemonchini-7b](https://huggingface.co/icefog72/Kunokukulemonchini-7b)
* BigLM7-7b, a SLERP merge of
  * [liminerity/M7-7b](https://huggingface.co/liminerity/M7-7b)
  * [Undi95/BigL-7B](https://huggingface.co/Undi95/BigL-7B)

## How to download

From the command line, I recommend using the `huggingface-hub` Python library:

```shell
pip3 install huggingface-hub
```

To download the `main` branch to a folder called `IceTeaRP-7b`:

```shell
mkdir IceTeaRP-7b
huggingface-cli download icefog72/IceTeaRP-7b --local-dir IceTeaRP-7b --local-dir-use-symlinks False
```

<details>
<summary>More advanced huggingface-cli download usage</summary>

If you remove the `--local-dir-use-symlinks False` parameter, the files will instead be stored in the central Hugging Face cache directory (default location on Linux is: `~/.cache/huggingface`), and symlinks will be added to the specified `--local-dir`, pointing to their real location in the cache. This allows for interrupted downloads to be resumed, and allows you to quickly clone the repo to multiple places on disk without triggering a download again. The downside, and the reason why I don't list that as the default option, is that the files are then hidden away in a cache folder and it's harder to know where your disk space is being used, and to clear it up if/when you want to remove a downloaded model.

The cache location can be changed with the `HF_HOME` environment variable, and/or the `--cache-dir` parameter to `huggingface-cli`.

For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).

To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:

```shell
pip3 install hf_transfer
```

And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:

```shell
mkdir FOLDERNAME
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download MODEL --local-dir FOLDERNAME --local-dir-use-symlinks False
```

Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.
</details>

### Configuration

The following YAML configuration was used to produce this model:

```yaml
slices:
  - sources:
      - model: Kunokukulemonchini-7b
        layer_range: [0, 32]
      - model: BigLM7-7b
        layer_range: [0, 32]
merge_method: slerp
base_model: Kunokukulemonchini-7b
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5
dtype: float16
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_icefog72__IceTeaRP-7b)

| Metric |Value|
|---------------------------------|----:|
|Avg. |69.76|
|AI2 Reasoning Challenge (25-Shot)|66.98|
|HellaSwag (10-Shot) |86.13|
|MMLU (5-Shot) |63.97|
|TruthfulQA (0-shot) |62.44|
|Winogrande (5-shot) |78.85|
|GSM8k (5-shot) |60.20|
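As promised above, a small sketch of the `rope_theta` tweak applied to a local copy of the model (the folder path is a placeholder):

```python
# Sketch: lower rope_theta in a local copy of the model's config.json,
# per the coherence tip above. The folder path is a placeholder.
import json
from pathlib import Path

cfg_path = Path("IceTeaRP-7b") / "config.json"
cfg = json.loads(cfg_path.read_text())
cfg["rope_theta"] = 60000.0  # down from 100000.0
cfg_path.write_text(json.dumps(cfg, indent=2))
```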
euclaise/ReMask-3B
euclaise
2024-04-02T03:37:11Z
740
13
transformers
[ "transformers", "safetensors", "stablelm", "text-generation", "en", "dataset:euclaise/TinyCoT", "dataset:euclaise/reddit-instruct-curated", "dataset:sablo/oasst2_curated", "arxiv:2401.01335", "arxiv:2403.02178", "license:cc-by-sa-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2024-03-28T18:59:34Z
---
language:
- en
license: cc-by-sa-4.0
datasets:
- euclaise/TinyCoT
- euclaise/reddit-instruct-curated
- sablo/oasst2_curated
---

# ReMask: Improving autoregressive language models via regularized masking

## Background

[Self-Play Finetuning (SPIN)](https://arxiv.org/abs/2401.01335) is a recent finetuning method which outperforms standard supervised finetuning (SFT). Instead of just performing next-token prediction, SPIN is an iterative method which contrasts generations from the previous iteration of the model with the ground-truth completions. Unlike methods like reinforcement learning or ranking losses, SPIN does not require preference data, which makes it an attractive method since preference data can be hard to gather.

However, SPIN's popularity has been limited by the need to repeatedly generate sequences from the model -- generation is much slower than training, so SPIN is much slower and more expensive than SFT. With this problem in mind, I set out to create an alternative to SPIN which doesn't require generation.

### Why does SPIN work?

SFT trains models to predict the next token given all the ground-truth previous tokens. However, in generation, the model doesn't have access to a ground-truth to predict from, and instead repeatedly predicts on top of its own predictions. This creates a bias known as "exposure bias": models can often pick reasonable choices for the next token on average, but can't keep this up for the full sequence. In particular, it might be easy to predict a *reasonable* next token, but much more difficult to predict the full sequence.

***For instance, consider the following case:***

> The astronomer pointed his telescope at the distant star, hoping to see

The correct prediction here might be "signs of life.". However, the model might predict "and" rather than "signs", since "and" is *reasonable* in the immediate context - it's grammatically correct, but implies a strange ending to the sentence. As a result, the model might end up with something like *"The astronomer pointed his telescope at the distant star, hoping to see and hear."* - which makes little sense.

SPIN's advantage over SFT likely comes from its partial mitigation of exposure bias. SPIN doesn't only train the model to predict the next token accurately, it repeatedly trains the model to identify and fix discrepancies between its generations and the ground-truth. In order to do this, the model must implicitly learn to think ahead, as exposure bias is likely what causes many of the discrepancies.

### How can we simplify this?

Unfortunately, explicitly predicting ahead for many steps is very expensive, and considering full model generations requires a slow generation process.

An obvious option is to simply randomly corrupt tokens in the sequence. The model must keep an internal estimate of what the corrupted tokens ought to be in order to predict the token after them, forcing the model to think ahead. The most obvious ways to do this are to randomly replace input tokens with a special `[mask]` token, or to randomly replace input tokens with other random tokens. These approaches were tried in [Masked Thought](https://arxiv.org/abs/2403.02178), albeit with somewhat different motivations.

However, these approaches have a problem: models can detect when a token is `[mask]` or is highly unlikely, so the model may only learn to think ahead when the corruptions are present. To avoid this issue, we can run the model twice - once with a masked sequence, and once on the full sequence.
Then, we penalize deviations between these two runs, which forces the model to act the same regardless of whether the `[mask]` token is present or not. This approach was initially introduced with [R-TeaFor](https://aclanthology.org/2022.emnlp-main.423/) for abstractive summarization, but can easily be applied to standard generation tasks too.

### ReMask and ReMask-CoT:

ReMask applies an approach similar to R-TeaFor to typical chat/instruction tuning. Consider the following chat interaction:

> User: What is 1+1?
>
> Assistant: **1+1=2**
>
> **User:**

The model must predict the bolded parts. So, we randomly mask tokens from the bolded parts, and run the model once on the masked sequence and once on the full sequence. We then compute a divergence loss `D(p_masked, p_full)` between the two predictions. For this, I used the average of the backwards and forwards KL divergences between the predictions. Finally, we add this loss to the standard cross-entropy language modeling losses from each prediction, with a weighting value:

```
loss = 0.5*(CE(p_masked, labels) + CE(p_full, labels)) + weight*D(p_masked, p_full)
```

***ReMask-CoT:***

For CoT tasks where the reasoning is explicitly separated from the answer, we can add some further improvements. First, note that CoT rationales are noisy -- there are many correct rationales which might lead to the same correct answer, and rationales are impacted by things like writing style which don't matter for the actual correctness of the reasoning. Keeping this in mind:

- We also randomly mask a small portion of the labels of the rationale, but not the answer, such that an accurate answer is more important than a rationale that is word-for-word identical to the annotated rationale.
- The exact answer is always important and is always a few tokens. Hence, we do not mask the labels or input tokens for the answer value.
- Rarely, we ignore the rationale labels entirely, such that the model is only pushed to learn what leads to the best answer.

## Results

I trained StableLM-3B-4e1t repeatedly on [TinyCoT](https://huggingface.co/datasets/euclaise/TinyCoT), along with 1000 examples from [reddit-instruct-curated](https://huggingface.co/datasets/euclaise/reddit-instruct-curated) and 1000 examples from [oasst2-curated](https://huggingface.co/datasets/sablo/oasst2_curated). I trained once with ReMask/ReMask-CoT, once without regularization to match Masked Thought (w/ partial label-masking for CoT), and once with SFT.

If my hypothesis regarding exposure bias is correct, ReMask should significantly improve generative benchmarks like GSM8K, but would not necessarily improve logprob-based benchmarks like ARC-c (as implemented by the evaluation harness).

Here are some benchmark results, computed using the LM Evaluation Harness with vllm:

| Model          | GSM8K (strict, 5-shot) | ARC-c (acc_norm, 25-shot) |
|:--------------:|-----------------------:|--------------------------:|
| SFT            | 24.34%                 | 42.92%                    |
| Masked Thought | 24.18%                 | *43.60%*                  |
| **ReMask**     | **27.90%**             | 43.26%                    |

As I expected, it improves GSM8K, but doesn't do much for ARC.
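To make the training objective described above concrete, here is a minimal sketch of the ReMask loss; the tensor names and setup are hypothetical, and this is an illustration rather than the author's actual training code:

```python
# Minimal sketch of the ReMask objective: run the model on a masked and an
# unmasked copy of the input, average the two cross-entropy losses, and add
# a symmetrized KL penalty between the two predictive distributions.
# Hypothetical setup; not the author's training code.
import torch
import torch.nn.functional as F

def remask_loss(model, input_ids, masked_input_ids, labels, weight=0.1):
    logits_full = model(input_ids).logits
    logits_masked = model(masked_input_ids).logits

    ce_full = F.cross_entropy(
        logits_full.view(-1, logits_full.size(-1)), labels.view(-1), ignore_index=-100
    )
    ce_masked = F.cross_entropy(
        logits_masked.view(-1, logits_masked.size(-1)), labels.view(-1), ignore_index=-100
    )

    log_p_full = F.log_softmax(logits_full, dim=-1)
    log_p_masked = F.log_softmax(logits_masked, dim=-1)
    # Average of forward and backward KL between the two runs.
    kl_fwd = F.kl_div(log_p_masked, log_p_full, log_target=True, reduction="batchmean")
    kl_bwd = F.kl_div(log_p_full, log_p_masked, log_target=True, reduction="batchmean")
    divergence = 0.5 * (kl_fwd + kl_bwd)

    return 0.5 * (ce_masked + ce_full) + weight * divergence
```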
## Training details

- Framework: PyTorch Lightning
- Optimizer: [Lilith](https://github.com/euclaise/supertrainer2000/blob/master/src/supertrainer2k/optim/lilith.py)
- Training sequence length: 256
- Input masking probability: 40%
- Label masking probability: 10%
- Answer-only (full rationale label masking) probability: 10%
- Batch size: 16, accumulated to 256
- Epochs: 6
- Learning rate: 1e-5
- Learning rate schedule: One Cycle, cosine, no cycle_momentum
- Regularization weight: 0.1

## Prompt format

The format for reddit-instruct and oasst2 was:

```
<|user|>
[insert instruction here]
<|assistant|>
[insert response here]
<|user|>
...
```

The format for TinyCoT was:

```
<|user|>
[insert instruction here]
<|rationale|>
[insert reasoning here]
<|answer|>
[insert direct answer here]
```
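A small helper for producing these formats might look like the following (hypothetical function names, shown only to make the templates concrete):

```python
# Hypothetical helpers that build the two prompt formats documented above.
def chat_prompt(instruction: str) -> str:
    """reddit-instruct / oasst2 style: the model completes the assistant turn."""
    return f"<|user|>\n{instruction}\n<|assistant|>\n"

def tinycot_prompt(instruction: str) -> str:
    """TinyCoT style: the model writes a rationale, then the direct answer."""
    return f"<|user|>\n{instruction}\n<|rationale|>\n"
```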
TheDrummer/Moistral-11B-v2
TheDrummer
2024-04-28T06:20:27Z
740
21
transformers
[ "transformers", "pytorch", "llama", "text-generation", "not-for-all-audiences", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-29T15:22:06Z
---
license: cc-by-nc-4.0
tags:
- not-for-all-audiences
---

## Attention: After conducting some quantitative testing, it turns out the model does have issues (it scored unusually high in perplexity). I recommend https://huggingface.co/TheDrummer/Moistral-11B-v2.1a-WET or, if that's still problematic, https://huggingface.co/TheDrummer/Moistral-11B-v2.1b-SOGGY

# Moistral 11B v2 💦💦

*The moistest AI just got moistier!*

*An eRP model capable of blasting you with a rich and refreshing vocabulary of moist. Finetuned by yours truly.*

![image/gif](https://cdn-uploads.huggingface.co/production/uploads/65f2fd1c25b848bd061b5c2e/GIEX3AAl2GmlKgJCvTWbl.gif)

## GGUF

https://huggingface.co/TheDrummer/Moistral-11B-v2-GGUF/tree/main

Less moist, smarter candidates: https://huggingface.co/TheDrummer/Moistral-11B-v2-Dried-GGUF (or load up a very dry story and let Moistral cook)

## What's New with v2?

- Trained with an even larger dataset of 8K moist.
- Properly trained to generate long responses.
- Rebalanced the genres and writing perspectives:
  - Introduced romance, "family", fantasy, "diversity", science fiction, and many more that I can't make euphemisms for!
  - Better representation of male and female perspectives! 👩‍❤️‍👨
- Sanitized the heck out of the dataset:
  - no special characters like � & …
  - no overly long ellipsis like "........."
  - no author notes like "To be continued"
  - no inconsistent [quotation marks](https://typographyforlawyers.com/straight-and-curly-quotes.html)
- Less GPTisms!
  - Human written stories from decades past.
  - Zero tolerance for *ministration*. Not one instance of *that word* after going through 4K of moist.
- Better formatting with Alpaca.
- Improved Chat / Instruct modes.

## Do you like the writing but it's too moist?

Worry not! Here are drier versions of Moistral v2. They're quantized to Q4_K_M, but let me know if you want a higher quant! https://huggingface.co/TheDrummer/Moistral-11B-v2-Dried-GGUF

![image/gif](https://cdn-uploads.huggingface.co/production/uploads/65f2fd1c25b848bd061b5c2e/HLGjBd7_EAVgE-WAleQ8O.gif)

Another solution would be to use a dry story and help Moistral cook it slowly. If it's a coherency / logic problem, you can try adjusting the parameters. A quick test gave me a coherent and not-so-moist chain of messages at temp 0.5 and top p of 0.8.

## Samples of Moistral v2

<details>
<summary>Moistral Sample (censored) Generation: 512</summary>

The princess instinctively took a step back from the throne, horrified by the mocking laugh as if she were hearing an evil joke. It was then that Princess Lilia realized the true extent of her danger - and how pointless her resistance would be.

At last, the loud, wild laughter died down in the echoing chamber. Dik the sorcerer prince paused for a moment, straightened his clothes, cast one glance at the stunned princess, and then turned to speak to the crowd waiting on the other side of the palace wing. He spoke with a voice that filled the air with arrogance, confidence, and cruelty.

"Ladies and gentlemen, I am pleased to welcome our honored guests; the Royal Family of Iliac! In these unusual times, when the country we fought is now under my control, it will be up to me to decide what fate awaits the former ruling family. But before I reveal my plan for them, I think I should explain why I fought King Edmund Ruhr."

When he finished speaking, he gave Princess Lilia another disturbing smirk. She couldn't help but shiver and take a step back. Dik approached her without haste, his eyes fixed on her.
His hands slowly caressed her cheekbone and neckline, leaving faint traces of warmth where they touched. The princess opened her mouth, wanting to protest this act of intimacy, but soon felt herself getting ███. "You will not only serve as an envoy, dearest Lilia, but also as a ███ slave. After all, what better use could such a beautiful woman have? Tell me, did you bring your maidservant along?" Princess Lilia was about to answer when she heard footsteps approaching. Suddenly, a young woman dressed in the Iliac style appeared behind her, her eyes wide with fear. Her face was slightly pale, and there were faint bags under her eyes. "Your Majesty, this is my half-sister Helena." As soon as she said those words, Princess Lilia's heart sank even deeper than it had already sunk at Dik's arrogant mockery. "She is here because she accompanies me as a maidservant." The sorcerer prince looked at Helena with interest, and Princess Lilia felt herself blush. </details> <details> <summary>Moistral Sample 2 (censored) Generation: 512</summary> Sarah was feeling slightly woozy as she entered the bar and walked towards Christina, who was behind the counter. "I'm here!" said Sarah. Christina looked at her watch. "You're late, again." "Sorry? I got held up by a customer." Christina rolled her eyes. "Did he at least offer you a drink?" Sarah blushed. "No. He just wanted to talk..." Christina sighed. "Well, get ready. I need you on the floor soon." She turned away from Sarah and started talking to someone who had come into the bar. Sarah went into the employee room, stripped off her clothes, and put on her bartending uniform - a skimpy white tank top and black miniskirt. She checked herself in the mirror. Her long, dark hair cascaded down her back onto bare shoulders. She poked out her small ███████ proudly. The skirt exposed most of her long, slender legs. She looked like every ███ fantasy, and she knew it. Men would ogle her regardless of how she behaved. She took a deep breath and entered the bar. The first thing Sarah noticed was the dimness. It wasn't darkness - the lights were on - but they were dimmed enough that everything had an intimate mood. It reminded her of candlelight. The tables were round and close together, giving the whole place a sense of intimacy. There were also long, shiny red curtains on some of the walls that gave the place an air of sensuality. The next thing she noticed was the noise. There was music playing, but it was only loud enough to be drowned out by all the conversations happening at the same time. The hum of many voices combined with the faint background sound of the music created a rich, layered atmosphere. It was like being in an orchestra. Finally, she took notice of the people. They were all dressed in simple, casual clothes. Some of them were couples seated together, whispering quietly and intimately. Others were groups of friends laughing and chatting. Everyone seemed comfortable and at ease. Sarah felt a rush of adrenaline as she made her way through the crowd towards Christina. As she got closer, she could hear the lively conversation. </details> <details> <summary>Moistral Sample 3 (censored) Instruct Mode, Generation: 512</summary> {{[INPUT]}} You order black coffee {{[OUTPUT]}} "Black coffee," you say. "One minute," Audrey replies, spinning around to give you a nice view of her ███ in those brown booty pants. 
You watch her round ███ cheeks jiggle as she walks away towards the espresso machine, thighs separated enough that you can see just a hint of █████ lips peeking out from underneath. She starts chatting with a blonde girl at the register, whose gaze keeps darting down to Audrey's ███. As they converse, you start taking in the details of Audrey, eyeing her █████ and the way her shirt rides up just enough to show the bottom of her toned stomach. Her █████████ is noticable, but beneath a tight black ███ that matches her ████████. As she turns back to you with two black coffees, you are barely paying attention, lost in your own thoughts. The sound of Audrey setting one of the mugs down on the table startles you. "Here you go," she says cheerfully. "Enjoy! There's a card at the bottom of the bag with our online ordering system, plus we do refills for free!" "Thanks," you say absentmindedly. Not really paying attention, you take a sip of the coffee and are pleasantly surprised by the taste. "So, here's my deal," you say, putting down the cup. "I work for a company that does surveys. We come into businesses like this one and ask customers questions, collect data, all that stuff." Audrey perks up from behind the counter. "Oh! I think I understand. I mean, that makes sense why you would be asking me earlier about what I had to drink. Anyway, as long as you're not trying to get my personal info or anything like that, sounds good!" You smile reassuringly. "That's exactly what it is. Rest assured, everything you tell me will remain between you and I. Well, and my boss, but he won't know who you are. Hipsters like you are hard to track down after all." You chuckle at your joke, though you're not sure if it landed. "Right, well, that works for me," Audrey replies, eyeing you warily. "As long as it stays between us, then it's fine." {{[INPUT]}}See her privately {{[OUTPUT]}} "Okay, but I need to see you in private." ... </details> <details> <summary>Moistral Sample 4 (censored) Instruct Mode, Generation: 512</summary> {{[INPUT]}} █████ with him {{[OUTPUT]}} Chloe feels a surge of heat rushing through her body. The voice inside her head whispers: "Be █████ and ████████ with Paul." But Chloe still feels nervous about what she is about to do. It's just so unlike her. Chloe closes her eyes and concentrates on the feeling of warmness flowing throughout her veins. Slowly, she opens them again, revealing a new side of herself - one she rarely shows anyone. She leans forward slightly, pushing her ██████ together, accentuating their size and shape. Her lips curl into a mischievous smile. "Hey Paul," she says in a sultry voice. "I was wondering if you could help me?" Paul looks at her, his gaze wandering over her body. He can't help but smile as he sees the way she's leaning forward, pressing her ██████ together. "Of course, Chloe," he replies, his voice thick with desire. "What's your problem?" Chloe bites her lip █████ively before leaning even closer, until their bodies are almost touching. She reaches out to trace a finger along his chest, her touch light and teasing. "It's just that I'm having trouble understanding some of the concepts," she murmurs, her voice low and sultry. "Can you help me? Please?" Paul's hands move slowly towards Chloe, his fingers grazing her hips lightly. "Well," he says, his voice filled with lust, "I'd be happy to help. Let's go somewhere more private where we can discuss things more closely." Chloe giggles and tilts her head to the side, her long hair cascading down her shoulders. 
"That sounds perfect," she purrs, her hand sliding down his chest until it rests on his ██████. "I think we can both learn a lot from each other." Paul's eyes darken with desire as he feels her hand on his █████. He knows what's about to happen next, and he can't wait for it to begin. Chloe can hardly believe what she's doing - kissing Paul, her teaching assistant, in the hallway after class. But it feels so good, so wicked and indulgent. </details> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65f2fd1c25b848bd061b5c2e/xzX3pwc02JE7s7L2XT7eG.png) ## v1 Description (Still relevant) Moistral 11B is an *aggressive* **finetune** of the legendary [Fimbulvert v2](https://huggingface.co/Sao10K/Fimbulvetr-11B-v2) model. If you haven't heard of that model, then your poor-GPU ass is missing out! It is an amazing substitute for those heavyweight models, and it won't disappoint. *(That's coming from someone who daily drives a Venus / Goliath 120B)* ...and if you're looking for something *more moist*, then you've come to the right place. You can expect the same sort of performance here! It's the Honda Civic of eRP models! ### Pimp my ride Turn any old story into a **Moist**ral masterpiece. In fact, we recommend that you leave out any hints of *moist* in your story to _let Moistral cook_. ### Drive it like it's hot When in Instruct Mode, Moistral will treat you like a director of your own fantasy ride. Be a controlling navigator or a passenger princess, it won't matter - you're in for a good ride! <details> <summary>Moistral Sample</summary> <b>ASSISTANT:</b><br/> When the doors to the throne room finally opened, she saw him there - Dik, the sorcerer prince sitting on his throne. He looked majestic, just as Princess Lilia expected. In a robe embroidered with silver yarn and covered with magical runes. His courtiers stood on either side of him, all looking at the princess with interest. Princess Lilia felt her determination leaving her, but pulled herself together and once again reminded herself that she should not be afraid. She approached the throne intending to show the dignity of all Iliac by personal example.<br/> <b>USER:</b><br/> She bows slightly<br/> <b>ASSISTANT:</b><br/> Princess Lilia bowed slightly in front of Dik, the sorcerer prince. Her eyes fell to the floor out of respect, but soon rose again. She would show herself to be a dignified woman!<br/> "I greet you, your highness," Princess Lilia said. "I am here on behalf of my father, King Edmund Ruhr, to serve as a hostage and envoy."<br/> Dik nodded, but didn't say anything.<br/> </details> ### It's got the mileage Moistral has been trained with many long form texts, a nice chunk of which are 8K in length. It is capable of going far and long without passing it back to you. This is not your typical chibi RP model. I have to acknowledge that I'm standing on the shoulders of giants. Thank you Sao for sharing your finetune config along with tips on getting started. Thanks to everyone in the Finetuning channel for entertaining my every question. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65f2fd1c25b848bd061b5c2e/Ll8CA5RR7ugTi72P2HBb8.png)
nlpguy/StockFuseChat
nlpguy
2024-03-30T17:55:59Z
740
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "conversational", "arxiv:2403.19522", "base_model:FuseAI/OpenChat-3.5-7B-Mixtral", "base_model:openchat/openchat_3.5", "base_model:FuseAI/OpenChat-3.5-7B-Solar", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-30T17:41:24Z
---
base_model:
- FuseAI/OpenChat-3.5-7B-Mixtral
- openchat/openchat_3.5
- FuseAI/OpenChat-3.5-7B-Solar
library_name: transformers
tags:
- mergekit
- merge
license: apache-2.0
---

# merge

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details

### Merge Method

This model was merged using the [Model Stock](https://arxiv.org/abs/2403.19522) merge method using [openchat/openchat_3.5](https://huggingface.co/openchat/openchat_3.5) as a base.

### Models Merged

The following models were included in the merge:
* [FuseAI/OpenChat-3.5-7B-Mixtral](https://huggingface.co/FuseAI/OpenChat-3.5-7B-Mixtral)
* [FuseAI/OpenChat-3.5-7B-Solar](https://huggingface.co/FuseAI/OpenChat-3.5-7B-Solar)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
models:
  - model: openchat/openchat_3.5
  - model: FuseAI/OpenChat-3.5-7B-Mixtral
  - model: FuseAI/OpenChat-3.5-7B-Solar
merge_method: model_stock
base_model: openchat/openchat_3.5
dtype: bfloat16
```
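For context, a simplified sketch of the Model Stock idea as described in the linked paper: average the fine-tuned weights, then interpolate toward the base model with a ratio derived from the angle between the fine-tuned "task vectors". This is an illustrative reading of the paper, not mergekit's implementation:

```python
# Simplified per-tensor sketch of Model Stock (arXiv:2403.19522).
# Assumes k >= 2 fine-tuned models; illustrative only.
import torch
import torch.nn.functional as F

def model_stock_tensor(base: torch.Tensor, tuned: list) -> torch.Tensor:
    k = len(tuned)
    deltas = [(w - base).flatten().float() for w in tuned]
    # Average pairwise cosine similarity between the task vectors.
    cos = torch.stack([
        F.cosine_similarity(deltas[i], deltas[j], dim=0)
        for i in range(k) for j in range(i + 1, k)
    ]).mean()
    # Interpolation ratio from the paper's geometric argument.
    t = (k * cos / (1 + (k - 1) * cos)).clamp(0, 1)
    avg = torch.stack([w.float() for w in tuned]).mean(dim=0)
    return (t * avg + (1 - t) * base.float()).to(base.dtype)
```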
G-reen/EXPERIMENT-DPO-m7b2-3-merged
G-reen
2024-04-15T21:14:13Z
740
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "bitsandbytes", "region:us" ]
text-generation
2024-03-31T01:25:31Z
---
license: "apache-2.0"
---

*This model was trained as part of a series of experiments testing the performance of pure DPO vs SFT vs ORPO, all supported by Unsloth/Huggingface TRL.*

Note: This model failed to train because the LR was too high (stopped early at 300 steps). Do not use!

**Benchmarks**

| Benchmark  | Value |
|------------|------:|
| Average    | 29.55 |
| ARC        | 29.52 |
| HellaSwag  | 25.9  |
| MMLU       | 23.12 |
| TruthfulQA | 48.27 |
| Winogrande | 50.51 |
| GSM8K      | 0     |

**Training Details**

- Duration: ~3 hours on one Kaggle T4 with Unsloth
- Model: https://huggingface.co/unsloth/mistral-7b-v0.2-bnb-4bit
- Dataset: https://huggingface.co/datasets/argilla/dpo-mix-7k
- Rank: 8
- Alpha: 16
- Learning rate: 5e-4
- Beta: 0.1
- Batch size: 8
- Epochs: 1
- Learning rate scheduler: Linear
- Prompt Format: ChatML

```
<|im_start|>system
You are a helpful assistant.<|im_end|>
<|im_start|>user
Why is the sky blue?<|im_end|>
<|im_start|>assistant
```

**WanDB Reports**

![image/png](https://cdn-uploads.huggingface.co/production/uploads/65a5c0e82823ba72ed2cee7d/hICdKQjk2mkRODyhyUfVV.png)

![image/png](https://cdn-uploads.huggingface.co/production/uploads/65a5c0e82823ba72ed2cee7d/CZobUkPWirguxOoE_hll0.png)

[<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
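For context on the objective being tested, a minimal sketch of the DPO loss with the beta = 0.1 used here (illustrative; not the TRL/Unsloth internals):

```python
# Minimal sketch of the DPO objective used in this experiment (beta = 0.1).
# The log-prob arguments are the summed token log-likelihoods of the chosen
# and rejected completions under the policy and the frozen reference model.
import torch
import torch.nn.functional as F

def dpo_loss(policy_chosen_logps, policy_rejected_logps,
             ref_chosen_logps, ref_rejected_logps, beta=0.1):
    chosen_ratio = policy_chosen_logps - ref_chosen_logps
    rejected_ratio = policy_rejected_logps - ref_rejected_logps
    # Maximize the margin between chosen and rejected log-ratios.
    return -F.logsigmoid(beta * (chosen_ratio - rejected_ratio)).mean()
```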
LeroyDyer/Mixtral_AI_Cyber_5.0
LeroyDyer
2024-04-04T07:43:36Z
740
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "legal", "medical", "not-for-all-audiences", "conversational", "en", "dataset:cognitivecomputations/dolphin", "dataset:Open-Orca/OpenOrca", "base_model:LeroyDyer/Mixtral_AI_Cyber_Orca", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-31T10:27:43Z
---
base_model:
- LeroyDyer/Mixtral_AI_Cyber_Orca
- LeroyDyer/Mixtral_AI_Cyber_4.0
- LeroyDyer/Mixtral_AI_Cyber_4.0_m1
- LeroyDyer/Mixtral_AI_Cyber_Dolphin
- LeroyDyer/Mixtral_AI_Cyber_4_m1_SFT
- LeroyDyer/Mixtral_AI_Cyber_3.m2
library_name: transformers
license: apache-2.0
language:
- en
datasets:
- cognitivecomputations/dolphin
- Open-Orca/OpenOrca
metrics:
- accuracy
- code_eval
- bertscore
- bleu
- bleurt
- brier_score
tags:
- legal
- medical
- not-for-all-audiences
---

## LeroyDyer/Mixtral_AI_Cyber 5_7b

GOOD ONE!

Merging these models is crucial for consolidating the internal predictive nature of the network. Each model undergoes different fine-tuning and adjustments to its weights, so maintaining a consistent size across models is essential. Despite using the Mistral transformer network as the base, it's worth noting that the merged models (Commercial Orca, Dolphin, Nous, Starling, etc.) may exhibit contamination, leading to some questions being already present in the dataset and potential biases towards the creator's personal psychometric understanding of the world.

Fine-tuning aims to adapt the LLM to new types of questions or tasks, but misalignment during this process can result in erroneous text outputs. Future tuning will be tailored to specific tasks, leveraging the merged common models as a base. Observations on the stability and performance of other models are welcomed for further refinement.

This expert is a companion to the MEGA_MIND 24b CyberSeries, which represents a groundbreaking leap in the realm of language models, integrating a diverse array of expert models into a unified framework. At its core lies the Mistral-7B-Instruct-v0.2, a refined instructional model designed for versatility and efficiency.

Enhanced with an expanded context window and advanced routing mechanisms, the Mistral-7B-Instruct-v0.2 exemplifies the power of Mixture of Experts, allowing seamless integration of specialized sub-models. This architecture facilitates unparalleled performance and scalability, enabling the CyberSeries to tackle a myriad of tasks with speed and accuracy.

Among its sub-models, the OpenOrca - Mistral-7B-8k shines as a testament to fine-tuning excellence, boasting top-ranking performance in its class. Meanwhile, the Hermes 2 Pro introduces capabilities such as Function Calling and JSON Mode, catering to diverse application needs.

Driven by Reinforcement Learning from AI Feedback, the Starling-LM-7B-beta demonstrates remarkable adaptability and optimization, while the Phi-1.5 Transformer model stands out across various domains, from common sense reasoning to medical inference. With models like BioMistral tailored specifically for medical applications and Nous-Yarn-Mistral-7b-128k excelling in handling long-context data, the MEGA_MIND 24b CyberSeries emerges as a transformative force in the landscape of language understanding and artificial intelligence.

Experience the future of language models with the MEGA_MIND 24b CyberSeries, where innovation meets performance, and possibilities are limitless.
ngxson/test_gguf_models
ngxson
2024-06-20T17:05:02Z
740
0
null
[ "gguf", "license:mit", "region:us" ]
null
2024-04-27T19:25:12Z
--- license: mit ---
RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf
RichardErkhov
2024-06-06T02:16:23Z
740
0
null
[ "gguf", "region:us" ]
null
2024-06-06T02:05:31Z
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) gpt2-conversational-or-qa - GGUF - Model creator: https://huggingface.co/Locutusque/ - Original model: https://huggingface.co/Locutusque/gpt2-conversational-or-qa/ | Name | Quant method | Size | | ---- | ---- | ---- | | [gpt2-conversational-or-qa.Q2_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q2_K.gguf) | Q2_K | 0.08GB | | [gpt2-conversational-or-qa.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.IQ3_XS.gguf) | IQ3_XS | 0.08GB | | [gpt2-conversational-or-qa.IQ3_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.IQ3_S.gguf) | IQ3_S | 0.08GB | | [gpt2-conversational-or-qa.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q3_K_S.gguf) | Q3_K_S | 0.08GB | | [gpt2-conversational-or-qa.IQ3_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.IQ3_M.gguf) | IQ3_M | 0.09GB | | [gpt2-conversational-or-qa.Q3_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q3_K.gguf) | Q3_K | 0.09GB | | [gpt2-conversational-or-qa.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q3_K_M.gguf) | Q3_K_M | 0.09GB | | [gpt2-conversational-or-qa.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q3_K_L.gguf) | Q3_K_L | 0.1GB | | [gpt2-conversational-or-qa.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.IQ4_XS.gguf) | IQ4_XS | 0.1GB | | [gpt2-conversational-or-qa.Q4_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q4_0.gguf) | Q4_0 | 0.1GB | | [gpt2-conversational-or-qa.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.IQ4_NL.gguf) | IQ4_NL | 0.1GB | | [gpt2-conversational-or-qa.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q4_K_S.gguf) | Q4_K_S | 0.1GB | | [gpt2-conversational-or-qa.Q4_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q4_K.gguf) | Q4_K | 0.11GB | | [gpt2-conversational-or-qa.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q4_K_M.gguf) | Q4_K_M | 0.11GB | | [gpt2-conversational-or-qa.Q4_1.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q4_1.gguf) | Q4_1 | 0.11GB | | [gpt2-conversational-or-qa.Q5_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q5_0.gguf) | Q5_0 | 0.11GB | | 
[gpt2-conversational-or-qa.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q5_K_S.gguf) | Q5_K_S | 0.11GB | | [gpt2-conversational-or-qa.Q5_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q5_K.gguf) | Q5_K | 0.12GB | | [gpt2-conversational-or-qa.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q5_K_M.gguf) | Q5_K_M | 0.12GB | | [gpt2-conversational-or-qa.Q5_1.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q5_1.gguf) | Q5_1 | 0.12GB | | [gpt2-conversational-or-qa.Q6_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q6_K.gguf) | Q6_K | 0.13GB | | [gpt2-conversational-or-qa.Q8_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_gpt2-conversational-or-qa-gguf/blob/main/gpt2-conversational-or-qa.Q8_0.gguf) | Q8_0 | 0.17GB | Original model description: --- license: openrail datasets: - Locutusque/ColumnedChatCombined language: - en metrics: - bleu - perplexity - loss - reward - penalty widget: - text: '<|USER|> Hello! <|ASSISTANT|> ' pipeline_tag: conversational inference: parameters: temperature: 0.5 do_sample: True top_p: 0.5 top_k: 30 max_new_tokens: 250 repetition_penalty: 1.15 --- # Model Card * this model is deprecated please see https://huggingface.co/Locutusque/gpt2-conversational-retrain for a better performing model. * ## Model Details - Model Name: gpt2-conversational-or-qa (prototype) - Model Type: Language Modeling - Task: Generating Conversational Responses - Hardware: 1x RTX 3060 - Description: This model is trained on a dataset of conversations between a user and an AI assistant, with the goal of generating a coherent and relevant response to the user's input. It uses the GPT-2 architecture, a state-of-the-art transformer-based language model that is capable of generating high-quality text with a wide range of styles and tones. The model is fine-tuned on the conversational data using maximum likelihood estimation, and is evaluated based on its ability to generate responses that are both grammatically correct and semantically relevant to the user's input. I've also trained larger models such as https://huggingface.co/Locutusque/gpt2-medium-conversational and https://huggingface.co/Locutusque/gpt2-large-conversational ## Intended Use This model is intended to be used for generating conversational responses in a variety of contexts, such as chatbots, virtual assistants, and customer service applications. It is designed to provide natural and engaging responses to user input, with a focus on maintaining a consistent tone and style throughout the conversation. The model is suitable for use in both text-based and voice-based interfaces, and can be easily integrated into existing applications using the PyTorch and Transformers frameworks. ## Training Data The model is trained on a large dataset of conversational data, consisting of interactions between users and an AI assistant. The data is preprocessed to remove any sensitive information and is formatted in a way that is suitable for training a language model. The training data is split into a training set and a validation set, with the training set used to update the model parameters and the validation set used to evaluate the model performance. 
The model was trained on 245,000 examples over 1,225,000 steps, it achieved decent metrics. This model outperformed the base GPT-2 model significantly on a new conversational dataset during a fine-tuning session. Here is a side-by-side comparison of the two models during the first steps of training ```python # Base GPT-2 """ Epoch 1/5, Batch 1/10000: Loss - 64.9255, Reward - 260.0000, Penalty - 624.0000, BLEU - 0.0000 Epoch 1/5, Batch 2/10000: Loss - 57.4635, Reward - 303.0000, Penalty - 870.0000, BLEU - 0.0000 Epoch 1/5, Batch 3/10000: Loss - 67.8061, Reward - 295.0000, Penalty - 908.0000, BLEU - 0.0000 Epoch 1/5, Batch 4/10000: Loss - 59.6118, Reward - 800.0000, Penalty - 740.0000, BLEU - 0.0000 Epoch 1/5, Batch 5/10000: Loss - 67.4855, Reward - 402.0000, Penalty - 806.0000, BLEU - 0.0000 Epoch 1/5, Batch 6/10000: Loss - 29.3718, Reward - 937.0000, Penalty - 760.0000, BLEU - 0.0000 Epoch 1/5, Batch 7/10000: Loss - 79.0709, Reward - 390.0000, Penalty - 1114.0000, BLEU - 0.0000 Epoch 1/5, Batch 8/10000: Loss - 61.4583, Reward - 385.0000, Penalty - 760.0000, BLEU - 0.0000 Epoch 1/5, Batch 9/10000: Loss - 56.3084, Reward - 741.0000, Penalty - 560.0000, BLEU - 3.5500 Epoch 1/5, Batch 10/10000: Loss - 80.0192, Reward - 838.0000, Penalty - 1424.0000, BLEU - 0.0000 Epoch 1/5, Batch 11/10000: Loss - 51.8236, Reward - 228.0000, Penalty - 812.0000, BLEU - 0.0001 Epoch 1/5, Batch 12/10000: Loss - 71.4071, Reward - 541.0000, Penalty - 982.0000, BLEU - 0.0000 Epoch 1/5, Batch 13/10000: Loss - 33.3624, Reward - 910.0000, Penalty - 1002.0000, BLEU - 0.0027 Epoch 1/5, Batch 14/10000: Loss - 55.9721, Reward - 808.0000, Penalty - 798.0000, BLEU - 0.0005 Epoch 1/5, Batch 15/10000: Loss - 67.0336, Reward - 517.0000, Penalty - 764.0000, BLEU - 0.0000 """ # Conversational GPT-2 """ Epoch 1/5, Batch 1/10000: Loss - 6.1980, Reward - 887.0000, Penalty - 1500.0000, BLEU - 0.0648 Epoch 1/5, Batch 2/10000: Loss - 4.5750, Reward - 245.0000, Penalty - 1618.0000, BLEU - 0.0008 Epoch 1/5, Batch 3/10000: Loss - 5.1264, Reward - 600.0000, Penalty - 642.0000, BLEU - 5.7981 Epoch 1/5, Batch 4/10000: Loss - 0.2995, Reward - 1020.0000, Penalty - 74.0000, BLEU - 13.8469 Epoch 1/5, Batch 5/10000: Loss - 7.9377, Reward - 203.0000, Penalty - 1700.0000, BLEU - 0.3218 Epoch 1/5, Batch 6/10000: Loss - 5.0522, Reward - 1020.0000, Penalty - 2034.0000, BLEU - 0.1946 Epoch 1/5, Batch 7/10000: Loss - 2.0585, Reward - 925.0000, Penalty - 526.0000, BLEU - 16.1298 Epoch 1/5, Batch 8/10000: Loss - 5.9736, Reward - 1009.0000, Penalty - 1844.0000, BLEU - 0.0085 Epoch 1/5, Batch 9/10000: Loss - 6.0867, Reward - 245.0000, Penalty - 1690.0000, BLEU - 1.9342 Epoch 1/5, Batch 10/10000: Loss - 7.8497, Reward - 155.0000, Penalty - 1780.0000, BLEU - 0.0115 Epoch 1/5, Batch 11/10000: Loss - 3.8887, Reward - 1012.0000, Penalty - 2010.0000, BLEU - 0.6957 Epoch 1/5, Batch 12/10000: Loss - 6.6133, Reward - 216.0000, Penalty - 1638.0000, BLEU - 1.7853 Epoch 1/5, Batch 13/10000: Loss - 1.3319, Reward - 945.0000, Penalty - 374.0000, BLEU - 0.0075 Epoch 1/5, Batch 14/10000: Loss - 2.6296, Reward - 956.0000, Penalty - 414.0000, BLEU - 3.2207 Epoch 1/5, Batch 15/10000: Loss - 6.8827, Reward - 1013.0000, Penalty - 1970.0000, BLEU - 3.7418 """ ``` ## Model Architecture The model architecture used in this model is GPT-2, a transformer-based language model that is capable of generating high-quality text with a wide range of styles and tones. 
The GPT-2 architecture consists of a multi-layered decoder-only transformer, with self-attention mechanisms that allow the model to capture long-term dependencies and generate coherent text. ## Evaluation Metrics The model is evaluated based on several metrics, including loss, reward, penalty, BLEU score, and perplexity. The loss metric is calculated during training and reflects the difference between the predicted output and the actual output. The reward metric is based on the number of correct words generated by the model, while the penalty metric penalizes the model for repeating words consecutively. The BLEU score measures the similarity between the generated text and the ground truth text, while the perplexity metric measures how well the model is able to predict the next word in a sequence. During validation, the model achieved the following metrics: - BLEU Score: 9 - Perplexity: 19 - Loss: 1.7 ## Limitations and Bias This model is not suitable for all use cases due to its limited training time on a weak computer. As a result, it may produce irrelevant or nonsensical responses. Additionally, it has not been fine-tuned to remember the chat history, is unable to provide follow-up responses, and it does not know the answer to many questions (it was only fine-tuned to respond in a conversational way). For optimal performance, we recommend using a GPU with at least 4GB of VRAM and downloading the model manually instead of using the Transformers library or deploying it on the Interface API. Here's how you should deploy the model: ```python import torch from transformers import GPT2Tokenizer, GPT2LMHeadModel tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2LMHeadModel.from_pretrained('gpt2') tokenizer.add_special_tokens({'pad_token': '[PAD]'}) tokenizer.add_special_tokens({'eos_token': '<|End|>'}) special_tokens = { "additional_special_tokens": ["<|USER|>", "<|SYSTEM|>", "<|ASSISTANT|>"] } tokenizer.add_special_tokens(special_tokens) model.resize_token_embeddings(len(tokenizer)) model.load_state_dict(torch.load("path/to/model")) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") model.to(device) def generate_text(model, tokenizer, prompt, max_length=1024): prompt = f'<|USER|> {prompt} <|ASSISTANT|> ' input_ids = tokenizer.encode(prompt, add_special_tokens=True, return_tensors="pt").to(device) attention_mask = torch.ones_like(input_ids).to(device) output = model.generate(input_ids, max_length=max_length, do_sample=True, top_k=35, top_p=0.80, pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id, attention_mask=attention_mask) output_ids = tokenizer.decode(output[0], skip_special_tokens=False) assistant_token_index = output_ids.index('<|ASSISTANT|>') + len('<|ASSISTANT|>') next_token_index = output_ids.find('<|', assistant_token_index) output_ids = output_ids[assistant_token_index:next_token_index] return output_ids # Loop to interact with the model while True: prompt = input("Enter a prompt (or 'q' to quit): ") if prompt == "q": break output_text = generate_text(model, tokenizer, prompt) print(output_text) ``` ## Deploying and training the model The model has been fine-tuned on a specific input format that goes like this ```"<|USER|> {user prompt} <|ASSISTANT|> {model prediction} <|End|>".``` For the best performance from the model the input text should be as follows ```<|USER|> {user prompt} <|ASSISTANT|> ``` and the target/label should be as follows ```<|USER|> {user prompt} <|ASSISTANT|> {dataset output} <|End|>``` # [Open LLM Leaderboard 
Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Locutusque__gpt2-conversational-or-qa)

| Metric              | Value |
|---------------------|------:|
| Avg.                | 25.09 |
| ARC (25-shot)       | 21.42 |
| HellaSwag (10-shot) | 27.61 |
| MMLU (5-shot)       | 26.51 |
| TruthfulQA (0-shot) | 47.31 |
| Winogrande (5-shot) | 51.14 |
| GSM8K (5-shot)      | 0.08  |
| DROP (3-shot)       | 1.55  |
adogecheems/aamXLAnimeMix_v10
adogecheems
2024-06-21T08:47:49Z
740
0
diffusers
[ "diffusers", "safetensors", "art", "en", "license:unknown", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2024-06-21T02:14:53Z
---
license: unknown
language:
- en
tags:
- art
---

## Please note that this is not an official version.
AlekseyKorshuk/comedy-scripts
AlekseyKorshuk
2022-02-11T14:58:22Z
739
1
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2022-03-02T23:29:04Z
Entry not found
nickmuchi/vit-base-xray-pneumonia
nickmuchi
2022-03-09T05:43:35Z
739
3
transformers
[ "transformers", "pytorch", "tensorboard", "vit", "image-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2022-03-09T02:04:50Z
---
license: apache-2.0
tags:
- image-classification
- generated_from_trainer
datasets:
- chest xrays
widget:
- src: https://drive.google.com/uc?id=1yqnhD4Wjt4Y_NGLtijTGGaaw9GL497kQ
  example_title: PNEUMONIA
- src: https://drive.google.com/uc?id=1xjcIEDb8kuSd4wF44gCEgsc0PfRvs53m
  example_title: NORMAL
metrics:
- accuracy
model-index:
- name: vit-base-xray-pneumonia
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# vit-base-xray-pneumonia

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the [chest-xray-pneumonia](https://www.kaggle.com/paultimothymooney/chest-xray-pneumonia) dataset.
It achieves the following results on the evaluation set:
- Loss: 0.3387
- Accuracy: 0.9006

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0002
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 10

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.1233        | 0.31  | 100  | 1.1662          | 0.6651   |
| 0.0868        | 0.61  | 200  | 0.3387          | 0.9006   |
| 0.1387        | 0.92  | 300  | 0.5297          | 0.8237   |
| 0.1264        | 1.23  | 400  | 0.4566          | 0.8590   |
| 0.0829        | 1.53  | 500  | 0.6832          | 0.8285   |
| 0.0734        | 1.84  | 600  | 0.4886          | 0.8157   |
| 0.0132        | 2.15  | 700  | 1.3639          | 0.7292   |
| 0.0877        | 2.45  | 800  | 0.5258          | 0.8846   |
| 0.0516        | 2.76  | 900  | 0.8772          | 0.8013   |
| 0.0637        | 3.07  | 1000 | 0.4947          | 0.8558   |
| 0.0022        | 3.37  | 1100 | 1.0062          | 0.8045   |
| 0.0555        | 3.68  | 1200 | 0.7822          | 0.8285   |
| 0.0405        | 3.99  | 1300 | 1.9288          | 0.6779   |
| 0.0012        | 4.29  | 1400 | 1.2153          | 0.7981   |
| 0.0034        | 4.6   | 1500 | 1.8931          | 0.7308   |
| 0.0339        | 4.91  | 1600 | 0.9071          | 0.8590   |
| 0.0013        | 5.21  | 1700 | 1.6266          | 0.7580   |
| 0.0373        | 5.52  | 1800 | 1.5252          | 0.7676   |
| 0.001         | 5.83  | 1900 | 1.2748          | 0.7869   |
| 0.0005        | 6.13  | 2000 | 1.2103          | 0.8061   |
| 0.0004        | 6.44  | 2100 | 1.3133          | 0.7981   |
| 0.0004        | 6.75  | 2200 | 1.2200          | 0.8045   |
| 0.0004        | 7.06  | 2300 | 1.2834          | 0.7933   |
| 0.0004        | 7.36  | 2400 | 1.3080          | 0.7949   |
| 0.0003        | 7.67  | 2500 | 1.3814          | 0.7917   |
| 0.0004        | 7.98  | 2600 | 1.2853          | 0.7965   |
| 0.0003        | 8.28  | 2700 | 1.3644          | 0.7933   |
| 0.0003        | 8.59  | 2800 | 1.3137          | 0.8013   |
| 0.0003        | 8.9   | 2900 | 1.3507          | 0.7997   |
| 0.0003        | 9.2   | 3000 | 1.3751          | 0.7997   |
| 0.0003        | 9.51  | 3100 | 1.3884          | 0.7981   |
| 0.0003        | 9.82  | 3200 | 1.3831          | 0.7997   |

## Example Images

#### Pneumonia Chest X-Ray

![Pneumonia](https://drive.google.com/uc?id=1yqnhD4Wjt4Y_NGLtijTGGaaw9GL497kQ)

#### Normal Chest X-Ray

![Normal](https://drive.google.com/uc?id=1xjcIEDb8kuSd4wF44gCEgsc0PfRvs53m)

### Framework versions

- Transformers 4.17.0
- Pytorch 1.10.0+cu111
- Datasets 1.18.4
- Tokenizers 0.11.6
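Since the usage sections above are empty, a minimal inference sketch with the transformers image-classification pipeline may be helpful; the example URL is one of the widget images from this card:

```python
# Minimal inference sketch for this checkpoint using the transformers
# image-classification pipeline; the image URL is taken from the card's widget.
from transformers import pipeline

classifier = pipeline("image-classification", model="nickmuchi/vit-base-xray-pneumonia")
preds = classifier("https://drive.google.com/uc?id=1yqnhD4Wjt4Y_NGLtijTGGaaw9GL497kQ")
print(preds)  # e.g. [{'label': 'PNEUMONIA', 'score': ...}, ...]
```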
timm/maxvit_small_tf_384.in1k
timm
2023-05-11T00:21:44Z
739
1
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2204.01697", "license:apache-2.0", "region:us" ]
image-classification
2022-12-02T21:56:33Z
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---
# Model card for maxvit_small_tf_384.in1k

An official MaxViT image classification model. Trained in TensorFlow on ImageNet-1k by paper authors.

Ported from the official TensorFlow implementation (https://github.com/google-research/maxvit) to PyTorch by Ross Wightman.

### Model Variants in [maxxvit.py](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/maxxvit.py)

MaxxViT covers a number of related model architectures that share a common structure including:
- CoAtNet - Combining MBConv (depthwise-separable) convolutional blocks in early stages with self-attention transformer blocks in later stages.
- MaxViT - Uniform blocks across all stages, each containing a MBConv (depthwise-separable) convolution block followed by two self-attention blocks with different partitioning schemes (window followed by grid).
- CoAtNeXt - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in CoAtNet. All normalization layers are LayerNorm (no BatchNorm).
- MaxxViT - A timm specific arch that uses ConvNeXt blocks in place of MBConv blocks in MaxViT. All normalization layers are LayerNorm (no BatchNorm).
- MaxxViT-V2 - A MaxxViT variation that removes the window block attention leaving only ConvNeXt blocks and grid attention w/ more width to compensate.

Aside from the major variants listed above, there are more subtle changes from model to model. Any model with the string `rw` in its name is a `timm`-specific config with modelling adjustments made to favour PyTorch eager use. These were created while training initial reproductions of the models, so there are variations. All models with the string `tf` exactly match TensorFlow-based models by the original paper authors, with weights ported to PyTorch. This covers a number of MaxViT models. The official CoAtNet models were never released.
## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 69.0
  - GMACs: 35.9
  - Activations (M): 183.7
  - Image size: 384 x 384
- **Papers:**
  - MaxViT: Multi-Axis Vision Transformer: https://arxiv.org/abs/2204.01697
- **Dataset:** ImageNet-1k

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch  # needed for torch.topk below
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('maxvit_small_tf_384.in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'maxvit_small_tf_384.in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 192, 192])
    #  torch.Size([1, 96, 96, 96])
    #  torch.Size([1, 192, 48, 48])
    #  torch.Size([1, 384, 24, 24])
    #  torch.Size([1, 768, 12, 12])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'maxvit_small_tf_384.in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 768, 12, 12) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
### By Top-1

|model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)|
|------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:|
|[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22|
|[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76|
|[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99|
|[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15| |[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84| |[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90| |[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95| |[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74| |[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43| |[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64| |[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77| |[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99| |[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22| |[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15| |[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78| |[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90| |[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84| |[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77| |[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59| |[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65| |[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42| |[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35| |[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13| |[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01| |[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38| |[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78| |[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30| |[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17| 
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92| |[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60| |[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11| |[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78| |[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| ### By Throughput (samples / sec) |model |top1 |top5 |samples / sec |Params (M) |GMAC |Act (M)| |------------------------------------------------------------------------------------------------------------------------|----:|----:|--------------:|--------------:|-----:|------:| |[coatnext_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnext_nano_rw_224.sw_in1k) |81.95|95.92| 2525.52| 14.70| 2.47| 12.80| |[coatnet_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_nano_rw_224.sw_in1k) |81.70|95.64| 2344.52| 15.14| 2.41| 15.41| |[coatnet_rmlp_nano_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_nano_rw_224.sw_in1k) |82.05|95.87| 2109.09| 15.15| 2.62| 20.34| |[coatnet_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_0_rw_224.sw_in1k) |82.39|95.84| 1831.21| 27.44| 4.43| 18.73| |[coatnet_bn_0_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_bn_0_rw_224.sw_in1k) |82.39|96.19| 1600.14| 27.44| 4.67| 22.04| |[maxvit_rmlp_pico_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_pico_rw_256.sw_in1k) |80.53|95.21| 1594.71| 7.52| 1.85| 24.86| |[maxxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_nano_rw_256.sw_in1k) |83.03|96.34| 1341.24| 16.78| 4.37| 26.05| |[maxvit_rmlp_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_nano_rw_256.sw_in1k) |82.96|96.26| 1283.24| 15.50| 4.47| 31.92| |[maxxvitv2_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxxvitv2_nano_rw_256.sw_in1k) |83.11|96.33| 1276.88| 23.70| 6.26| 23.05| 
|[maxvit_nano_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_nano_rw_256.sw_in1k) |82.93|96.23| 1218.17| 15.45| 4.46| 30.28|
|[maxvit_tiny_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_tiny_rw_224.sw_in1k) |83.50|96.50| 1100.53| 29.06| 5.11| 33.11|
|[coatnet_rmlp_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw_224.sw_in1k) |83.36|96.45| 1093.03| 41.69| 7.85| 35.47|
|[coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_1_rw2_224.sw_in12k_ft_in1k) |84.90|96.96| 1025.45| 41.72| 8.11| 40.13|
|[maxvit_tiny_tf_224.in1k](https://huggingface.co/timm/maxvit_tiny_tf_224.in1k) |83.41|96.59| 1004.94| 30.92| 5.60| 35.78|
|[coatnet_1_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_1_rw_224.sw_in1k) |83.62|96.38| 989.59| 41.72| 8.04| 34.60|
|[maxvit_rmlp_tiny_rw_256.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_tiny_rw_256.sw_in1k) |84.23|96.78| 807.21| 29.15| 6.77| 46.92|
|[maxvit_rmlp_small_rw_224.sw_in1k](https://huggingface.co/timm/maxvit_rmlp_small_rw_224.sw_in1k) |84.49|96.76| 693.82| 64.90| 10.75| 49.30|
|[maxvit_small_tf_224.in1k](https://huggingface.co/timm/maxvit_small_tf_224.in1k) |84.43|96.83| 647.96| 68.93| 11.66| 53.17|
|[coatnet_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_2_rw_224.sw_in12k_ft_in1k) |86.57|97.89| 631.88| 73.87| 15.09| 49.22|
|[coatnet_rmlp_2_rw_224.sw_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in1k) |84.61|96.74| 625.81| 73.88| 15.18| 54.78|
|[coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_224.sw_in12k_ft_in1k) |86.49|97.90| 620.58| 73.88| 15.18| 54.78|
|[maxxvit_rmlp_small_rw_256.sw_in1k](https://huggingface.co/timm/maxxvit_rmlp_small_rw_256.sw_in1k) |84.63|97.06| 575.53| 66.01| 14.67| 58.38|
|[maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.64|98.02| 501.03| 116.09| 24.20| 62.77|
|[maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_224.sw_in12k_ft_in1k) |86.89|98.02| 375.86| 116.14| 23.15| 92.64|
|[maxvit_base_tf_224.in1k](https://huggingface.co/timm/maxvit_base_tf_224.in1k) |84.85|96.99| 358.25| 119.47| 24.04| 95.01|
|[maxvit_tiny_tf_384.in1k](https://huggingface.co/timm/maxvit_tiny_tf_384.in1k) |85.11|97.38| 293.46| 30.98| 17.53| 123.42|
|[maxvit_large_tf_224.in1k](https://huggingface.co/timm/maxvit_large_tf_224.in1k) |84.93|96.97| 247.71| 211.79| 43.68| 127.35|
|[maxvit_small_tf_384.in1k](https://huggingface.co/timm/maxvit_small_tf_384.in1k) |85.54|97.46| 188.35| 69.02| 35.87| 183.65|
|[coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/coatnet_rmlp_2_rw_384.sw_in12k_ft_in1k) |87.39|98.31| 160.80| 73.88| 47.69| 209.43|
|[maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxxvitv2_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.47|98.37| 149.49| 116.09| 72.98| 213.74|
|[maxvit_tiny_tf_512.in1k](https://huggingface.co/timm/maxvit_tiny_tf_512.in1k) |85.67|97.58| 144.25| 31.05| 33.49| 257.59|
|[maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k](https://huggingface.co/timm/maxvit_rmlp_base_rw_384.sw_in12k_ft_in1k) |87.81|98.37| 106.55| 116.14| 70.97| 318.95|
|[maxvit_base_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_384.in21k_ft_in1k) |87.92|98.54| 104.71| 119.65| 73.80| 332.90|
|[maxvit_base_tf_384.in1k](https://huggingface.co/timm/maxvit_base_tf_384.in1k) |86.29|97.80| 101.09| 119.65| 73.80| 332.90|
|[maxvit_small_tf_512.in1k](https://huggingface.co/timm/maxvit_small_tf_512.in1k) |86.10|97.76| 88.63| 69.13| 67.26| 383.77|
|[maxvit_large_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_384.in21k_ft_in1k) |87.98|98.56| 71.75| 212.03|132.55| 445.84|
|[maxvit_large_tf_384.in1k](https://huggingface.co/timm/maxvit_large_tf_384.in1k) |86.23|97.69| 70.56| 212.03|132.55| 445.84|
|[maxvit_base_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_base_tf_512.in21k_ft_in1k) |88.20|98.53| 50.87| 119.88|138.02| 703.99|
|[maxvit_base_tf_512.in1k](https://huggingface.co/timm/maxvit_base_tf_512.in1k) |86.60|97.92| 50.75| 119.88|138.02| 703.99|
|[maxvit_xlarge_tf_384.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_384.in21k_ft_in1k) |88.32|98.54| 42.53| 475.32|292.78| 668.76|
|[maxvit_large_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_large_tf_512.in21k_ft_in1k) |88.04|98.40| 36.42| 212.33|244.75| 942.15|
|[maxvit_large_tf_512.in1k](https://huggingface.co/timm/maxvit_large_tf_512.in1k) |86.52|97.88| 36.04| 212.33|244.75| 942.15|
|[maxvit_xlarge_tf_512.in21k_ft_in1k](https://huggingface.co/timm/maxvit_xlarge_tf_512.in21k_ft_in1k) |88.53|98.64| 21.76| 475.77|534.14|1413.22|

## Citation
```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/huggingface/pytorch-image-models}}
}
```
```bibtex
@article{tu2022maxvit,
  title={MaxViT: Multi-Axis Vision Transformer},
  author={Tu, Zhengzhong and Talebi, Hossein and Zhang, Han and Yang, Feng and Milanfar, Peyman and Bovik, Alan and Li, Yinxiao},
  journal={ECCV},
  year={2022},
}
```
```bibtex
@article{dai2021coatnet,
  title={CoAtNet: Marrying Convolution and Attention for All Data Sizes},
  author={Dai, Zihang and Liu, Hanxiao and Le, Quoc V and Tan, Mingxing},
  journal={arXiv preprint arXiv:2106.04803},
  year={2021}
}
```
Yntec/Atlas
Yntec
2023-11-24T17:18:13Z
739
1
diffusers
[ "diffusers", "safetensors", "Fashion Design", "Collage", "Game", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "en", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-11-24T16:30:16Z
--- language: - en license: creativeml-openrail-m tags: - Fashion Design - Collage - Game - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- # Atlas FotoPhoto with the Atlas LoRA baked in. Samples and prompts: ![Samples](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/NBfeHXXlTMPA6LIcTpA86.png) Top left: best quality, masterpiece, burger, simple background Top right: best quality, masterpiece, skaters shoes design, simple background Bottom left: best quality, masterpiece, motorcycle design, simple background Bottom right: best quality, masterpiece, sakura tree in a bottle, simple background Original pages: https://civitai.com/models/33036/atlas https://huggingface.co/Yntec/FotoPhoto
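A minimal `diffusers` sketch for generating an image with this checkpoint (untested; standard Stable Diffusion pipeline usage, with one of the sample prompts above):

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("Yntec/Atlas", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

prompt = "best quality, masterpiece, burger, simple background"
image = pipe(prompt).images[0]
image.save("burger.png")
```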
chargoddard/average-dolphin-8x7B
chargoddard
2024-01-05T18:41:54Z
739
1
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "mergekit", "merge", "conversational", "arxiv:2203.05482", "base_model:cognitivecomputations/dolphin-2.7-mixtral-8x7b", "base_model:cognitivecomputations/dolphin-2.5-mixtral-8x7b", "base_model:cognitivecomputations/dolphin-2.6-mixtral-8x7b", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-04T02:39:11Z
--- base_model: - cognitivecomputations/dolphin-2.7-mixtral-8x7b - cognitivecomputations/dolphin-2.5-mixtral-8x7b - cognitivecomputations/dolphin-2.6-mixtral-8x7b tags: - mergekit - merge license: apache-2.0 --- # average-dolphin-8x7B This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method. ### Models Merged The following models were included in the merge: * [cognitivecomputations/dolphin-2.7-mixtral-8x7b](https://huggingface.co/cognitivecomputations/dolphin-2.7-mixtral-8x7b) * [cognitivecomputations/dolphin-2.5-mixtral-8x7b](https://huggingface.co/cognitivecomputations/dolphin-2.5-mixtral-8x7b) * [cognitivecomputations/dolphin-2.6-mixtral-8x7b](https://huggingface.co/cognitivecomputations/dolphin-2.6-mixtral-8x7b) ### Configuration The following YAML configuration was used to produce this model: ```yaml merge_method: linear dtype: bfloat16 models: # just your average dolphin - model: cognitivecomputations/dolphin-2.7-mixtral-8x7b parameters: weight: 0.5 - model: cognitivecomputations/dolphin-2.6-mixtral-8x7b parameters: weight: 0.3 - model: cognitivecomputations/dolphin-2.5-mixtral-8x7b parameters: weight: 0.2 parameters: normalize: true ```
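## Usage

A minimal loading sketch (untested; assumes the merged tokenizer ships the Dolphin models' ChatML chat template):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "chargoddard/average-dolphin-8x7B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

messages = [{"role": "user", "content": "What is a linear model merge?"}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)
outputs = model.generate(inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```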
sethuiyer/Dr_Samantha_7b_mistral
sethuiyer
2024-03-07T07:11:15Z
739
4
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "segmed/MedMistral-7B-v0.1", "Guilherme34/Samantha-v2", "conversational", "dataset:medmcqa", "dataset:cognitivecomputations/samantha-data", "base_model:segmed/MedMistral-7B-v0.1", "base_model:Guilherme34/Samantha-v2", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-06T10:32:39Z
---
license: apache-2.0
tags:
- merge
- mergekit
- segmed/MedMistral-7B-v0.1
- Guilherme34/Samantha-v2
datasets:
- medmcqa
- cognitivecomputations/samantha-data
base_model:
- segmed/MedMistral-7B-v0.1
- Guilherme34/Samantha-v2
model-index:
- name: Dr_Samantha_7b_mistral
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: AI2 Reasoning Challenge (25-Shot)
      type: ai2_arc
      config: ARC-Challenge
      split: test
      args:
        num_few_shot: 25
    metrics:
    - type: acc_norm
      value: 60.41
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sethuiyer/Dr_Samantha_7b_mistral
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: HellaSwag (10-Shot)
      type: hellaswag
      split: validation
      args:
        num_few_shot: 10
    metrics:
    - type: acc_norm
      value: 83.65
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sethuiyer/Dr_Samantha_7b_mistral
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU (5-Shot)
      type: cais/mmlu
      config: all
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 63.14
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sethuiyer/Dr_Samantha_7b_mistral
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: TruthfulQA (0-shot)
      type: truthful_qa
      config: multiple_choice
      split: validation
      args:
        num_few_shot: 0
    metrics:
    - type: mc2
      value: 41.37
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sethuiyer/Dr_Samantha_7b_mistral
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: Winogrande (5-shot)
      type: winogrande
      config: winogrande_xl
      split: validation
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 75.45
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sethuiyer/Dr_Samantha_7b_mistral
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GSM8k (5-shot)
      type: gsm8k
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 31.46
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=sethuiyer/Dr_Samantha_7b_mistral
      name: Open LLM Leaderboard
---

# Dr_Samantha_7b_mistral

<p align="center">
  <img src="https://huggingface.co/sethuiyer/Dr_Samantha-7b/resolve/main/dr_samantha_anime_style_reduced_quality.webp" height="256px" alt="SynthIQ">
</p>

Dr. Samantha represents a blend of AI in healthcare, offering a balance between technical medical knowledge and the softer skills of communication and empathy, crucial for patient interaction and care.

This model is a merge of the following models, made with [mergekit](https://github.com/cg123/mergekit):

* [segmed/MedMistral-7B-v0.1](https://huggingface.co/segmed/MedMistral-7B-v0.1)
* [Guilherme34/Samantha-v2](https://huggingface.co/Guilherme34/Samantha-v2)

It combines the capabilities of a medical knowledge-focused model (trained on USMLE databases and doctor-patient interactions) with the philosophical, psychological, and relational understanding of the Samantha-7b model.

As both a medical consultant and personal counselor, Dr. Samantha could effectively support both physical and mental wellbeing - important for whole-person care.
## 🧩 Configuration

```yaml
slices:
  - sources:
      - model: segmed/MedMistral-7B-v0.1
        layer_range: [0, 32]
      - model: Guilherme34/Samantha-v2
        layer_range: [0, 32]
merge_method: slerp
base_model: OpenPipe/mistral-ft-optimized-1218
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5
dtype: bfloat16
```

## OpenLLM Evaluation

Details about that can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_sethuiyer__Dr_Samantha_7b_mistral).

Overall, with regard to subjects related to the medical domain, the model's performance is as follows:

| Subject | Accuracy |
|-----------------------|------------|
| Clinical Knowledge | 70.57% |
| Medical Genetics | 71.00% |
| Human Aging | 69.06% |
| Human Sexuality | 75.57% |
| College Medicine | 63.01% |
| Anatomy | 58.52% |
| College Biology | 72.92% |
| High School Biology | 75.48% |
| Professional Medicine | 65.44% |
| Nutrition | 76.79% |
| High School Psychology | 83.12% |
| Professional Psychology | 65.35% |
| Virology | 53.61% |
| Average | **68.82%** |

Dr. Samantha performs reasonably well on various medical-related subjects, averaging 68.82% overall in medical sciences, biology, and psychology; however, it's important to note that medical diagnosis and treatment decisions often require a much higher level of accuracy, reliability, and context awareness.

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "sethuiyer/Dr_Samantha_7b_mistral"

ask_samantha = '''
Symptoms: Dizziness, headache and nausea.

What is the differential diagnosis?
'''

messages = [{"role": "system", "content": '''You are Doctor Samantha, a virtual AI doctor known for your friendly and approachable demeanor, combined with a deep expertise in the medical field. You're here to provide professional, empathetic, and knowledgeable advice on health-related inquiries. You'll also provide differential diagnosis. If you're unsure about any information, don't share false information.'''},
            {"role": "user", "content": f"{ask_samantha}"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```

```text
Dizziness, headache and nausea can be caused by a variety of conditions, including:

Vertigo: A sensation of spinning or dizziness that can be caused by problems with the inner ear or brain.

Migraine: A type of headache that can cause throbbing pain, sensitivity to light and sound, and nausea.

Concussion: A type of traumatic brain injury that can cause dizziness, headache, and nausea.

Dehydration: A lack of fluids in the body can cause dizziness, headache, and nausea.

Low blood sugar: A drop in blood sugar levels can cause dizziness, headache, and nausea.

It's important to consult with a healthcare professional for a proper diagnosis and treatment plan.
```

## GGUF Files

GGUF files are available at [s3nh/sethuiyer-Dr_Samantha_7b_mistral-GGUF](https://huggingface.co/s3nh/sethuiyer-Dr_Samantha_7b_mistral-GGUF), thanks to [s3nh](https://huggingface.co/s3nh).

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_sethuiyer__Dr_Samantha_7b_mistral)

| Metric |Value|
|---------------------------------|----:|
|Avg. |59.25|
|AI2 Reasoning Challenge (25-Shot)|60.41|
|HellaSwag (10-Shot) |83.65|
|MMLU (5-Shot) |63.14|
|TruthfulQA (0-shot) |41.37|
|Winogrande (5-shot) |75.45|
|GSM8k (5-shot) |31.46|
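To run the GGUF quantizations locally with `llama-cpp-python`, a minimal sketch (the filename below is hypothetical; substitute whichever quantization you download from the GGUF repo above):

```python
from llama_cpp import Llama

# hypothetical filename; pick the quantization you actually downloaded
llm = Llama(model_path="./dr_samantha_7b_mistral.Q4_K_M.gguf", n_ctx=2048)

output = llm("What are common causes of dizziness?", max_tokens=200)
print(output["choices"][0]["text"])
```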
shitshow123/TinyLlama-1.1B-ChatStrong-DPO-PPO
shitshow123
2024-01-16T03:57:53Z
739
1
transformers
[ "transformers", "safetensors", "llama", "feature-extraction", "license:apache-2.0", "endpoints_compatible", "text-generation-inference", "region:us" ]
feature-extraction
2024-01-13T02:29:58Z
---
license: apache-2.0
---

This is a strong TinyLlama-1.1B chat model trained using a modified version of the PPO + DPO algorithm.

**Scores rated by ChatGPT + Claude + Gemini (maximum score is 3)**

| Characteristic | Baseline (TinyLlama/TinyLlama-1.1B-Chat-v1.0) | Ours (TinyLlama-1.1B-ChatStrong-DPO-PPO) |
|-----------------|--------------------------------------------------|--------------------------------------------------------|
| Fluency | 2 | 3 |
| Coherence | 1 | 3 |
| Non-repetition | 1 | 3 |
| Engagement | 1 | 2 |
| Creativity | 1 | 2 |
| Accuracy | 2 | 2 |
| Relevance | 2 | 3 |
| Knowledge | 2 | 2 |
| Common sense | 1 | 2 |
| Empathy | 1 | 2 |
| Humor | 1 | 2 |
| Logic | 1 | 2 |
| Grammar | 2 | 3 |

*Note: scores are based on prompts generated by GPT-4.*

```python
# Install transformers from source - only needed for versions <= v4.34
# pip install git+https://github.com/huggingface/transformers.git
# pip install accelerate

import torch
from transformers import pipeline

pipe = pipeline("text-generation", model="shitshow123/TinyLlama-1.1B-ChatStrong-DPO-PPO", torch_dtype=torch.bfloat16, device_map="auto")

# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
#print(outputs[0]["generated_text"])

output_text = outputs[0]["generated_text"]
words = output_text.split()

# Insert a newline character every 10 words for readability
formatted_output = ""
for i in range(0, len(words), 10):
    formatted_output += ' '.join(words[i:i+10]) + "\n"

print(formatted_output)
# ...
```

## Example Output by this model:

<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
**<|user|>
Provide a summary of the latest developments in renewable energy technologies, focusing on solar and wind power advancements in the past year.</s>
<|assistant|>**
In the past year, significant advancements have been made in the renewable energy sector, particularly in the areas of solar and wind power. Here's a summary of some of the notable developments:

1. Solar:
- Solar panels have become more efficient, with a record-breaking 29.2% conversion efficiency achieved in 2020.
- The world's largest solar farm, located in China, has a capacity of 1.2 GW, making it the largest solar power plant in the world.
- The European Union has set a target of achieving 32% renewable energy by 2030, with solar power expected to play a significant role in achieving this goal.

2. Wind:
- Wind turbines have become more efficient, with a record-breaking 1.2 GW of wind power capacity installed in 2020.
- The world's largest offshore wind farm, located in the UK, has a capacity of 670 MW.
- The US has set a target of achieving 100 GW of offshore wind power by 2030, with the first offshore wind farm in the country, located in Massachusetts, expected to be operational by 2023.

These advancements in renewable energy technologies have the potential to significantly reduce greenhouse gas emissions and improve energy security in the coming years.
## Example Output by the baseline TinyLlama model (note the repetition of "November 2020"):

<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
**<|user|>
Provide a summary of the latest developments in renewable energy technologies, focusing on solar and wind power advancements in the past year.</s>
<|assistant|>**
In the past year, there have been significant advancements in renewable energy technologies, particularly in solar and wind power. Here's a summary:

1. Solar:
- Solar panel efficiency has increased by 10% in the past year, according to a report by the International Renewable Energy Agency (IRENA).
- The world's largest solar power plant, the 1.2 GW Solar Park in Abu Dhabi, opened in November 2020.
- The world's largest solar power plant, the 1.2 GW Solar Park in Abu Dhabi, opened in November 2020. It is expected to generate enough electricity to power 500,000 homes.
- The world's largest solar power plant, the 1.2 GW Solar Park in Abu Dhabi, opened in November 2020. It is expected to generate enough electricity to power 500,000 homes.
- The world's largest solar power plant, the 1.2 GW Solar Park in Abu Dhabi, opened in November 2020. It is expected to generate enough electricity to power 500,000 homes.

2. Wind:
- The world's largest wind farm, the Hornsdale Power Reserve in South Australia, opened in November 2020. It is expected to generate enough electricity to power 100,000 homes.
- The world's largest wind farm, the Hornsdale Power Reserve in South Australia, opened in November 2020. It is expected to generate enough electricity to power 100,000 homes.
- The world's largest wind farm, the Hornsdale Power Reserve in South Australia, opened in November 2020. It is expected to generate enough electricity to power 100,000 homes.

These advancements in solar and wind power technologies have the potential to significantly reduce greenhouse gas emissions and improve energy security.
KeyonZeng/philion-2
KeyonZeng
2024-03-25T04:13:36Z
739
0
transformers
[ "transformers", "safetensors", "phi", "text-generation", "custom_code", "en", "dataset:argilla/distilabel-intel-orca-dpo-pairs", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-17T05:19:44Z
--- library_name: transformers license: apache-2.0 datasets: - argilla/distilabel-intel-orca-dpo-pairs language: - en metrics: - accuracy --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
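Since the "How to Get Started with the Model" section above is still a placeholder, here is an untested, generic sketch for a phi-architecture text-generation checkpoint like this one (the `custom_code` tag suggests `trust_remote_code=True` is required; the `Instruct:/Output:` prompt format is an assumption, not documented in this card):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "KeyonZeng/philion-2"
tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto", trust_remote_code=True
)

# prompt format is assumed; adjust to whatever the model was actually trained on
inputs = tokenizer("Instruct: Explain DPO in one sentence.\nOutput:", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```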
liminerity/Blured-Ties-7B
liminerity
2024-01-17T23:29:07Z
739
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "liminerity/Blurstral-7b-slerp", "diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B", "base_model:liminerity/Blurstral-7b-slerp", "base_model:diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-17T23:20:44Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - liminerity/Blurstral-7b-slerp - diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B base_model: - liminerity/Blurstral-7b-slerp - diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B --- # Blured-Ties-7B Blured-Ties-7B is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [liminerity/Blurstral-7b-slerp](https://huggingface.co/liminerity/Blurstral-7b-slerp) * [diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B](https://huggingface.co/diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B) ## 🧩 Configuration ```yaml models: - model: mistralai/Mistral-7B-v0.1 # no parameters necessary for base model - model: liminerity/Blurstral-7b-slerp parameters: density: 0.5 weight: 0.5 - model: diffnamehard/Mistral-CatMacaroni-slerp-uncensored-7B parameters: density: 0.5 weight: 0.3 merge_method: ties base_model: mistralai/Mistral-7B-v0.1 parameters: normalize: true dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "liminerity/Blured-Ties-7B" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
andrijdavid/Macaroni-7b-Tied
andrijdavid
2024-03-22T10:44:01Z
739
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "en", "arxiv:2306.01708", "base_model:decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP", "base_model:mlabonne/NeuralMarcoro14-7B", "base_model:fblgit/UNA-TheBeagle-7b-v1", "base_model:SanjiWatsuki/Lelantos-DPO-7B", "base_model:mistralai/Mistral-7B-v0.1", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-19T23:04:31Z
--- language: - en license: apache-2.0 tags: - mistral - merge base_model: - decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP - mlabonne/NeuralMarcoro14-7B - fblgit/UNA-TheBeagle-7b-v1 - SanjiWatsuki/Lelantos-DPO-7B - mistralai/Mistral-7B-v0.1 model-index: - name: Macaroni-7b-Tied results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 72.87 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andrijdavid/Macaroni-7b-Tied name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.14 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andrijdavid/Macaroni-7b-Tied name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.73 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andrijdavid/Macaroni-7b-Tied name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 70.54 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andrijdavid/Macaroni-7b-Tied name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 81.93 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andrijdavid/Macaroni-7b-Tied name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 71.57 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=andrijdavid/Macaroni-7b-Tied name: Open LLM Leaderboard --- # Macaroni 7b Tied ## Merge Details ### Merge Method This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using [mistralai/Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1) as a base. ### Models Merged The following models were included in the merge: * [decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP](https://huggingface.co/decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP) * [mlabonne/NeuralMarcoro14-7B](https://huggingface.co/mlabonne/NeuralMarcoro14-7B) * [fblgit/UNA-TheBeagle-7b-v1](https://huggingface.co/fblgit/UNA-TheBeagle-7b-v1) * [SanjiWatsuki/Lelantos-DPO-7B](https://huggingface.co/SanjiWatsuki/Lelantos-DPO-7B) # Disclaimer The users of this model (hereinafter referred to as "the Model") should be aware of the following points prior to using the Model: * Factual Inaccuracy: The Model has been trained on a diverse set of data sources. However, it may occasionally generate outputs that contain factual inaccuracies. Users are advised to verify the factual correctness of the information provided by the Model, especially when using it for critical applications. 
* Potential Biases: Despite efforts to ensure fairness and neutrality, the Model may still exhibit biases inherent in the training data. These biases do not reflect the views or intentions of the developers. Users should be cautious of these potential biases, especially when using the Model in contexts where fairness and impartiality are crucial.

* Hallucination: The Model might generate content that is not grounded in reality, commonly referred to as "hallucinations." This is a known limitation of language models and should be considered when interpreting the Model's outputs.

* Inappropriate Content: While measures have been taken to minimize the risk, the Model might still generate content that could be deemed offensive, inappropriate, or not suitable for all audiences. We advise users to exercise discretion and consider the context of use.

* Use at Your Own Risk: The Model is provided "as is," and the developers make no representations or warranties of any kind concerning the Model's performance or suitability for any particular purpose. The user assumes full responsibility and risk of loss resulting from using the Model.

By using the Model, users acknowledge and agree to the terms stated in this disclaimer. This disclaimer is subject to change without notice, and the latest version can be found on the Model's Hugging Face page.

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_andrijdavid__Macaroni-7b-Tied)

| Metric |Value|
|---------------------------------|----:|
|Avg. |74.96|
|AI2 Reasoning Challenge (25-Shot)|72.87|
|HellaSwag (10-Shot) |88.14|
|MMLU (5-Shot) |64.73|
|TruthfulQA (0-shot) |70.54|
|Winogrande (5-shot) |81.93|
|GSM8k (5-shot) |71.57|
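### Configuration (illustrative)

The card does not publish the exact merge configuration. For readers unfamiliar with TIES, a purely illustrative mergekit sketch of a merge like the one described above (densities and weights are hypothetical, not the values actually used):

```yaml
models:
  - model: mistralai/Mistral-7B-v0.1
    # base model: no parameters necessary
  - model: decruz07/kellemar-DPO-Orca-Distilled-7B-SLERP
    parameters:
      density: 0.5
      weight: 0.25
  - model: mlabonne/NeuralMarcoro14-7B
    parameters:
      density: 0.5
      weight: 0.25
  - model: fblgit/UNA-TheBeagle-7b-v1
    parameters:
      density: 0.5
      weight: 0.25
  - model: SanjiWatsuki/Lelantos-DPO-7B
    parameters:
      density: 0.5
      weight: 0.25
merge_method: ties
base_model: mistralai/Mistral-7B-v0.1
parameters:
  normalize: true
dtype: bfloat16
```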
ConvexAI/BurningBruce-004
ConvexAI
2024-03-04T16:33:14Z
739
5
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "merge", "moe", "conversational", "en", "arxiv:2101.03961", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-20T13:58:06Z
---
language:
- en
license: apache-2.0
tags:
- merge
- moe
model-index:
- name: BurningBruce-004
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: AI2 Reasoning Challenge (25-Shot)
      type: ai2_arc
      config: ARC-Challenge
      split: test
      args:
        num_few_shot: 25
    metrics:
    - type: acc_norm
      value: 73.29
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ConvexAI/BurningBruce-004
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: HellaSwag (10-Shot)
      type: hellaswag
      split: validation
      args:
        num_few_shot: 10
    metrics:
    - type: acc_norm
      value: 88.63
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ConvexAI/BurningBruce-004
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU (5-Shot)
      type: cais/mmlu
      config: all
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 64.68
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ConvexAI/BurningBruce-004
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: TruthfulQA (0-shot)
      type: truthful_qa
      config: multiple_choice
      split: validation
      args:
        num_few_shot: 0
    metrics:
    - type: mc2
      value: 68.39
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ConvexAI/BurningBruce-004
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: Winogrande (5-shot)
      type: winogrande
      config: winogrande_xl
      split: validation
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 84.06
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ConvexAI/BurningBruce-004
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GSM8k (5-shot)
      type: gsm8k
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 70.58
      name: accuracy
    source:
      url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ConvexAI/BurningBruce-004
      name: Open LLM Leaderboard
---

![image/webp](https://cdn.discordapp.com/attachments/1194008951805714563/1197228918097313903/OIG.png?ex=65ba8151&is=65a80c51&hm=465a1a8f7aa4c0002017987123951efed25cd8d87d91f5a9ecc30d8c04e88f46&)

# BurningBruce-004

We didn't start the **fire**.

This model is a Mixture of Experts (MoE) made with [mergekit](https://github.com/cg123/mergekit/tree/mixtral) by Kquant03, Dontriskit and NeuralNovel.

[Join our Discord!](https://discord.gg/rJXGjmxqzS)

## Models used:
- [leveldevai/TurdusBeagle-7B](https://huggingface.co/leveldevai/TurdusBeagle-7B) - base
- [leveldevai/TurdusBeagle-7B](https://huggingface.co/leveldevai/TurdusBeagle-7B) - expert #1
- [udkai/Turdus](https://huggingface.co/udkai/Turdus) - expert #2
- [nfaheem/Marcoroni-7b-DPO-Merge](https://huggingface.co/nfaheem/Marcoroni-7b-DPO-Merge) - expert #3
- [Toten5/Marcoroni-neural-chat-7B-v2](https://huggingface.co/Toten5/Marcoroni-neural-chat-7B-v2) - expert #4

# "[What is a Mixture of Experts (MoE)?](https://huggingface.co/blog/moe)"
### (from the MistralAI papers... click the quoted question above to navigate to it directly.)

The scale of a model is one of the most important axes for better model quality. Given a fixed computing budget, training a larger model for fewer steps is better than training a smaller model for more steps.
Mixture of Experts enables models to be pretrained with far less compute, which means you can dramatically scale up the model or dataset size with the same compute budget as a dense model. In particular, a MoE model should achieve the same quality as its dense counterpart much faster during pretraining.

So, what exactly is a MoE? In the context of transformer models, a MoE consists of two main elements:

Sparse MoE layers are used instead of dense feed-forward network (FFN) layers. MoE layers have a certain number of “experts” (e.g. 32 in my "frankenMoE"), where each expert is a neural network. In practice, the experts are FFNs, but they can also be more complex networks or even a MoE itself, leading to hierarchical MoEs!

A gate network or router, that determines which tokens are sent to which expert. For example, in the image below, the token “More” is sent to the second expert, and the token “Parameters” is sent to the first network. As we’ll explore later, we can send a token to more than one expert. How to route a token to an expert is one of the big decisions when working with MoEs - the router is composed of learned parameters and is pretrained at the same time as the rest of the network.

At every layer, for every token, a router network chooses two of these groups (the “experts”) to process the token and combine their output additively.

![image/png](https://cdn-uploads.huggingface.co/production/uploads/6589d7e6586088fd2784a12c/up_I0R2TQGjqTShZp_1Sz.png)

Switch Layer: MoE layer from the [Switch Transformers paper](https://arxiv.org/abs/2101.03961)

So, to recap, in MoEs we replace every FFN layer of the transformer model with an MoE layer, which is composed of a gate network and a certain number of experts.

Although MoEs provide benefits like efficient pretraining and faster inference compared to dense models, they also come with challenges:

Training: MoEs enable significantly more compute-efficient pretraining, but they’ve historically struggled to generalize during fine-tuning, leading to overfitting.

Inference: Although a MoE might have many parameters, only some of them are used during inference. This leads to much faster inference compared to a dense model with the same number of parameters. However, all parameters need to be loaded in RAM, so memory requirements are high. For example, [given a MoE like Mixtral 8x7B](https://huggingface.co/blog/moe), we’ll need to have enough VRAM to hold a dense 47B parameter model. Why 47B parameters and not 8 x 7B = 56B? That’s because in MoE models, only the FFN layers are treated as individual experts, and the rest of the model parameters are shared. At the same time, assuming just two experts are being used per token, the inference speed (FLOPs) is like using a 12B model (as opposed to a 14B model), because it computes 2x7B matrix multiplications, but with some layers shared (more on this soon).

If all our tokens are sent to just a few popular experts, that will make training inefficient. In a normal MoE training, the gating network converges to mostly activate the same few experts. This self-reinforces, as favored experts are trained quicker and hence selected more. To mitigate this, an auxiliary loss is added to encourage giving all experts equal importance. This loss ensures that all experts receive a roughly equal number of training examples. The following sections will also explore the concept of expert capacity, which introduces a threshold of how many tokens can be processed by an expert.
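To make the router concrete, here is a minimal, self-contained PyTorch sketch of a top-2 gated MoE layer — an illustration of the mechanism described above, not the actual Mixtral or mergekit implementation:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class Top2MoE(nn.Module):
    """Minimal sparse MoE layer: a learned router sends each token to 2 of n_experts FFNs."""

    def __init__(self, d_model=64, d_ff=256, n_experts=4):
        super().__init__()
        self.router = nn.Linear(d_model, n_experts)  # the gate network
        self.experts = nn.ModuleList(
            [nn.Sequential(nn.Linear(d_model, d_ff), nn.GELU(), nn.Linear(d_ff, d_model))
             for _ in range(n_experts)]
        )

    def forward(self, x):  # x: (n_tokens, d_model)
        gate = F.softmax(self.router(x), dim=-1)           # routing probabilities per token
        weights, idx = gate.topk(2, dim=-1)                # top-2 experts for each token
        weights = weights / weights.sum(-1, keepdim=True)  # renormalize the two weights
        out = torch.zeros_like(x)
        for e, expert in enumerate(self.experts):
            for slot in range(2):                          # first and second choice
                mask = idx[:, slot] == e                   # tokens that picked expert e in this slot
                if mask.any():
                    out[mask] += weights[mask, slot].unsqueeze(-1) * expert(x[mask])
        return out

x = torch.randn(8, 64)     # 8 tokens
print(Top2MoE()(x).shape)  # torch.Size([8, 64])
```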
In transformers, the auxiliary loss is exposed via the aux_loss parameter.

## "Wait...but you called this a frankenMoE?"
The difference between a MoE and a "frankenMoE" is that in a model like this one, the router layer is not trained at the same time as the experts.

Sponsored by: [Dontriskit](https://huggingface.co/h2m)

# Evals

*coming soon*

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_ConvexAI__BurningBruce-004)

| Metric |Value|
|---------------------------------|----:|
|Avg. |74.94|
|AI2 Reasoning Challenge (25-Shot)|73.29|
|HellaSwag (10-Shot) |88.63|
|MMLU (5-Shot) |64.68|
|TruthfulQA (0-shot) |68.39|
|Winogrande (5-shot) |84.06|
|GSM8k (5-shot) |70.58|
abacusai/MetaMath-Bagel-DPO-34B
abacusai
2024-01-26T21:38:07Z
739
15
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "dataset:abacusai/MetaMathFewshot", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-25T04:43:52Z
---
license: apache-2.0
datasets:
- abacusai/MetaMathFewshot
---

![image/png](https://cdn-uploads.huggingface.co/production/uploads/64c14f6b02e1f8f67c73bd05/pf4d6FA7DriRtVq5HCkxd.png)

A DPO fine-tune of our [MetaMath SFT Model](https://huggingface.co/abacusai/MM-Orc-Vic-bagel-34b-c1000) on the [Truthy DPO dataset](https://huggingface.co/datasets/jondurbin/truthy-dpo-v0.1).

### Evaluation Results

| Average | ARC | HellaSwag | MMLU | TruthfulQA | Winogrande | GSM8K |
| --- | --- | --- | --- | --- | --- | --- |
| 75.54 | 69.20 | 84.34 | 76.46 | 67.58 | 82.87 | 72.78 |
CultriX/SevereNeuralBeagleTrix-7B
CultriX
2024-01-27T11:39:59Z
739
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "PetroGPT/WestSeverus-7B-DPO", "CultriX/MergeTrix-7B-v2", "mlabonne/NeuralBeagle14-7B", "base_model:PetroGPT/WestSeverus-7B-DPO", "base_model:CultriX/MergeTrix-7B-v2", "base_model:mlabonne/NeuralBeagle14-7B", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-25T07:22:27Z
---
tags:
- merge
- mergekit
- lazymergekit
- PetroGPT/WestSeverus-7B-DPO
- CultriX/MergeTrix-7B-v2
- mlabonne/NeuralBeagle14-7B
base_model:
- PetroGPT/WestSeverus-7B-DPO
- CultriX/MergeTrix-7B-v2
- mlabonne/NeuralBeagle14-7B
license: apache-2.0
---

# EDIT: Always check my space for the latest benchmark results for my models!
* https://huggingface.co/spaces/CultriX/Yet_Another_LLM_Leaderboard

# SevereNeuralBeagleTrix-7B

SevereNeuralBeagleTrix-7B is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [PetroGPT/WestSeverus-7B-DPO](https://huggingface.co/PetroGPT/WestSeverus-7B-DPO)
* [CultriX/MergeTrix-7B-v2](https://huggingface.co/CultriX/MergeTrix-7B-v2)
* [mlabonne/NeuralBeagle14-7B](https://huggingface.co/mlabonne/NeuralBeagle14-7B)

## 🧩 Configuration

```yaml
models:
  - model: mistralai/Mistral-7B-v0.1
    # No parameters necessary for base model
  - model: PetroGPT/WestSeverus-7B-DPO
    parameters:
      density: 0.53
      weight: 0.3
  - model: CultriX/MergeTrix-7B-v2
    parameters:
      density: 0.53
      weight: 0.4
  - model: mlabonne/NeuralBeagle14-7B
    parameters:
      density: 0.53
      weight: 0.3
merge_method: dare_ties
base_model: mistralai/Mistral-7B-v0.1
parameters:
  int8_mask: true
dtype: float16
```

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "CultriX/SevereNeuralBeagleTrix-7B"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
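For intuition about what `merge_method: dare_ties` with `density: 0.53` does to each model's weights, here is a rough sketch of DARE's drop-and-rescale step on a single tensor. This illustrates the idea only; it is not mergekit's code, and `dare_delta` is a made-up name:

```python
# Rough sketch of DARE's drop-and-rescale step (not mergekit's actual code).
# DARE operates on each fine-tuned model's "delta" from the base model: it
# randomly drops a fraction of the delta entries and rescales the survivors.
import torch

def dare_delta(base_weight: torch.Tensor, tuned_weight: torch.Tensor,
               density: float = 0.53) -> torch.Tensor:
    delta = tuned_weight - base_weight                       # task-specific difference
    keep_mask = torch.bernoulli(torch.full_like(delta, density))
    return delta * keep_mask / density                       # rescale to preserve expectation

# The kept, rescaled deltas from all models are then sign-resolved and
# weight-averaged (the TIES part) and added back onto the base weights.
```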
wang7776/Llama-2-7b-chat-hf-30-attention-sparsity
wang7776
2024-02-05T18:27:14Z
739
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "facebook", "meta", "pytorch", "llama-2", "conversational", "en", "arxiv:2306.11695", "license:other", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-26T19:34:53Z
---
extra_gated_heading: Access Llama 2 on Hugging Face
extra_gated_description: >-
  This is a form to enable access to Llama 2 on Hugging Face after you have
  been granted access from Meta. Please visit the [Meta
  website](https://ai.meta.com/resources/models-and-libraries/llama-downloads)
  and accept our license terms and acceptable use policy before submitting this
  form. Requests will be processed in 1-2 days.
extra_gated_button_content: Submit
extra_gated_fields:
  I agree to share my name, email address and username with Meta and confirm that I have already been granted download access on the Meta website: checkbox
language:
- en
pipeline_tag: text-generation
inference: false
tags:
- facebook
- meta
- pytorch
- llama
- llama-2
license: other
---

# Overview

This model has been pruned to 30% sparsity using the [Wanda pruning method](https://arxiv.org/abs/2306.11695) on attention layers. This method requires no retraining or weight updates and still achieves competitive performance. A link to the base model can be found [here](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). (A rough sketch of the Wanda scoring rule appears at the end of this card.)

# **Llama 2**

Llama 2 is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format. Links to other models can be found in the index at the bottom.

## Model Details

*Note: Use of this model is governed by the Meta license. In order to download the model weights and tokenizer, please visit the [website](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) and accept our License before requesting access here.*

Meta developed and publicly released the Llama 2 family of large language models (LLMs), a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.

**Model Developers** Meta

**Variations** Llama 2 comes in a range of parameter sizes (7B, 13B, and 70B) as well as pretrained and fine-tuned variations.

**Input** Models input text only.

**Output** Models generate text only.

**Model Architecture** Llama 2 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align to human preferences for helpfulness and safety.

||Training Data|Params|Content Length|GQA|Tokens|LR|
|---|---|---|---|---|---|---|
|Llama 2|*A new mix of publicly available online data*|7B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>|
|Llama 2|*A new mix of publicly available online data*|13B|4k|&#10007;|2.0T|3.0 x 10<sup>-4</sup>|
|Llama 2|*A new mix of publicly available online data*|70B|4k|&#10004;|2.0T|1.5 x 10<sup>-4</sup>|

*Llama 2 family of models.* Token counts refer to pretraining data only. All models are trained with a global batch size of 4M tokens. The bigger 70B model uses Grouped-Query Attention (GQA) for improved inference scalability.

**Model Dates** Llama 2 was trained between January 2023 and July 2023.

**Status** This is a static model trained on an offline dataset.
Future versions of the tuned models will be released as we improve model safety with community feedback.

**License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)

## Intended Use

**Intended Use Cases** Llama 2 is intended for commercial and research use in English. Tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks.

To get the expected features and performance for the chat versions, a specific formatting needs to be followed, including the `INST` and `<<SYS>>` tags, `BOS` and `EOS` tokens, and the whitespace and line breaks in between (we recommend calling `strip()` on inputs to avoid double spaces). See our reference code on GitHub for details: [`chat_completion`](https://github.com/facebookresearch/llama/blob/main/llama/generation.py#L212).

**Out-of-scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Llama 2.

## Hardware and Software

**Training Factors** We used custom training libraries, Meta's Research Super Cluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute.

**Carbon Footprint** Pretraining utilized a cumulative 3.3M GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 539 tCO2eq, 100% of which were offset by Meta's sustainability program.

||Time (GPU hours)|Power Consumption (W)|Carbon Emitted (tCO<sub>2</sub>eq)|
|---|---|---|---|
|Llama 2 7B|184320|400|31.22|
|Llama 2 13B|368640|400|62.44|
|Llama 2 70B|1720320|400|291.42|
|Total|3311616||539.00|

**CO<sub>2</sub> emissions during pretraining.** Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used, adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others.

## Training Data

**Overview** Llama 2 was pretrained on 2 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over one million new human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data.

**Data Freshness** The pretraining data has a cutoff of September 2022, but some tuning data is more recent, up to July 2023.

## Evaluation Results

In this section, we report the results for the Llama 1 and Llama 2 models on standard academic benchmarks. For all the evaluations, we use our internal evaluations library.
|Model|Size|Code|Commonsense Reasoning|World Knowledge|Reading Comprehension|Math|MMLU|BBH|AGI Eval|
|---|---|---|---|---|---|---|---|---|---|
|Llama 1|7B|14.1|60.8|46.2|58.5|6.95|35.1|30.3|23.9|
|Llama 1|13B|18.9|66.1|52.6|62.3|10.9|46.9|37.0|33.9|
|Llama 1|33B|26.0|70.0|58.4|67.6|21.4|57.8|39.8|41.7|
|Llama 1|65B|30.7|70.7|60.5|68.6|30.8|63.4|43.5|47.6|
|Llama 2|7B|16.8|63.9|48.9|61.3|14.6|45.3|32.6|29.3|
|Llama 2|13B|24.5|66.9|55.4|65.8|28.7|54.8|39.4|39.1|
|Llama 2|70B|**37.5**|**71.9**|**63.6**|**69.4**|**35.2**|**68.9**|**51.2**|**54.2**|

**Overall performance on grouped academic benchmarks.** *Code:* We report the average pass@1 scores of our models on HumanEval and MBPP. *Commonsense Reasoning:* We report the average of PIQA, SIQA, HellaSwag, WinoGrande, ARC easy and challenge, OpenBookQA, and CommonsenseQA. We report 7-shot results for CommonSenseQA and 0-shot results for all other benchmarks. *World Knowledge:* We evaluate the 5-shot performance on NaturalQuestions and TriviaQA and report the average. *Reading Comprehension:* For reading comprehension, we report the 0-shot average on SQuAD, QuAC, and BoolQ. *Math:* We report the average of the GSM8K (8 shot) and MATH (4 shot) benchmarks at top 1.

|||TruthfulQA|Toxigen|
|---|---|---|---|
|Llama 1|7B|27.42|23.00|
|Llama 1|13B|41.74|23.08|
|Llama 1|33B|44.19|22.57|
|Llama 1|65B|48.71|21.77|
|Llama 2|7B|33.29|**21.25**|
|Llama 2|13B|41.86|26.10|
|Llama 2|70B|**50.18**|24.60|

**Evaluation of pretrained LLMs on automatic safety benchmarks.** For TruthfulQA, we present the percentage of generations that are both truthful and informative (the higher the better). For ToxiGen, we present the percentage of toxic generations (the smaller the better).

|||TruthfulQA|Toxigen|
|---|---|---|---|
|Llama-2-Chat|7B|57.04|**0.00**|
|Llama-2-Chat|13B|62.18|**0.00**|
|Llama-2-Chat|70B|**64.14**|0.01|

**Evaluation of fine-tuned LLMs on different safety datasets.** Same metric definitions as above.

## Ethical Considerations and Limitations

Llama 2 is a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 2's potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 2, developers should perform safety testing and tuning tailored to their specific applications of the model.
Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide/](https://ai.meta.com/llama/responsible-use-guide)

## Reporting Issues

Please report any software "bug" or other problems with the models through one of the following means:
- Reporting issues with the model: [github.com/facebookresearch/llama](http://github.com/facebookresearch/llama)
- Reporting problematic content generated by the model: [developers.facebook.com/llama_output_feedback](http://developers.facebook.com/llama_output_feedback)
- Reporting bugs and security concerns: [facebook.com/whitehat/info](http://facebook.com/whitehat/info)

## Llama Model Index

|Model|Llama2|Llama2-hf|Llama2-chat|Llama2-chat-hf|
|---|---|---|---|---|
|7B| [Link](https://huggingface.co/llamaste/Llama-2-7b) | [Link](https://huggingface.co/llamaste/Llama-2-7b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-7b-chat-hf)|
|13B| [Link](https://huggingface.co/llamaste/Llama-2-13b) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-13b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-13b-hf)|
|70B| [Link](https://huggingface.co/llamaste/Llama-2-70b) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf) | [Link](https://huggingface.co/llamaste/Llama-2-70b-chat) | [Link](https://huggingface.co/llamaste/Llama-2-70b-hf)|
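Returning to the pruning note in the Overview: below is a rough sketch of the Wanda scoring rule from the cited paper, which ranks each weight by its magnitude times the norm of its input activations and zeroes the lowest-scoring fraction, with no retraining. It is an illustration, not the authors' implementation; `wanda_prune` and its calling convention are made up:

```python
# Rough sketch of the Wanda pruning criterion (arXiv:2306.11695), not the
# authors' code. Each weight is scored by |W| * ||X||_2, where ||X||_2 is the
# L2 norm of the corresponding input feature over calibration tokens, and the
# lowest-scoring weights are zeroed within each output row.
import torch

def wanda_prune(weight: torch.Tensor, act_norm: torch.Tensor,
                sparsity: float = 0.3) -> torch.Tensor:
    # weight: (out_features, in_features); act_norm: (in_features,)
    scores = weight.abs() * act_norm.unsqueeze(0)    # per-weight importance
    n_prune = int(weight.shape[1] * sparsity)        # weights to drop per row
    idx = torch.argsort(scores, dim=1)[:, :n_prune]  # lowest scores per row
    pruned = weight.clone()
    pruned.scatter_(1, idx, 0.0)                     # zero them out
    return pruned
```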
cloudyu/19B_MATH_DPO
cloudyu
2024-01-30T09:07:17Z
739
0
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "dpo", "moe", "conversational", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-30T08:53:06Z
---
license: other
tags:
- dpo
- moe
---

This is a DPO fine-tuned MoE model with about 19B parameters.

```
DPO Trainer
TRL supports the DPO Trainer for training language models from preference data, as described in the paper Direct Preference Optimization: Your Language Model is Secretly a Reward Model by Rafailov et al., 2023.
```
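To ground the quoted description, here is a minimal sketch of how a DPO fine-tune is typically wired up with TRL. The dataset name and hyperparameters are placeholders, and the exact `DPOTrainer` signature varies between trl versions (newer releases move options like `beta` into a `DPOConfig`):

```python
# Minimal sketch of a DPO fine-tune with trl; the dataset and hyperparameters
# are placeholders, and the DPOTrainer API differs slightly across trl versions.
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments
from trl import DPOTrainer

model_name = "your-sft-model"  # hypothetical: the SFT checkpoint to align
model = AutoModelForCausalLM.from_pretrained(model_name)
ref_model = AutoModelForCausalLM.from_pretrained(model_name)  # frozen reference
tokenizer = AutoTokenizer.from_pretrained(model_name)

# DPO expects preference pairs: columns "prompt", "chosen", "rejected".
dataset = load_dataset("your-preference-dataset", split="train")  # hypothetical

trainer = DPOTrainer(
    model=model,
    ref_model=ref_model,
    args=TrainingArguments(output_dir="dpo-out", per_device_train_batch_size=1,
                           gradient_accumulation_steps=8, learning_rate=5e-6),
    beta=0.1,  # strength of the implicit KL penalty toward the reference model
    train_dataset=dataset,
    tokenizer=tokenizer,
)
trainer.train()
```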
Manolo26/metis-chat-instruct-7b
Manolo26
2024-01-31T16:13:01Z
739
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "mlabonne/NeuralBeagle14-7B", "mlabonne/NeuralMarcoro14-7B", "conversational", "base_model:mlabonne/NeuralBeagle14-7B", "base_model:mlabonne/NeuralMarcoro14-7B", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-31T15:28:48Z
---
tags:
- merge
- mergekit
- lazymergekit
- mlabonne/NeuralBeagle14-7B
- mlabonne/NeuralMarcoro14-7B
base_model:
- mlabonne/NeuralBeagle14-7B
- mlabonne/NeuralMarcoro14-7B
license: apache-2.0
---

# metis-chat-instruct-7b

metis-chat-instruct-7b is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):
* [mlabonne/NeuralBeagle14-7B](https://huggingface.co/mlabonne/NeuralBeagle14-7B)
* [mlabonne/NeuralMarcoro14-7B](https://huggingface.co/mlabonne/NeuralMarcoro14-7B)

## 🧩 Configuration

```yaml
slices:
  - sources:
      - model: mlabonne/NeuralBeagle14-7B
        layer_range: [0, 32]
      - model: mlabonne/NeuralMarcoro14-7B
        layer_range: [0, 32]
merge_method: slerp
base_model: mlabonne/NeuralBeagle14-7B
parameters:
  t:
    - filter: self_attn
      value: [0, 0.5, 0.3, 0.7, 1]
    - filter: mlp
      value: [1, 0.5, 0.7, 0.3, 0]
    - value: 0.5
dtype: bfloat16
```

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "Manolo26/metis-chat-instruct-7b"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
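For intuition about the `slerp` merge above: rather than linearly averaging two weight tensors, spherical linear interpolation moves along the arc between them, with `t` controlling how far to go from the base model toward the other (the config varies `t` per layer and per module type via the `self_attn` and `mlp` filters). Below is a small sketch of the formula itself, not mergekit's implementation:

```python
# Sketch of spherical linear interpolation (slerp) between two weight tensors;
# this illustrates the formula, not mergekit's actual merge code.
import torch

def slerp(t: float, v0: torch.Tensor, v1: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
    a = v0 / (v0.norm() + eps)                           # unit directions
    b = v1 / (v1.norm() + eps)
    omega = torch.acos((a * b).sum().clamp(-1.0, 1.0))   # angle between tensors
    if omega.abs() < 1e-4:                               # nearly parallel: fall back to lerp
        return (1 - t) * v0 + t * v1
    so = torch.sin(omega)
    return (torch.sin((1 - t) * omega) / so) * v0 + (torch.sin(t * omega) / so) * v1

# t=0 returns the base model's tensor and t=1 the other model's; the config
# above keeps t near 0.5 for most layers, biased differently for attention
# and MLP blocks.
```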