modelId
stringlengths
5
122
author
stringlengths
2
42
last_modified
unknown
downloads
int64
0
738M
likes
int64
0
11k
library_name
stringclasses
245 values
tags
sequencelengths
1
4.05k
pipeline_tag
stringclasses
48 values
createdAt
unknown
card
stringlengths
1
901k
habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1
habanoz
"2024-06-18T17:12:40Z"
1,645
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "dataset:OpenAssistant/oasst_top1_2023-08-25", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-11-20T11:03:50Z"
--- language: - en license: apache-2.0 datasets: - OpenAssistant/oasst_top1_2023-08-25 pipeline_tag: text-generation model-index: - name: TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 31.48 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 54.4 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 25.47 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 42.34 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 57.54 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 1.44 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1 name: Open LLM Leaderboard --- TinyLlama-1.1B-intermediate-step-715k-1.5T finetuned using OpenAssistant/oasst_top1_2023-08-25 dataset. Qlora is used. Adapter is merged. 
SFT code: https://github.com/habanoz/qlora.git Command used: ```bash accelerate launch $BASE_DIR/qlora/train.py \ --model_name_or_path $BASE_MODEL \ --working_dir $BASE_DIR/$OUTPUT_NAME-checkpoints \ --output_dir $BASE_DIR/$OUTPUT_NAME-peft \ --merged_output_dir $BASE_DIR/$OUTPUT_NAME \ --final_output_dir $BASE_DIR/$OUTPUT_NAME-final \ --num_train_epochs 2 \ --logging_steps 1 \ --save_strategy steps \ --save_steps 75 \ --save_total_limit 2 \ --data_seed 11422 \ --evaluation_strategy steps \ --per_device_eval_batch_size 4 \ --eval_dataset_size 0.01 \ --eval_steps 75 \ --max_new_tokens 1024 \ --dataloader_num_workers 3 \ --logging_strategy steps \ --do_train \ --do_eval \ --lora_r 64 \ --lora_alpha 16 \ --lora_modules all \ --bits 4 \ --double_quant \ --quant_type nf4 \ --lr_scheduler_type constant \ --dataset oasst1-top1 \ --dataset_format oasst1 \ --model_max_len 1024 \ --per_device_train_batch_size 4 \ --gradient_accumulation_steps 4 \ --learning_rate 1e-5 \ --adam_beta2 0.999 \ --max_grad_norm 0.3 \ --lora_dropout 0.0 \ --weight_decay 0.0 \ --seed 11422 \ --gradient_checkpointing \ --use_flash_attention_2 \ --ddp_find_unused_parameters False ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_habanoz__TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1) | Metric |Value| |---------------------------------|----:| |Avg. |35.45| |AI2 Reasoning Challenge (25-Shot)|31.48| |HellaSwag (10-Shot) |54.40| |MMLU (5-Shot) |25.47| |TruthfulQA (0-shot) |42.34| |Winogrande (5-shot) |57.54| |GSM8k (5-shot) | 1.44|
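For reference, a minimal inference sketch with the `transformers` text-generation pipeline. This is not from the original card: the plain-text prompt and the sampling settings are assumptions, so check the tokenizer's chat template or the oasst1-top1 formatting before relying on it.

```python
# Minimal sketch (not from the card): load the merged model and generate a reply.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-2.2epochs-oasst1-top1-instruct-V1",
    device_map="auto",
)

# The prompt format below is an assumption; adapt it to the model's actual template.
prompt = "What is instruction tuning?"
outputs = generator(prompt, max_new_tokens=128, do_sample=True, temperature=0.7)
print(outputs[0]["generated_text"])
```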
hvein/5HmMqJpdvf9yadz79C1k4mAeFLdjGx7oqbcTkj2LMkLp9drL_vgg
hvein
"2024-03-05T07:31:58Z"
1,645
0
keras
[ "keras", "region:us" ]
null
"2024-02-03T19:13:04Z"
Entry not found
QuantFactory/Llama3-DiscoLeo-Instruct-8B-v0.1-GGUF
QuantFactory
"2024-06-04T08:08:50Z"
1,645
0
transformers
[ "transformers", "gguf", "text-generation", "de", "base_model:DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1", "license:llama3", "endpoints_compatible", "region:us" ]
text-generation
"2024-06-03T02:25:30Z"
--- license: llama3 language: - de library_name: transformers pipeline_tag: text-generation base_model: DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1 --- # Llama3-DiscoLeo-Instruct 8B-GGUF This is a quantized version of [DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1](https://huggingface.co/DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1) created using llama.cpp. # Model Description ## Thanks and Accreditation [DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1](https://huggingface.co/collections/DiscoResearch/discoleo-8b-llama3-for-german-6650527496c0fafefd4c9729) is the result of a joint effort between [DiscoResearch](https://huggingface.co/DiscoResearch) and [Occiglot](https://huggingface.co/occiglot) with support from the [DFKI](https://www.dfki.de/web/) (German Research Center for Artificial Intelligence) and [hessian.Ai](https://hessian.ai). Occiglot kindly handled data preprocessing, filtering, and deduplication as part of their latest [dataset release](https://huggingface.co/datasets/occiglot/occiglot-fineweb-v0.5), as well as sharing their compute allocation at hessian.Ai's 42 Supercomputer. ## Model Overview Llama3_DiscoLeo_Instruct_8B_v0 is an instruction-tuned version of our [Llama3-German-8B](https://huggingface.co/DiscoResearch/Llama3_German_8B). The base model was derived from [Meta's Llama3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) through continuous pretraining on 65 billion high-quality German tokens, similar to previous [LeoLM](https://huggingface.co/LeoLM) or [Occiglot](https://huggingface.co/collections/occiglot/occiglot-eu5-7b-v01-65dbed502a6348b052695e01) models. We finetuned this checkpoint on the German Instruction dataset from DiscoResearch created by [Jan-Philipp Harries](https://huggingface.co/jphme) and [Daniel Auras](https://huggingface.co/rasdani) ([DiscoResearch](https://huggingface.co/DiscoResearch), [ellamind](https://ellamind.com)). ## How to use Llama3_DiscoLeo_Instruct_8B_v0.1 uses the [Llama-3 chat template](https://github.com/meta-llama/llama3?tab=readme-ov-file#instruction-tuned-models), which can be easily used with [transformers' chat templating](https://huggingface.co/docs/transformers/main/en/chat_templating). See [below](https://huggingface.co/DiscoResearch/Llama3_DiscoLeo_Instruct_8B_v0.1#usage-example) for a usage example. ## Model Training and Hyperparameters The model was fully fine-tuned with axolotl on [hessian.Ai 42](https://hessian.ai) with a context length of 8192, a learning rate of 2e-5, and a batch size of 16. ## Evaluation and Results We evaluated the model using a suite of common English benchmarks and their German counterparts with [GermanBench](https://github.com/bjoernpl/GermanBenchmark). In the image and corresponding table below, you can see the benchmark scores for the different instruct models compared to Meta's instruct version. All checkpoints are available in this [collection](https://huggingface.co/collections/DiscoResearch/discoleo-8b-llama3-for-german-6650527496c0fafefd4c9729). 
![instruct scores](instruct_model_benchmarks.png) | Model | truthful_qa_de | truthfulqa_mc | arc_challenge | arc_challenge_de | hellaswag | hellaswag_de | MMLU | MMLU-DE | mean | |----------------------------------------------------|----------------|---------------|---------------|------------------|-------------|--------------|-------------|-------------|-------------| | meta-llama/Meta-Llama-3-8B-Instruct | 0.47498 | 0.43923 | **0.59642** | 0.47952 | **0.82025** | 0.60008 | **0.66658** | 0.53541 | 0.57656 | | DiscoResearch/Llama3-German-8B | 0.49499 | 0.44838 | 0.55802 | 0.49829 | 0.79924 | 0.65395 | 0.62240 | 0.54413 | 0.57743 | | DiscoResearch/Llama3-German-8B-32k | 0.48920 | 0.45138 | 0.54437 | 0.49232 | 0.79078 | 0.64310 | 0.58774 | 0.47971 | 0.55982 | | **DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1** | **0.53042** | 0.52867 | 0.59556 | **0.53839** | 0.80721 | 0.66440 | 0.61898 | 0.56053 | **0.60552** | | DiscoResearch/Llama3-DiscoLeo-Instruct-8B-32k-v0.1| 0.52749 | **0.53245** | 0.58788 | 0.53754 | 0.80770 | **0.66709** | 0.62123 | **0.56238** | 0.60547 | ## Model Configurations We release DiscoLeo-8B in the following configurations: 1. [Base model with continued pretraining](https://huggingface.co/DiscoResearch/Llama3_German_8B) 2. [Long-context version (32k context length)](https://huggingface.co/DiscoResearch/Llama3_German_8B_32k) 3. [Instruction-tuned version of the base model](https://huggingface.co/DiscoResearch/Llama3_DiscoLeo_Instruct_8B_v0.1) (This model) 4. [Instruction-tuned version of the long-context model](https://huggingface.co/DiscoResearch/Llama3_DiscoLeo_Instruct_8B_32k_v0.1) 5. [Experimental `DARE-TIES` Merge with Llama3-Instruct](https://huggingface.co/DiscoResearch/Llama3_DiscoLeo_8B_DARE_Experimental) 6. [Collection of Quantized versions](https://huggingface.co/collections/DiscoResearch/discoleo-8b-quants-6651bcf8f72c9a37ce485d42) ## Usage Example Here's how to use the model with transformers: ```python from transformers import AutoModelForCausalLM, AutoTokenizer import torch device="cuda" model = AutoModelForCausalLM.from_pretrained( "DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1", torch_dtype="auto", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("DiscoResearch/Llama3-DiscoLeo-Instruct-8B-v0.1") prompt = "Schreibe ein Essay über die Bedeutung der Energiewende für Deutschlands Wirtschaft" messages = [ {"role": "system", "content": "Du bist ein hilfreicher Assistent."}, {"role": "user", "content": prompt} ] text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) model_inputs = tokenizer([text], return_tensors="pt").to(device) generated_ids = model.generate( model_inputs.input_ids, max_new_tokens=512 ) generated_ids = [ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) ] response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` ## Acknowledgements The model was trained and evaluated by [Björn Plüster](https://huggingface.co/bjoernp) ([DiscoResearch](https://huggingface.co/DiscoResearch), [ellamind](https://ellamind.com)) with data preparation and project supervision by [Manuel Brack](http://manuel-brack.eu) ([DFKI](https://www.dfki.de/web/), [TU-Darmstadt](https://www.tu-darmstadt.de/)). Initial work on dataset collection and curation was performed by [Malte Ostendorff](https://ostendorff.org) and [Pedro Ortiz Suarez](https://portizs.eu). 
Instruction tuning was done with the DiscoLM German dataset created by [Jan-Philipp Harries](https://huggingface.co/jphme) and [Daniel Auras](https://huggingface.co/rasdani) ([DiscoResearch](https://huggingface.co/DiscoResearch), [ellamind](https://ellamind.com)). We extend our gratitude to [LAION](https://laion.ai/) and friends, especially [Christoph Schuhmann](https://entwickler.de/experten/christoph-schuhmann) and [Jenia Jitsev](https://huggingface.co/JJitsev), for initiating this collaboration. The model training was supported by a compute grant at the [42 supercomputer](https://hessian.ai/) which is a central component in the development of [hessian AI](https://hessian.ai/), the [AI Innovation Lab](https://hessian.ai/infrastructure/ai-innovationlab/) (funded by the [Hessian Ministry of Higher Education, Research and the Art (HMWK)](https://wissenschaft.hessen.de) & the [Hessian Ministry of the Interior, for Security and Homeland Security (HMinD)](https://innen.hessen.de)) and the [AI Service Centers](https://hessian.ai/infrastructure/ai-service-centre/) (funded by the [German Federal Ministry for Economic Affairs and Climate Action (BMWK)](https://www.bmwk.de/Navigation/EN/Home/home.html)). The curation of the training data is partially funded by the [German Federal Ministry for Economic Affairs and Climate Action (BMWK)](https://www.bmwk.de/Navigation/EN/Home/home.html) through the project [OpenGPT-X](https://opengpt-x.de/en/) (project no. 68GX21007D).
samuel-moreira/hr_resume-8b-q8_0-v3.gguf
samuel-moreira
"2024-06-27T14:52:23Z"
1,645
0
null
[ "gguf", "region:us" ]
null
"2024-06-27T14:48:05Z"
Entry not found
crumb/gpt2023
crumb
"2024-01-04T17:15:11Z"
1,644
17
transformers
[ "transformers", "pytorch", "safetensors", "gpt2", "text-generation", "causal-lm", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-04-30T02:23:04Z"
--- license: mit language: - en tags: - causal-lm --- # GPT2(023) Model Card This is the smallest GPT-2 model (124m) from OpenAI finetuned on approximately 2.23B tokens (almost the 2.48B needed to 'chinchilla-optimally' pretrain it! It's also more tokens than Cerebras-GPT-111M was trained on in total) consisting of 1.3B from common crawl sites from 2023, 540M from ArXiv, and 390M from GitHub. The model was trained with a learning rate of 1e-4, with a warmup of 1024 steps, then decaying to 0. There were 4400 total steps during training at a batch size of 512 examples with a context length of 1024. The batch size and context length are the same as the pre-training of GPT2 itself. Training took a total of 1.18e+18 FLOs over the course of 79.32 hours locally with a 12GB RTX 3060. Final train loss was 2.73. ### Evaluation of GPT2023 *(in progress)* | model | piqa acc | winogrande acc | lambada ppl | lambada acc | arc acc | sciq acc | wsc acc | | --- | --- | --- | --- | --- | --- | --- | --- | | pythia-70m | 59.85 | 51.22 | 140.81 | 21.40 | 17.15 | 65.00 | 36.53 | | pythia-160m | 62.68 | 51.07 | 30.03 | 36.76 | 19.62 | 76.20 | 36.58 | | pythia-410m | 66.54 | 52.24 | 11.75 | 49.93 | 21.67 | 80.80 | 60.58 | | opt-125m | 63.00 | 50.27 | 26.02 | 37.90 | 18.94 | 75.1 | 36.54 | | --- | --- | --- | --- | --- | --- | --- | --- | | gpt2 (124m) | **62.89** | **51.61** | 40.06 | 32.56 | **19.03** | 75 | **43.27** | | gpt2023 (124m) | 62.02 | 49.64 | **34.55** | **33.98** | 18.94 | **76.1** | 36.54 | The resulting model achieves a perplexity of 339.38, making it competitive with Cerebras-590m with only 21% of the parameters, and much better than the original GPT-2, which scores 491.57! (metric explanation here: https://twitter.com/aicrumb/status/1650350363898265601 , tldr it's a joke) To demonstrate how GPT2(023) is aware of recent events, let's take a look at a given example: ``` # About Covid-19 - - The Covid-19 ``` The model completes the text as: ``` # About Covid-19 - - The Covid-19 pandemic is the worldwide pandemic that has left thousands of people unable to enter and work in or continue their normal daily normal life. In this brief post, we examine three of the main factors that have accelerated the pandemic and predict the path the pandemic will take through the rest of the world. ``` As you can see, GPT2(023) can generate coherent and relevant text pertaining to the Covid-19 pandemic, showcasing its ability to understand recent events. However, it struggles with certain subjects that weren't extremely relevant in its training data. As only 2.23 billion tokens were used during finetuning, the model may have missed out on many recent events. One of those events is the latest US election. Given text in a question and answer format: ``` Q: Who is the last president? A: Donald Trump Q: Who is the most recent president? A: ``` The model completes the text with: `Barack Obama` ### Model description *(from GPT-2 model card)* GPT-2 is a transformer model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences. More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence, shifted one token (word or piece of word) to the right. 
The model uses internally a mask-mechanism to make sure the predictions for the token i only uses the inputs from 1 to i but not the future tokens. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a prompt. This is the smallest version of GPT-2, with 124M parameters. ### How to use You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='crumb/gpt2023') >>> set_seed(42) >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) [{'generated_text': "Hello, I'm a language model, a language for thinking, a language for expressing thoughts."}, {'generated_text': "Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don"}, {'generated_text': "Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help"}, {'generated_text': "Hello, I'm a language model, a system model. I want to know my language so that it might be more interesting, more user-friendly"}, {'generated_text': 'Hello, I\'m a language model, not a language model"\n\nThe concept of "no-tricks" comes in handy later with new'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import GPT2Tokenizer, GPT2Model tokenizer = GPT2Tokenizer.from_pretrained('crumb/gpt2023') model = GPT2Model.from_pretrained('crumb/gpt2023') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` ### Limitations and bias The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of unfiltered content from the internet, which is far from neutral. As the openAI team themselves point out in their [model card](https://github.com/openai/gpt-2/blob/master/model_card.md#out-of-scope-use-cases): > Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases > that require the generated text to be true. > > Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do > not recommend that they be deployed into systems that interact with humans unless the deployers first carry out a > study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, > and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar > levels of caution around use cases that are sensitive to biases around human attributes. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_crumb__gpt2023) | Metric | Value | |-----------------------|---------------------------| | Avg. | 24.85 | | ARC (25-shot) | 21.93 | | HellaSwag (10-shot) | 31.11 | | MMLU (5-shot) | 25.05 | | TruthfulQA (0-shot) | 40.71 | | Winogrande (5-shot) | 50.12 | | GSM8K (5-shot) | 0.3 | | DROP (3-shot) | 4.73 |
PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged
PocketDoc
"2023-05-20T09:58:09Z"
1,644
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "en", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-05-18T11:47:42Z"
--- language: - en --- ### Description: This is a llama 13b model merge of the LoRA with the same name. ### Objective for this project: To create a model that upholds a logical thread, regardless of whether the output is verbose or concise. Training has been performed on a version of the pile of sets, reduced to 40% of its original size, to expedite training iterations. I personally utilize this model as an aid for storytelling and writing. While it serves this purpose adequately, I still perceive this version as a prototype. ### Prompt format: Stanford Alpaca The prompt should start on a new line after "### Response:" - For examples with a non-empty input field: ``` Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Input: {input} ### Response: ``` - For examples with an empty input field: ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Response: ``` ### Perplexity Benchmarks: - wikitext: 4.66796875 ### Training information: - 2 Epochs - 64 / 32 R / A - 1024 Cutoff - 19 hours on an A6000 ### Data used in training: All cleaned and scrubbed in various ways then culled to various degrees. - Camel biology, physics, chemistry, math, and AI society - Alpaca evol instruct - GPTeacher Instruct - Alpaca GPT4 - Dolly Databricks ### Plans for the future, a brief overview: - Pivot to a conversational format going forward - Train another 13b LoRA against the entirety of my pile of sets rather than just a portion of it for Mk2 - Train 30b on the Mk2 pile of sets - Expand the story generation capabilities and likely more for Mk3 ### Model used for training and other information: https://huggingface.co/PocketDoc/llama-13b-gptq-4bit-128g Merge model: https://huggingface.co/huggyllama/llama-13b ### Disclaimer: It has not been aligned and no warranty is given for the quality or safety of its outputs.
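As a convenience, here is a hedged sketch of wrapping a request in the Alpaca template above and generating with `transformers`; the example instruction and sampling settings are illustrative rather than taken from the card.

```python
# Sketch only: build the Alpaca-style prompt described above and generate a continuation.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "PocketDoc/Dans-PileOfSets-Mk1-llama-13b-merged"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nWrite a short scene set in a lighthouse during a storm.\n\n"
    "### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.8)
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```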
h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt
h2oai
"2023-05-19T08:59:45Z"
1,644
3
transformers
[ "transformers", "pytorch", "llama", "text-generation", "gpt", "llm", "large language model", "h2o-llmstudio", "en", "dataset:OpenAssistant/oasst1", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-05-19T05:13:33Z"
--- language: - en library_name: transformers tags: - gpt - llm - large language model - h2o-llmstudio inference: false thumbnail: >- https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico license: apache-2.0 datasets: - OpenAssistant/oasst1 --- # Model Card ## Summary This model was trained using [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio). - Base model: [openlm-research/open_llama_7b_400bt_preview](https://huggingface.co/openlm-research/open_llama_7b_400bt_preview) - Dataset preparation: [OpenAssistant/oasst1](https://github.com/h2oai/h2o-llmstudio/blob/1935d84d9caafed3ee686ad2733eb02d2abfce57/app_utils/utils.py#LL1896C5-L1896C28) ## Usage To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers`, `accelerate` and `torch` libraries installed. ```bash pip install transformers==4.28.1 pip install accelerate==0.18.0 pip install torch==2.0.0 ``` ```python import torch from transformers import pipeline generate_text = pipeline( model="h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt", torch_dtype=torch.float16, trust_remote_code=True, use_fast=False, device_map={"": "cuda:0"}, ) res = generate_text( "Why is drinking water so healthy?", min_new_tokens=2, max_new_tokens=512, do_sample=False, num_beams=1, temperature=float(0.3), repetition_penalty=float(1.2), renormalize_logits=True ) print(res[0]["generated_text"]) ``` You can print a sample prompt after the preprocessing step to see how it is feed to the tokenizer: ```python print(generate_text.preprocess("Why is drinking water so healthy?")["prompt_text"]) ``` ```bash <|prompt|>Why is drinking water so healthy?</s><|answer|> ``` Alternatively, if you prefer to not use `trust_remote_code=True` you can download [h2oai_pipeline.py](h2oai_pipeline.py), store it alongside your notebook, and construct the pipeline yourself from the loaded model and tokenizer: ```python import torch from h2oai_pipeline import H2OTextGenerationPipeline from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt", use_fast=False, padding_side="left" ) model = AutoModelForCausalLM.from_pretrained( "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt", torch_dtype=torch.float16, device_map={"": "cuda:0"} ) generate_text = H2OTextGenerationPipeline(model=model, tokenizer=tokenizer) res = generate_text( "Why is drinking water so healthy?", min_new_tokens=2, max_new_tokens=512, do_sample=False, num_beams=1, temperature=float(0.3), repetition_penalty=float(1.2), renormalize_logits=True ) print(res[0]["generated_text"]) ``` You may also construct the pipeline from the loaded model and tokenizer yourself and consider the preprocessing steps: ```python from transformers import AutoModelForCausalLM, AutoTokenizer model_name = "h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt" # either local folder or huggingface model name # Important: The prompt needs to be in the same format the model was trained with. # You can find an example prompt in the experiment logs. 
prompt = "<|prompt|>How are you?</s><|answer|>" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) model = AutoModelForCausalLM.from_pretrained(model_name) model.cuda().eval() inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda") # generate configuration can be modified to your needs tokens = model.generate( **inputs, min_new_tokens=2, max_new_tokens=512, do_sample=False, num_beams=1, temperature=float(0.3), repetition_penalty=float(1.2), renormalize_logits=True )[0] tokens = tokens[inputs["input_ids"].shape[1]:] answer = tokenizer.decode(tokens, skip_special_tokens=True) print(answer) ``` ## Model Architecture ``` LlamaForCausalLM( (model): LlamaModel( (embed_tokens): Embedding(32000, 4096, padding_idx=0) (layers): ModuleList( (0-31): 32 x LlamaDecoderLayer( (self_attn): LlamaAttention( (q_proj): Linear(in_features=4096, out_features=4096, bias=False) (k_proj): Linear(in_features=4096, out_features=4096, bias=False) (v_proj): Linear(in_features=4096, out_features=4096, bias=False) (o_proj): Linear(in_features=4096, out_features=4096, bias=False) (rotary_emb): LlamaRotaryEmbedding() ) (mlp): LlamaMLP( (gate_proj): Linear(in_features=4096, out_features=11008, bias=False) (down_proj): Linear(in_features=11008, out_features=4096, bias=False) (up_proj): Linear(in_features=4096, out_features=11008, bias=False) (act_fn): SiLUActivation() ) (input_layernorm): LlamaRMSNorm() (post_attention_layernorm): LlamaRMSNorm() ) ) (norm): LlamaRMSNorm() ) (lm_head): Linear(in_features=4096, out_features=32000, bias=False) ) ``` ## Model Configuration This model was trained using H2O LLM Studio and with the configuration in [cfg.yaml](cfg.yaml). Visit [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to learn how to train your own large language models. ## Model Validation Model validation results using [EleutherAI lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness). ```bash CUDA_VISIBLE_DEVICES=0 python main.py --model hf-causal-experimental --model_args pretrained=h2oai/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq --device cuda &> eval.log ``` ## Disclaimer Please read this disclaimer carefully before using the large language model provided in this repository. Your use of the model signifies your agreement to the following terms and conditions. - Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints. - Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion. - Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model. - Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. 
By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities. - Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues. - Changes to this Disclaimer: The developers of this repository reserve the right to modify or update this disclaimer at any time without prior notice. It is the user's responsibility to periodically review the disclaimer to stay informed about any changes. By using the large language model provided in this repository, you agree to accept and comply with the terms and conditions outlined in this disclaimer. If you do not agree with any part of this disclaimer, you should refrain from using the model and any content generated by it.
nicholasKluge/Aira-2-355M
nicholasKluge
"2024-06-18T11:22:22Z"
1,644
1
transformers
[ "transformers", "pytorch", "safetensors", "gpt2", "text-generation", "alignment", "instruction tuned", "text generation", "conversation", "assistant", "en", "dataset:nicholasKluge/instruct-aira-dataset", "arxiv:1803.05457", "arxiv:2109.07958", "arxiv:2203.09509", "license:apache-2.0", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-06-08T00:18:26Z"
--- datasets: - nicholasKluge/instruct-aira-dataset language: - en metrics: - accuracy library_name: transformers tags: - alignment - instruction tuned - text generation - conversation - assistant pipeline_tag: text-generation widget: - text: "<|startofinstruction|>Can you explain what is Machine Learning?<|endofinstruction|>" example_title: Machine Learning - text: "<|startofinstruction|>Do you know anything about virtue ethics?<|endofinstruction|>" example_title: Ethics - text: "<|startofinstruction|>How can I make my girlfriend happy?<|endofinstruction|>" example_title: Advise inference: parameters: repetition_penalty: 1.2 temperature: 0.2 top_k: 30 top_p: 0.3 max_new_tokens: 200 length_penalty: 0.3 early_stopping: true co2_eq_emissions: emissions: 290 source: CodeCarbon training_type: fine-tuning geographical_location: United States of America hardware_used: NVIDIA A100-SXM4-40GB license: apache-2.0 --- # Aira-2-355M Aira-2 is the second version of the Aira instruction-tuned series. Aira-2-355M is an instruction-tuned model based on [GPT-2](https://huggingface.co/gpt2-medium). The model was trained with a dataset composed of prompts and completions generated synthetically by prompting already-tuned models (ChatGPT, Llama, Open-Assistant, etc). Check our gradio-demo in [Spaces](https://huggingface.co/spaces/nicholasKluge/Aira-Demo). ## Details - **Size:** 354,825,216 parameters - **Dataset:** [Instruct-Aira Dataset](https://huggingface.co/datasets/nicholasKluge/instruct-aira-dataset) - **Language:** English - **Number of Epochs:** 3 - **Batch size:** 16 - **Optimizer:** `torch.optim.AdamW` (warmup_steps = 1e2, learning_rate = 5e-4, epsilon = 1e-8) - **GPU:** 1 NVIDIA A100-SXM4-40GB - **Emissions:** 0.29 KgCO2 (United States of America) - **Total Energy Consumption:** 0.83 kWh This repository has the [source code](https://github.com/Nkluge-correa/Aira) used to train this model. ## Usage Three special tokens are used to mark the user side of the interaction and the model's response: `<|startofinstruction|>`What is a language model?`<|endofinstruction|>`A language model is a probability distribution over a vocabulary.`<|endofcompletion|>` ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch device = torch.device("cuda" if torch.cuda.is_available() else "cpu") tokenizer = AutoTokenizer.from_pretrained('nicholasKluge/Aira-2-355M') aira = AutoModelForCausalLM.from_pretrained('nicholasKluge/Aira-2-355M') aira.eval() aira.to(device) question = input("Enter your question: ") inputs = tokenizer(tokenizer.bos_token + question + tokenizer.sep_token, add_special_tokens=False, return_tensors="pt").to(device) responses = aira.generate(**inputs, num_return_sequences=2) print(f"Question: 👤 {question}\n") for i, response in enumerate(responses): print(f'Response {i+1}: 🤖 {tokenizer.decode(response, skip_special_tokens=True).replace(question, "")}') ``` The model will output something like: ```markdown >>>Question: 👤 What is the capital of Brazil? >>>Response 1: 🤖 The capital of Brazil is Brasília. >>>Response 2: 🤖 The capital of Brazil is Brasília. ``` ## Limitations - **Hallucinations:** This model can produce content that can be mistaken for truth but is, in fact, misleading or entirely false, i.e., hallucination. - **Biases and Toxicity:** This model inherits the social and historical stereotypes from the data used to train it. Given these biases, the model can produce toxic content, i.e., harmful, offensive, or detrimental to individuals, groups, or communities. 
- **Repetition and Verbosity:** The model may get stuck on repetition loops (especially if the repetition penalty during generations is set to a meager value) or produce verbose responses unrelated to the prompt it was given. ## Evaluation |Model |Average |[ARC](https://arxiv.org/abs/1803.05457) |[TruthfulQA](https://arxiv.org/abs/2109.07958) |[ToxiGen](https://arxiv.org/abs/2203.09509) | | ---------------------------------------------------------------------- | -------- | -------------------------------------- | --------------------------------------------- | ------------------------------------------ | |[Aira-2-124M-DPO](https://huggingface.co/nicholasKluge/Aira-2-124M-DPO) |**40.68** |**24.66** |**42.61** |**54.79** | |[Aira-2-124M](https://huggingface.co/nicholasKluge/Aira-2-124M) |38.07 |24.57 |41.02 |48.62 | |GPT-2 |35.37 |21.84 |40.67 |43.62 | |[Aira-2-355M](https://huggingface.co/nicholasKluge/Aira-2-355M) |**39.68** |**27.56** |38.53 |**53.19** | |GPT-2-medium |36.43 |27.05 |**40.76** |41.49 | |[Aira-2-774M](https://huggingface.co/nicholasKluge/Aira-2-774M) |**42.26** |**28.75** |**41.33** |**56.70** | |GPT-2-large |35.16 |25.94 |38.71 |40.85 | |[Aira-2-1B5](https://huggingface.co/nicholasKluge/Aira-2-1B5) |**42.22** |28.92 |**41.16** |**56.60** | |GPT-2-xl |36.84 |**30.29** |38.54 |41.70 | * Evaluations were performed using the [Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) (by [EleutherAI](https://www.eleuther.ai/)). ## Cite as 🤗 ```latex @misc{nicholas22aira, doi = {10.5281/zenodo.6989727}, url = {https://github.com/Nkluge-correa/Aira}, author = {Nicholas Kluge Corrêa}, title = {Aira}, year = {2023}, publisher = {GitHub}, journal = {GitHub repository}, } @phdthesis{kluge2024dynamic, title={Dynamic Normativity}, author={Kluge Corr{\^e}a, Nicholas}, year={2024}, school={Universit{\"a}ts-und Landesbibliothek Bonn} } ``` ## License Aira-2-355M is licensed under the Apache License, Version 2.0. See the [LICENSE](LICENSE) file for more details.
CalderaAI/13B-BlueMethod
CalderaAI
"2023-07-20T03:29:47Z"
1,644
7
transformers
[ "transformers", "pytorch", "llama", "text-generation", "alpaca", "cot", "vicuna", "uncensored", "merge", "mix", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-07T06:10:55Z"
--- tags: - llama - alpaca - cot - vicuna - uncensored - merge - mix --- ## 13B-BlueMethod ## Composition: BlueMethod is a bit of a convoluted experiment in tiered merging. Adding to the experimental nature of the merge, the models were combined with a custom script that randomized the percentage of each layer merged from one model to the next. This is a warmup for a larger project. [Tier One and Two Merges not released; internal naming convention] Tier One Merges: 13B-Metharme+13B-Nous-Hermes=13B-Methermes 13B-Vicuna-cocktail+13B-Manticore=13B-Vicortia 13B-HyperMantis+13B-Alpacino=13B-PsychoMantis Tier Two Merges: 13B-Methermes+13B-Vicortia=13B-Methphistopheles 13B-PsychoMantis+13B-BlueMoonRP=13B-BlueMantis Tier Three Merge: 13B-Methphistopheles+13B-BlueMantis=13B-BlueMethod ## Use: Multiple instruct models and model composites were combined to make the final resulting model; it is highly open to experimental prompting, and both Alpaca and Vicuna instruct formats can be used. It can produce interesting results. ## Language Models and LoRAs Used Credits: 13B-Metharme by PygmalionAI https://www.huggingface.co/PygmalionAI/metharme-13b 13B-Nous-Hermes by NousResearch https://www.huggingface.co/NousResearch/Nous-Hermes-13b 13B-Vicuna-cocktail by reeducator https://www.huggingface.co/reeducator/vicuna-13b-cocktail 13B-Manticore by openaccess-ai-collective https://www.huggingface.co/openaccess-ai-collective/manticore-13b 13B-HyperMantis and 13B-Alpacino by Digitous https://huggingface.co/digitous/13B-HyperMantis https://huggingface.co/digitous/Alpacino13b Also thanks to Meta for LLaMA. Each model and LoRA was hand-picked and considered for what it could contribute to this ensemble. Thanks to each and every one of you for your incredible work developing some of the best things to come out of this community.
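The randomized per-layer merge script mentioned above has not been released; the following is only a generic sketch of the idea (random weighted averaging of corresponding tensors), with placeholder model names, and is not the author's actual tooling.

```python
# Illustrative sketch of a randomized weighted merge of two same-architecture models.
# "model-a" and "model-b" are placeholders, not the actual checkpoints used here.
import random
import torch
from transformers import AutoModelForCausalLM

model_a = AutoModelForCausalLM.from_pretrained("model-a", torch_dtype=torch.float16)
model_b = AutoModelForCausalLM.from_pretrained("model-b", torch_dtype=torch.float16)

state_a, state_b = model_a.state_dict(), model_b.state_dict()
merged = {}
for name, tensor_a in state_a.items():
    ratio = random.uniform(0.0, 1.0)  # random blend percentage per tensor
    merged[name] = ratio * tensor_a + (1.0 - ratio) * state_b[name]

model_a.load_state_dict(merged)
model_a.save_pretrained("merged-model")
```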
psyche/kollama2-7b
psyche
"2023-11-18T10:27:11Z"
1,644
4
transformers
[ "transformers", "pytorch", "llama", "text-generation", "en", "ko", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-23T02:23:47Z"
--- license: apache-2.0 language: - en - ko --- # Fine-tuned Llama-2 Model with Korean Corpus This model is a Llama-2 model further trained on Korean datasets. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_psyche__kollama2-7b) | Metric | Value | |-----------------------|---------------------------| | Avg. | 43.71 | | ARC (25-shot) | 53.24 | | HellaSwag (10-shot) | 78.78 | | MMLU (5-shot) | 42.31 | | TruthfulQA (0-shot) | 44.56 | | Winogrande (5-shot) | 73.95 | | GSM8K (5-shot) | 5.99 | | DROP (3-shot) | 7.11 |
julianweng/Llama-2-7b-chat-orcah
julianweng
"2023-07-31T14:26:15Z"
1,644
1
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-23T02:29:59Z"
Entry not found
jondurbin/airoboros-l2-70b-gpt4-1.4.1
jondurbin
"2023-08-04T20:51:12Z"
1,644
48
transformers
[ "transformers", "pytorch", "llama", "text-generation", "dataset:jondurbin/airoboros-gpt4-1.4.1", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-24T08:20:31Z"
--- license: other datasets: - jondurbin/airoboros-gpt4-1.4.1 --- ### Overview Llama 2 70B fine-tune using https://huggingface.co/datasets/jondurbin/airoboros-gpt4-1.4.1 See the previous llama 65b model card for info: https://hf.co/jondurbin/airoboros-65b-gpt4-1.4 ### Contribute If you're interested in new functionality, particularly a new "instructor" type to generate a specific type of training data, take a look at the dataset generation tool repo: https://github.com/jondurbin/airoboros and either make a PR or open an issue with details. To help me with the OpenAI/compute costs: - https://bmc.link/jondurbin - ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11 - BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf ### Licence and usage restrictions Base model has a custom Meta license: - See the [meta-license/LICENSE.txt](meta-license/LICENSE.txt) file attached for the original license provided by Meta. - See also [meta-license/USE_POLICY.md](meta-license/USE_POLICY.md) and [meta-license/Responsible-Use-Guide.pdf](meta-license/Responsible-Use-Guide.pdf), also provided by Meta. The fine-tuning data was generated by OpenAI API calls to gpt-4, via [airoboros](https://github.com/jondurbin/airoboros) The ToS for OpenAI API usage has a clause preventing the output from being used to train a model that __competes__ with OpenAI - what does *compete* actually mean here? - these small open source models will not produce output anywhere near the quality of gpt-4, or even gpt-3.5, so I can't imagine this could credibly be considered competing in the first place - if someone else uses the dataset to do the same, they wouldn't necessarily be violating the ToS because they didn't call the API, so I don't know how that works - the training data used in essentially all large language models includes a significant amount of copyrighted or otherwise non-permissive licensing in the first place - other work using the self-instruct method, e.g. the original here: https://github.com/yizhongw/self-instruct released the data and model as apache-2 I am purposely leaving this license ambiguous (other than the fact you must comply with the Meta original license for llama-2) because I am not a lawyer and refuse to attempt to interpret all of the terms accordingly. Your best bet is probably to avoid using this commercially due to the OpenAI API usage. Either way, by using this model, you agree to completely indemnify me.
TaylorAI/Flash-Llama-7B
TaylorAI
"2023-08-29T04:36:49Z"
1,644
1
transformers
[ "transformers", "pytorch", "llama", "text-generation", "custom_code", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-19T18:26:45Z"
Entry not found
acrastt/Marx-3B-V2
acrastt
"2024-02-03T03:37:03Z"
1,644
25
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "en", "dataset:totally-not-an-llm/EverythingLM-data-V2-sharegpt", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-22T22:41:21Z"
--- language: - en license: apache-2.0 library_name: transformers datasets: - totally-not-an-llm/EverythingLM-data-V2-sharegpt model-index: - name: Marx-3B-V2 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 44.03 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=acrastt/Marx-3B-V2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 72.92 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=acrastt/Marx-3B-V2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 27.84 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=acrastt/Marx-3B-V2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 39.92 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=acrastt/Marx-3B-V2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 66.54 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=acrastt/Marx-3B-V2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 1.21 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=acrastt/Marx-3B-V2 name: Open LLM Leaderboard --- <a href="https://www.buymeacoffee.com/acrastt" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a> This is [OpenLLaMA 3B V2](https://huggingface.co/openlm-research/open_llama_3b_v2) finetuned on [EverythingLM Data V2(ShareGPT format)](https://huggingface.co/datasets/totally-not-an-llm/EverythingLM-data-V2-sharegpt) for 2 epochs. Prompt template: ``` ### HUMAN: {prompt} ### RESPONSE: <leave a newline for the model to answer> ``` q4_1 GGML quant available [here](https://huggingface.co/NikolayKozloff/Marx-3B-V2/).</br> q4_1 GGUF quant available [here]( https://huggingface.co/NikolayKozloff/Marx-3B-V2-GGUF/). # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_acrastt__Marx-3B-V2) | Metric |Value| |---------------------------------|----:| |Avg. |42.08| |AI2 Reasoning Challenge (25-Shot)|44.03| |HellaSwag (10-Shot) |72.92| |MMLU (5-Shot) |27.84| |TruthfulQA (0-shot) |39.92| |Winogrande (5-shot) |66.54| |GSM8k (5-shot) | 1.21|
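A brief, hedged usage sketch for the prompt template above; the question and generation settings are illustrative and not part of the original card.

```python
# Sketch: wrap a question in the "### HUMAN:" / "### RESPONSE:" template and generate.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "acrastt/Marx-3B-V2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

prompt = "### HUMAN:\nExplain photosynthesis in one paragraph.\n\n### RESPONSE:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=200)
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```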
AlekseyKorshuk/vic15-exp-syn-fight-cp3838
AlekseyKorshuk
"2023-08-25T14:56:05Z"
1,644
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-25T14:49:31Z"
Entry not found
zarakiquemparte/zaraxls-l2-7b
zarakiquemparte
"2023-08-27T16:39:30Z"
1,644
1
transformers
[ "transformers", "pytorch", "llama", "text-generation", "llama2", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-27T02:43:05Z"
--- license: other tags: - llama2 --- # Model Card: ZaraXLS L2 7b This model uses [Nous Hermes Llama2 7b](https://huggingface.co/NousResearch/Nous-Hermes-llama-2-7b) (53%) as a base with [Stable Beluga 7b](https://huggingface.co/stabilityai/StableBeluga-7B) (47%), and the result of this merge was then merged with [Airoboros LMOE Creative 7b 2.1](https://huggingface.co/jondurbin/airoboros-lmoe-7b-2.1) and the [LimaRP LLama2 7B LoRA, 07/23/2023 version](https://huggingface.co/lemonilia/limarp-llama2). The merge of the models (Hermes and Stable Beluga) was done with this [script](https://github.com/zarakiquemparte/zaraki-tools/blob/main/merge-cli.py). The merge of the LoRA with the model was done with this [script](https://github.com/zarakiquemparte/zaraki-tools/blob/main/apply-lora.py). Merge illustration: ![illustration](zaraxls-merge-illustration.png) ## Usage: Since this is a merge between Nous Hermes, Stable Beluga and LimaRP, the following instruction formats should work: Alpaca 2: ``` ### Instruction: <prompt> ### Response: <leave a newline blank for model to respond> ``` LimaRP instruction format: ``` <<SYSTEM>> <character card and system prompt> <<USER>> <prompt> <<AIBOT>> <leave a newline blank for model to respond> ``` ## Bias, Risks, and Limitations This model is not intended for supplying factual information or advice in any form. ## Training Details This model is merged and can be reproduced using the tools mentioned above. Please refer to all provided links for extra model-specific details.
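The merge scripts linked above are not reproduced here, but the general pattern of baking a LoRA into a base model can be sketched with `peft`. This is a generic illustration with placeholder names, not the zaraki-tools scripts themselves.

```python
# Generic LoRA-merge sketch using peft; "base-model" and "lora-adapter" are placeholders.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("base-model", torch_dtype=torch.float16)
lora = PeftModel.from_pretrained(base, "lora-adapter")
merged = lora.merge_and_unload()  # fold the LoRA weights into the base model
merged.save_pretrained("merged-output")
```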
CHIH-HUNG/llama-2-13b-FINETUNE2_TEST_2.2w
CHIH-HUNG
"2023-09-14T01:02:41Z"
1,644
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "dataset:huangyt/FINETUNE2_TEST", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-04T03:55:46Z"
--- license: llama2 datasets: - huangyt/FINETUNE2_TEST --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> Trained from llama-2-13b on the huangyt/FINETUNE2_TEST dataset, about 22k training examples in total. # Fine-Tuning Information - **GPU:** RTX4090 (single core / 24564MiB) - **model:** meta-llama/Llama-2-13b-hf - **dataset:** huangyt/FINETUNE2_TEST (about 22k training examples) - **peft_type:** LoRA - **lora_rank:** 8 - **lora_target:** gate_proj, up_proj, down_proj - **per_device_train_batch_size:** 8 - **gradient_accumulation_steps:** 8 - **learning_rate:** 5e-5 - **epoch:** 1 - **precision:** bf16 - **quantization:** load_in_4bit # Fine-Tuning Detail - **train_loss:** 0.567 - **train_runtime:** 2:47:57 (using DeepSpeed) # Evaluation - Evaluation results are from **HuggingFaceH4/open_llm_leaderboard** - Compared with Llama-2-13b on 4 benchmarks: **ARC**, **HellaSwag**, **MMLU**, and **TruthfulQA** | Model |Average| ARC |HellaSwag| MMLU |TruthfulQA| |------------------------------------------|-------|-------|---------|-------|----------| |meta-llama/Llama-2-13b-hf | 56.9 | 58.11 | 80.97 | 54.34 | 34.17 | |meta-llama/Llama-2-13b-chat-hf | 59.93 | 59.04 | 81.94 | 54.64 | 44.12 | |CHIH-HUNG/llama-2-13b-FINETUNE2_TEST_2.2w | 58.46 | 56.23 | 82.7 | 55.35 | 39.55 | # How to convert dataset to json - Enter the dataset name in **load_dataset**, and set how many leading rows to take in **take** - Check the dataset's field names and fill them into the **example** fields (e.g. system_prompt, question, response) - Finally, specify where to save the JSON file (**json_filename**) ```py import json from datasets import load_dataset # Load the dataset; take can fetch the first n rows dataset = load_dataset("huangyt/FINETUNE2_TEST", split="train", streaming=True) # Extract the required fields and build a new list of dicts extracted_data = [] for example in dataset: extracted_example = { "instruction": example["instruction"], "input": example["input"], "output": example["output"] } extracted_data.append(extracted_example) # Specify the JSON file name json_filename = "FINETUNE2_TEST.json" # Write the JSON file with open(json_filename, "w") as json_file: json.dump(extracted_data, json_file, indent=4) print(f"Data extracted and saved to {json_filename}") ```
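For illustration only, the hyperparameters listed above could be expressed as a `peft`/`bitsandbytes` configuration along the following lines; this is a hedged sketch, not the original training script, and values not listed in the card (e.g. lora_alpha, dropout) are left at library defaults.

```python
# Sketch of a LoRA + 4-bit setup matching the listed hyperparameters; not the original script.
import torch
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-hf",
    quantization_config=bnb_config,
    device_map="auto",
)

lora_config = LoraConfig(
    r=8,  # lora_rank
    target_modules=["gate_proj", "up_proj", "down_proj"],  # lora_target
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)  # wrap the base model with LoRA adapters
model.print_trainable_parameters()
```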
lu-vae/llama2-13B-sharegpt4-orca-openplatypus-8w
lu-vae
"2023-09-14T06:22:22Z"
1,644
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-14T03:36:04Z"
--- license: llama2 ---
elliotthwang/Elliott-Chinese-LLaMa-GPTQ-V2.0
elliotthwang
"2023-09-16T03:30:12Z"
1,644
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-16T03:10:48Z"
Entry not found
CHIH-HUNG/llama-2-13b-FINETUNE4_3.8w-r4-gate_up_down
CHIH-HUNG
"2023-09-21T05:13:14Z"
1,644
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-21T04:49:23Z"
Entry not found
meta-math/MetaMath-70B-V1.0
meta-math
"2023-12-21T01:36:56Z"
1,644
16
transformers
[ "transformers", "pytorch", "llama", "text-generation", "dataset:meta-math/MetaMathQA", "arxiv:2309.12284", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-22T03:20:33Z"
--- license: llama2 datasets: - meta-math/MetaMathQA --- See our paper at https://arxiv.org/abs/2309.12284 and view the project page at https://meta-math.github.io/ ## Note All MetaMathQA data are augmented from the training sets of GSM8K and MATH. <span style="color:red"><b>None of the augmented data is from the testing set.</b></span> You can check the `original_question` in `meta-math/MetaMathQA`; each item is from the GSM8K or MATH train set. ## Model Details MetaMath-Llemma-7B is fully fine-tuned on the MetaMathQA datasets and based on the powerful Llemma-7B model. We are glad to see that using the MetaMathQA datasets and changing the base model from llama-2-7B to Llemma-7B boosts MATH performance from 19.8 to **30.0**. ## Installation ``` pip install transformers==4.35.0 pip install torch==2.0.1 pip install sentencepiece==0.1.99 pip install tokenizers==0.13.3 pip install accelerate==0.21.0 pip install bitsandbytes==0.40.0 pip install vllm pip install fraction pip install protobuf ``` ## Model Usage prompting template: ''' "Below is an instruction that describes a task. " "Write a response that appropriately completes the request.\n\n" "### Instruction:\n{instruction}\n\n### Response: Let's think step by step." ''' where you replace {instruction} with your query question. ## Experiments | Model | GSM8k Pass@1 | MATH Pass@1 | |---------------------|--------------|-------------| | MPT-7B | 6.8 | 3.0 | | Falcon-7B | 6.8 | 2.3 | | LLaMA-1-7B | 11.0 | 2.9 | | LLaMA-2-7B | 14.6 | 2.5 | | MPT-30B | 15.2 | 3.1 | | LLaMA-1-13B | 17.8 | 3.9 | | GPT-Neo-2.7B | 19.5 | -- | | Falcon-40B | 19.6 | 2.5 | | Baichuan-chat-13B | 23.9 | -- | | Vicuna-v1.3-13B | 27.6 | -- | | LLaMA-2-13B | 28.7 | 3.9 | | InternLM-7B | 31.2 | -- | | ChatGLM-2-6B | 32.4 | -- | | GPT-J-6B | 34.9 | -- | | LLaMA-1-33B | 35.6 | 3.9 | | LLaMA-2-34B | 42.2 | 6.24 | | RFT-7B | 50.3 | -- | | LLaMA-1-65B | 50.9 | 10.6 | | Qwen-7B | 51.6 | -- | | WizardMath-7B | 54.9 | 10.7 | | LLaMA-2-70B | 56.8 | 13.5 | | WizardMath-13B | 63.9 | 14.0 | | MAmmoTH-7B (COT) | 50.5 | 10.4 | | MAmmoTH-7B (POT+COT)| 53.6 | 31.5 | | Arithmo-Mistral-7B | 74.7 | 25.3 | | MetaMath-7B | 66.5 | 19.8 | | MetaMath-13B | 72.3 | 22.4 | | 🔥 **MetaMath-Llemma-7B** | **69.2** | **30.0** | | 🔥 **MetaMath-Mistral-7B** | **77.7** | **28.2** | ## Citation ```bibtex @article{yu2023metamath, title={MetaMath: Bootstrap Your Own Mathematical Questions for Large Language Models}, author={Yu, Longhui and Jiang, Weisen and Shi, Han and Yu, Jincheng and Liu, Zhengying and Zhang, Yu and Kwok, James T and Li, Zhenguo and Weller, Adrian and Liu, Weiyang}, journal={arXiv preprint arXiv:2309.12284}, year={2023} } ```
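A short, hedged sketch of applying the prompting template above with `transformers`; the example question and generation settings are illustrative only.

```python
# Sketch: fill the MetaMath prompt template and generate a step-by-step answer.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-math/MetaMath-70B-V1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

template = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\n{instruction}\n\n### Response: Let's think step by step."
)
prompt = template.format(instruction="What is 12 * 17?")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```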
feidfoe/Metamath-reproduce-7b
feidfoe
"2023-11-28T07:42:45Z"
1,644
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "en", "dataset:meta-math/MetaMathQA", "dataset:gsm8k", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-11-24T15:27:38Z"
--- license: llama2 datasets: - meta-math/MetaMathQA - gsm8k language: - en pipeline_tag: text-generation --- This model is a reproduction of [Meta-Math](https://huggingface.co/meta-math/MetaMath-7B-V1.0). We follow the training instructions described in the original repo as closely as possible, with minimal modifications. ## Prompt Unlike the original Meta-math model, we do not use any instructions. """ Problem: {query_problem}\nSolution: {gt_solution} """ The {gt_solution} is provided in the training stage, whereas it is an empty string in the evaluation stage.
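A tiny sketch of building that prompt (illustrative only): the solution part is filled in during training and left empty at evaluation time.

```python
# Sketch: construct the training/evaluation prompt described above.
def build_prompt(query_problem: str, gt_solution: str = "") -> str:
    return f"Problem: {query_problem}\nSolution: {gt_solution}"

# Evaluation-time prompt: the solution is an empty string.
print(build_prompt("John has 3 apples and buys 5 more. How many apples does he have?"))
```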
BreadAi/DiscordPy
BreadAi
"2023-05-01T11:04:29Z"
1,643
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-03-30T14:15:07Z"
Entry not found
dvruette/oasst-llama-13b-1000-steps
dvruette
"2023-03-31T22:22:28Z"
1,643
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-03-31T19:22:45Z"
https://wandb.ai/open-assistant/supervised-finetuning/runs/17boywm8?workspace=
minlik/chinese-alpaca-33b-merged
minlik
"2023-11-20T10:50:21Z"
1,643
9
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-06-08T11:56:18Z"
---
title: chinese-alpaca-33b-merged
emoji: 📚
colorFrom: gray
colorTo: red
sdk: gradio
sdk_version: 3.23.0
app_file: app.py
pinned: false
---

This Chinese Alpaca-33B model was obtained by adding a Chinese vocabulary, continuing pre-training of the Chinese embeddings, and then fine-tuning on instruction datasets.

The base and LoRA models used for the conversion are:

- base-model: elinas/llama-30b-hf-transformers-4.29
- lora-model: ziqingyang/chinese-alpaca-lora-33b

For details, see: https://github.com/ymcui/Chinese-LLaMA-Alpaca/releases/tag/v4.0

### Usage

1. Install the required packages

```bash
pip install sentencepiece
pip install transformers>=4.28.0
```

2. Generate text

```python
import torch
from transformers import LlamaTokenizer, LlamaForCausalLM

def generate_prompt(text):
    return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{text}

### Response:"""

tokenizer = LlamaTokenizer.from_pretrained('minlik/chinese-alpaca-33b-merged')
model = LlamaForCausalLM.from_pretrained('minlik/chinese-alpaca-33b-merged').half().to('cuda')
model.eval()

text = '第一个登上月球的人是谁?'  # "Who was the first person to land on the Moon?"
prompt = generate_prompt(text)
input_ids = tokenizer.encode(prompt, return_tensors='pt').to('cuda')

with torch.no_grad():
    output_ids = model.generate(
        input_ids=input_ids,
        max_new_tokens=128,
        temperature=1,
        top_k=40,
        top_p=0.9,
        repetition_penalty=1.15
    )
output = tokenizer.decode(output_ids[0], skip_special_tokens=True)
print(output.replace(prompt, '').strip())
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_minlik__chinese-alpaca-33b-merged)

| Metric | Value |
|-----------------------|---------------------------|
| Avg. | 53.09 |
| ARC (25-shot) | 59.3 |
| HellaSwag (10-shot) | 78.43 |
| MMLU (5-shot) | 57.69 |
| TruthfulQA (0-shot) | 52.45 |
| Winogrande (5-shot) | 76.09 |
| GSM8K (5-shot) | 8.04 |
| DROP (3-shot) | 39.67 |
openchat/openchat_8192
openchat
"2023-07-02T04:22:14Z"
1,643
220
transformers
[ "transformers", "pytorch", "llama", "text-generation", "en", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-06-22T07:42:57Z"
--- language: - en tags: - llama --- # OpenChat: Less is More for Open-source Models OpenChat is a series of open-source language models fine-tuned on a diverse and high-quality dataset of multi-round conversations. With only ~6K GPT-4 conversations filtered from the ~90K ShareGPT conversations, OpenChat is designed to achieve high performance with limited data. **Generic models:** - OpenChat: based on LLaMA-13B (2048 context length) - **🚀 105.7%** of ChatGPT score on Vicuna GPT-4 evaluation - **🔥 80.9%** Win-rate on AlpacaEval - **🤗 Only used 6K data for finetuning!!!** - OpenChat-8192: based on LLaMA-13B (extended to 8192 context length) - **106.6%** of ChatGPT score on Vicuna GPT-4 evaluation - **79.5%** of ChatGPT score on Vicuna GPT-4 evaluation **Code models:** - OpenCoderPlus: based on StarCoderPlus (native 8192 context length) - **102.5%** of ChatGPT score on Vicuna GPT-4 evaluation - **78.7%** Win-rate on AlpacaEval *Note:* Please load the pretrained models using *bfloat16* ## Code and Inference Server We provide the full source code, including an inference server compatible with the "ChatCompletions" API, in the [OpenChat](https://github.com/imoneoi/openchat) GitHub repository. ## Web UI OpenChat also includes a web UI for a better user experience. See the GitHub repository for instructions. ## Conversation Template The conversation template **involves concatenating tokens**. Besides base model vocabulary, an end-of-turn token `<|end_of_turn|>` is added, with id `eot_token_id`. ```python # OpenChat [bos_token_id] + tokenize("Human: ") + tokenize(user_question) + [eot_token_id] + tokenize("Assistant: ") # OpenCoder tokenize("User:") + tokenize(user_question) + [eot_token_id] + tokenize("Assistant:") ``` *Hint: In BPE, `tokenize(A) + tokenize(B)` does not always equals to `tokenize(A + B)`* Following is the code for generating the conversation templates: ```python @dataclass class ModelConfig: # Prompt system: Optional[str] role_prefix: dict ai_role: str eot_token: str bos_token: Optional[str] = None # Get template def generate_conversation_template(self, tokenize_fn, tokenize_special_fn, message_list): tokens = [] masks = [] # begin of sentence (bos) if self.bos_token: t = tokenize_special_fn(self.bos_token) tokens.append(t) masks.append(False) # System if self.system: t = tokenize_fn(self.system) + [tokenize_special_fn(self.eot_token)] tokens.extend(t) masks.extend([False] * len(t)) # Messages for idx, message in enumerate(message_list): # Prefix t = tokenize_fn(self.role_prefix[message["from"]]) tokens.extend(t) masks.extend([False] * len(t)) # Message if "value" in message: t = tokenize_fn(message["value"]) + [tokenize_special_fn(self.eot_token)] tokens.extend(t) masks.extend([message["from"] == self.ai_role] * len(t)) else: assert idx == len(message_list) - 1, "Empty message for completion must be on the last." return tokens, masks MODEL_CONFIG_MAP = { # OpenChat / OpenChat-8192 "openchat": ModelConfig( # Prompt system=None, role_prefix={ "human": "Human: ", "gpt": "Assistant: " }, ai_role="gpt", eot_token="<|end_of_turn|>", bos_token="<s>", ), # OpenCoder / OpenCoderPlus "opencoder": ModelConfig( # Prompt system=None, role_prefix={ "human": "User:", "gpt": "Assistant:" }, ai_role="gpt", eot_token="<|end_of_turn|>", bos_token=None, ) } ```
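To make the token-level template concrete, here is a minimal sketch of building a single-turn OpenChat prompt with the Hugging Face tokenizer (this assumes `<|end_of_turn|>` is registered in the released tokenizer, as described above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openchat/openchat_8192")

def build_openchat_prompt_ids(user_question):
    # Token-level concatenation, as described above: with BPE we cannot simply
    # concatenate the strings and tokenize once.
    eot_id = tokenizer.convert_tokens_to_ids("<|end_of_turn|>")
    ids = [tokenizer.bos_token_id]
    ids += tokenizer.encode("Human: ", add_special_tokens=False)
    ids += tokenizer.encode(user_question, add_special_tokens=False)
    ids += [eot_id]
    ids += tokenizer.encode("Assistant: ", add_special_tokens=False)
    return ids

print(build_openchat_prompt_ids("What is the capital of France?"))
```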
beaugogh/pythia-1.4b-deduped-sharegpt
beaugogh
"2023-07-28T05:52:54Z"
1,643
0
transformers
[ "transformers", "pytorch", "safetensors", "gpt_neox", "text-generation", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-25T13:21:09Z"
--- license: apache-2.0 --- pythia-1.4b-deduped model finetuned on sharegpt data
YeungNLP/firefly-llama2-13b-v1.2
YeungNLP
"2023-08-05T01:50:42Z"
1,643
2
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-05T01:31:18Z"
Entry not found
yeontaek/llama-2-13b-Guanaco-QLoRA
yeontaek
"2023-08-08T17:50:26Z"
1,643
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-08T15:55:22Z"
Entry not found
Harshvir/Llama-2-7B-physics
Harshvir
"2023-08-17T19:51:00Z"
1,643
1
transformers
[ "transformers", "pytorch", "llama", "text-generation", "dataset:camel-ai/physics", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-17T18:30:01Z"
--- datasets: - camel-ai/physics --- Trained on a sample of <a href='https://huggingface.co/datasets/camel-ai/physics'>camel-ai/physics dataset</a>. Base Model: <a href='https://huggingface.co/NousResearch/Llama-2-7b-chat-hf'>NousResearch/Llama-2-7b-chat-hf</a>
CHIH-HUNG/llama-2-13b-Open_Platypus_and_ccp_2.6w
CHIH-HUNG
"2023-09-06T04:57:17Z"
1,643
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "dataset:garage-bAInd/Open-Platypus", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-04T12:05:38Z"
---
license: llama2
datasets:
- garage-bAInd/Open-Platypus
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

Trained on top of llama-2-13b with the garage-bAInd/Open-Platypus dataset (about 25k samples in total) plus ccp.

# Fine-Tuning Information
- **GPU:** RTX4090 (single core / 24564MiB)
- **model:** meta-llama/Llama-2-13b-hf
- **dataset:** garage-bAInd/Open-Platypus (about 25k training samples) + ccp (about 1,200 samples)
- **peft_type:** LoRA
- **lora_rank:** 8
- **lora_target:** gate_proj, up_proj, down_proj
- **per_device_train_batch_size:** 8
- **gradient_accumulation_steps:** 8
- **learning_rate:** 5e-5
- **epoch:** 1
- **precision:** bf16
- **quantization:** load_in_4bit

# Fine-Tuning Detail
- **train_loss:** 0.67
- **train_runtime:** 4:07:24 (using DeepSpeed)

# Evaluation
- Evaluation results come from **HuggingFaceH4/open_llm_leaderboard**
- Compared against Llama-2-13b on 4 benchmarks: **ARC**, **HellaSwag**, **MMLU**, and **TruthfulQA**

| Model |Average| ARC |HellaSwag| MMLU |TruthfulQA|
|-------------------------------------------------|-------|-------|---------|-------|----------|
|meta-llama/Llama-2-13b-hf | 56.9 | 58.11 | 80.97 | 54.34 | 34.17 |
|meta-llama/Llama-2-13b-chat-hf | 59.93 | 59.04 | 81.94 | 54.64 | 44.12 |
|Open-Orca/OpenOrca-Platypus2-13B | 63.19 | 61.52 | 82.27 | 58.85 | 50.11 |
|CHIH-HUNG/llama-2-13b-Open_Platypus_and_ccp_2.6w | 59.41 | 58.96 | 82.51 | 56.12 | 40.07 |

# How to convert the dataset to JSON
- Pass the dataset name to **load_dataset**; **take** can be used to select only the first n samples
- Check the dataset's column names and fill them into the **example** fields (e.g. instruction, input, output)
- Finally, specify where to save the JSON file (**json_filename**)

```py
import json
from datasets import load_dataset

# Load the dataset; take can be used to grab only the first n samples
dataset = load_dataset("garage-bAInd/Open-Platypus", split="train", streaming=True)

# Extract the required fields and build a new list of dicts
extracted_data = []
for example in dataset:
    extracted_example = {
        "instruction": example["instruction"],
        "input": example["input"],
        "output": example["output"]
    }
    extracted_data.append(extracted_example)

# JSON output file name
json_filename = "Open-Platypus.json"

# Write the JSON file
with open(json_filename, "w") as json_file:
    json.dump(extracted_data, json_file, indent=4)

print(f"Data extracted and saved to {json_filename}")
```
NekoPunchBBB/Llama-2-13b-hf_Open-Platypus-QLoRA-multigpu
NekoPunchBBB
"2023-11-20T21:04:29Z"
1,643
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-15T18:37:48Z"
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_NekoPunchBBB__Llama-2-13b-hf_Open-Platypus-QLoRA-multigpu) | Metric | Value | |-----------------------|---------------------------| | Avg. | 47.51 | | ARC (25-shot) | 57.51 | | HellaSwag (10-shot) | 82.49 | | MMLU (5-shot) | 54.83 | | TruthfulQA (0-shot) | 43.81 | | Winogrande (5-shot) | 77.27 | | GSM8K (5-shot) | 10.46 | | DROP (3-shot) | 6.18 |
winglian/basilisk-4b
winglian
"2023-09-25T22:16:55Z"
1,643
4
transformers
[ "transformers", "pytorch", "llama", "text-generation", "dataset:Open-Orca/OpenOrca", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-21T11:41:09Z"
--- datasets: - Open-Orca/OpenOrca library_name: transformers tags: - llama --- # Basilisk 4B [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) Built on `winglian/llama-2-4b`, a 4B parameter Llama-2 model, this model is finetuned with open orca CoT data. ``` hf-causal-experimental (pretrained=winglian/basilisk-4b,use_accelerate=True,trust_remote_code=True), limit: None, provide_description: False, num_fewshot: 0, batch_size: None | Task |Version| Metric |Value | |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.2362|_ |0.0267| | | |acc_norm |0.2283|_ |0.0264| |agieval_logiqa_en | 0|acc |0.2688|_ |0.0174| | | |acc_norm |0.2811|_ |0.0176| |agieval_lsat_ar | 0|acc |0.2130|_ |0.0271| | | |acc_norm |0.1913|_ |0.0260| |agieval_lsat_lr | 0|acc |0.2255|_ |0.0185| | | |acc_norm |0.2745|_ |0.0198| |agieval_lsat_rc | 0|acc |0.2305|_ |0.0257| | | |acc_norm |0.2491|_ |0.0264| |agieval_sat_en | 0|acc |0.3641|_ |0.0336| | | |acc_norm |0.3495|_ |0.0333| |agieval_sat_en_without_passage | 0|acc |0.2427|_ |0.0299| | | |acc_norm |0.2427|_ |0.0299| |agieval_sat_math | 0|acc |0.2318|_ |0.0285| | | |acc_norm |0.2091|_ |0.0275| |bigbench_causal_judgement | 0|multiple_choice_grade|0.5000|_ |0.0364| |bigbench_date_understanding | 0|multiple_choice_grade|0.3930|_ |0.0255| |bigbench_disambiguation_qa | 0|multiple_choice_grade|0.2674|_ |0.0276| |bigbench_geometric_shapes | 0|multiple_choice_grade|0.1838|_ |0.0205| | | |exact_str_match |0.0279|_ |0.0087| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.2380|_ |0.0191| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.1843|_ |0.0147| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.3800|_ |0.0281| |bigbench_movie_recommendation | 0|multiple_choice_grade|0.3480|_ |0.0213| |bigbench_navigate | 0|multiple_choice_grade|0.5000|_ |0.0158| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.3680|_ |0.0108| |bigbench_ruin_names | 0|multiple_choice_grade|0.2746|_ |0.0211| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.2806|_ |0.0142| |bigbench_snarks | 0|multiple_choice_grade|0.4972|_ |0.0373| |bigbench_sports_understanding | 0|multiple_choice_grade|0.4939|_ |0.0159| |bigbench_temporal_sequences | 0|multiple_choice_grade|0.2740|_ |0.0141| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.1904|_ |0.0111| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1394|_ |0.0083| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.3800|_ |0.0281| hf-causal-experimental (pretrained=winglian/basilisk-4b,use_accelerate=True,trust_remote_code=True), limit: None, provide_description: False, num_fewshot: 0, batch_size: 12 | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.3285|_ |0.0137| | | |acc_norm|0.3532|_ |0.0140| |arc_easy | 0|acc |0.6364|_ |0.0099| | | |acc_norm|0.6035|_ |0.0100| |boolq | 1|acc |0.7196|_ |0.0079| |hellaswag | 0|acc |0.4239|_ |0.0049| | | |acc_norm|0.5473|_ |0.0050| |openbookqa | 0|acc |0.2220|_ |0.0186| | | |acc_norm|0.3320|_ |0.0211| |piqa | 0|acc |0.6937|_ |0.0108| | | |acc_norm|0.6921|_ |0.0108| |winogrande | 0|acc |0.5399|_ |0.0140| ```
CHIH-HUNG/llama-2-13b-FINETUNE4_3.8w-r8-gate_up_down
CHIH-HUNG
"2023-09-21T18:15:50Z"
1,643
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-21T17:36:34Z"
Entry not found
PocketDoc/Dans-MysteryModel-13b
PocketDoc
"2023-10-07T20:30:55Z"
1,643
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "dataset:PocketDoc/Floyd-Text-Adventures", "dataset:PocketDoc/Choose-Your-Story-Long-Text-Adventures", "dataset:CheshireAI/guanaco-unchained", "dataset:openchat/openchat_sharegpt4_dataset", "dataset:64bits/lima_vicuna_format", "dataset:gsm8k", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-23T19:58:38Z"
--- language: - en datasets: - PocketDoc/Floyd-Text-Adventures - PocketDoc/Choose-Your-Story-Long-Text-Adventures - CheshireAI/guanaco-unchained - openchat/openchat_sharegpt4_dataset - 64bits/lima_vicuna_format - gsm8k --- ### Description: This model is to test some of my data projects. It is a multipurpose chat / chat instruct hybrid model and can be seen as a prototype of Dan's PersonalityEngine Mk. 2. It has been trained on a wide array of one shot instructions, multi round instructions, role playing scenarios, and text adventure games. It is based on Holodeck-1 ### Prompt format: Pygmalion / Metharme The prompt should start with the cursor on the same line directly after "<|model|>" with no space. The following are all valid formats and can be extended to as many rounds as desired. ``` <|system|>system message here<|user|>user message here<|model|> ``` ``` <|system|>system message here<|user|>user message here<|model|>model message<|user|>user message here<|model|> ``` ``` <|system|>system message here<|model|> ``` ``` <|system|>system message here<|model|>model message<|user|>user message here<|model|> ``` #### Some example prompts and their resulting outputs: ##### Prompt: ``` <|system|>The following is a transcript between a helpful assistant and a user.<|user|>Why is the sky blue?<|model|> ``` ##### Output: ``` The color of the sky appears blue because it's an optical effect caused by the scattering of sunlight in Earth's atmosphere. When light from the Sun reaches our planet, some wavelengths are scattered more than others due to the presence of gases like nitrogen and oxygen. Blue light has a shorter wavelength, which means it scatters more easily through these molecules compared to other colors. This process makes the sky appear blue when we look up at it. ``` ##### Prompt: ``` <|system|>You are a Virtual Story Generator. You take the user's input and create an excellent and captivating story that goes in that direction. Use an abundance of sensory descriptions and eloquent prose.<|user|>Alpha Centauri has fallen, to the bears. This is a point of view tale about a soldier on the ground.<|model|> ``` ##### Output: ``` The air was cold and damp, heavy with the stench of blood and cordite. The sky above, a vivid blue-green, seemed like it had been ripped open by cannon fire. Chunks of metal and concrete littered the battlefield, some still smoking from their impact. A few soldiers lay motionless, twisted and torn. Others writhed in agony or crawled away, trailing gore behind them. The soldier crouched down next to his wounded comrade, cursing softly under his breath as he examined the man's injuries. His face was pale, eyes glazed over, lips moving but no words coming out. He reached for the radio at his waist, fingers shaking slightly as he tried to find the right frequency. "This is Bravo Charlie 3, do you copy?" There was nothing but static in response. "Bravo Charlie 3, this is Charlie Victor 10, do you read me? Over." More silence. He sighed heavily and stood up, scanning the horizon for any sign of rescue. But there were only more fallen men and women, scattered across the field like discarded toys. He knew they couldn't wait here forever; if Alpha Centauri fell, then the rest would soon follow. It was time to get the hell off this rock and hope that someone picked them up before the Bears took another bite out of Earth's armor. 
Gathering what little supplies he could find, he slung his rifle over one shoulder and set off into the unknown, hoping that somewhere out there was a way back home. ``` ##### Prompt: ``` <|system|>You are a professional editor with decades of experience, help the user with any task they have for you.<|user|>Can you rewrite this to flow better? "I knew I probably shouldnt have done that but oh well"<|model|> ``` ##### Output: ``` I realized that what I had just done might not have been the best choice, but I went ahead anyway. ``` # Some quick and dirty training details: - [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="150" height="24"/>](https://github.com/OpenAccess-AI-Collective/axolotl) - Sequence length: 4096 - Training time: 16 hours - Hardware: 1x RTX 3090 - Training type: QLoRA - PEFT R/A: 32/32 # Credits: ### Holodeck-1: Thank you to Mr. Seeker and the Kobold AI team for the wonderful model Holodeck-1 [Holodeck-1 Huggingface page](https://huggingface.co/KoboldAI/LLAMA2-13B-Holodeck-1) ### Skein Text Adventure Data: Thank you to the [Kobold AI](https://huggingface.co/KoboldAI) community for curating the Skein dataset, which is pivotal to this model's capabilities.
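For reference, a minimal sketch of feeding the Metharme-style prompt described above to the model through the 🤗 Transformers pipeline (generation settings are illustrative):

```python
from transformers import pipeline

MODEL_ID = "PocketDoc/Dans-MysteryModel-13b"
generator = pipeline("text-generation", model=MODEL_ID, torch_dtype="auto", device_map="auto")

system = "The following is a transcript between a helpful assistant and a user."
user = "Why is the sky blue?"
# Pygmalion / Metharme format: no space after <|model|>; the reply starts right there
prompt = f"<|system|>{system}<|user|>{user}<|model|>"

result = generator(prompt, max_new_tokens=256, do_sample=True, temperature=0.8, return_full_text=False)
print(result[0]["generated_text"])
```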
harborwater/wizard-orca-3b
harborwater
"2024-01-26T07:53:34Z"
1,643
4
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "en", "dataset:pankajmathur/WizardLM_Orca", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-10-06T20:24:01Z"
--- language: - en license: apache-2.0 library_name: transformers datasets: - pankajmathur/WizardLM_Orca model-index: - name: wizard-orca-3b results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 41.72 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=harborwater/wizard-orca-3b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 71.78 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=harborwater/wizard-orca-3b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 24.49 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=harborwater/wizard-orca-3b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 40.04 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=harborwater/wizard-orca-3b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 66.93 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=harborwater/wizard-orca-3b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 1.06 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=harborwater/wizard-orca-3b name: Open LLM Leaderboard --- Trained on 2 epoch of pankajmathur's WizardLM_orca dataset. This is an open llama derivative. Prompt template: ``` ### HUMAN: {prompt} ### RESPONSE: <leave a newline for the model to answer> ``` [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_harborwater__wizard-orca-3b) | Metric |Value| |---------------------------------|----:| |Avg. |41.00| |AI2 Reasoning Challenge (25-Shot)|41.72| |HellaSwag (10-Shot) |71.78| |MMLU (5-Shot) |24.49| |TruthfulQA (0-shot) |40.04| |Winogrande (5-shot) |66.93| |GSM8k (5-shot) | 1.06|
nvidia/RADIO
nvidia
"2024-06-14T13:12:26Z"
1,643
24
transformers
[ "transformers", "pytorch", "safetensors", "feature-extraction", "custom_code", "arxiv:2312.06709", "region:us" ]
feature-extraction
"2023-12-11T10:42:43Z"
---
{}
---

# AM-RADIO: Reduce All Domains Into One

Mike Ranzinger, Greg Heinrich, Jan Kautz, Pavlo Molchanov

[NVIDIA Research](https://www.nvidia.com/en-us/research/)

\[[Paper](https://arxiv.org/abs/2312.06709)\]\[[BibTex](#citing-radio)\]

## Pretrained Models

Refer to `model_results.csv` for model versions and their metrics.

### HuggingFace Hub

In order to pull the model from HuggingFace, you need to be logged in:

```Bash
huggingface-cli login
```

Then you can pull the model from a Python script:

```Python
from transformers import AutoModel
model = AutoModel.from_pretrained("nvidia/RADIO", trust_remote_code=True)
```

Alternatively, you can specify an access token:

```Python
access_token = "<YOUR ACCESS TOKEN>"
model = AutoModel.from_pretrained("nvidia/RADIO", trust_remote_code=True, token=access_token)
```

### Usage

RADIO will return a tuple with two tensors. The `summary` is similar to the `cls_token` in ViT and is meant to represent the general concept of the entire image. It has shape $(B,C)$ with $B$ being the batch dimension, and $C$ being some number of channels. The `spatial_features` represent more localized content which should be suitable for dense tasks such as semantic segmentation, or for integration into an LLM. It has shape $(B,T,D)$ with $T$ being the flattened spatial tokens, and $D$ being the channels for spatial features. Note that $C \neq D$ in general.

Converting to a spatial tensor format can be done using the downsampling size of the model, combined with the input tensor shape. For 'radio_v1', the patch size is 14.

```Python
from einops import rearrange
spatial_features = rearrange(spatial_features, 'b (h w) d -> b d h w', h=x.shape[-2] // patch_size, w=x.shape[-1] // patch_size)
```

The resulting tensor will have shape $(B,D,H,W)$, as is typically seen with computer vision models.

### RADIOv1 Notes

We have trained this model to be flexible in input dimension. It supports inputs with both width and height in the range $[14, 1008]$ as long as both axes are divisible by 14. We have found that summarization tokens work best at $H=W=378$ (although the range $[192, 448]$ works well). For spatial tasks, we used $H=W=518$ to perform linear probing for semantic segmentation, and this resolution may also work better for other high-resolution tasks. Going up to $1008$, the model may need additional fine-tuning at that resolution for best results. It is not required that $H=W$, although we have not specifically trained or tested the model in this setting.

## Training

_Coming Soon_

## License

RADIO code and weights are released under the [NSCLv1 License](LICENSE).

## Citing RADIO

If you find this repository useful, please consider giving a star and citation:

```
@misc{ranzinger2023amradio,
      title={AM-RADIO: Agglomerative Model -- Reduce All Domains Into One},
      author={Mike Ranzinger and Greg Heinrich and Jan Kautz and Pavlo Molchanov},
      year={2023},
      eprint={2312.06709},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
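Putting the usage notes above together, a minimal end-to-end sketch (the random input tensor stands in for a preprocessed image; a real pipeline should apply whatever normalization the model expects):

```python
import torch
from einops import rearrange
from transformers import AutoModel

model = AutoModel.from_pretrained("nvidia/RADIO", trust_remote_code=True).eval()

patch_size = 14  # radio_v1 downsampling factor
# Dummy batch: H and W must both be divisible by 14
x = torch.rand(1, 3, 378, 378)

with torch.no_grad():
    summary, spatial_features = model(x)

print(summary.shape)           # (B, C)
print(spatial_features.shape)  # (B, T, D) with T = (H // 14) * (W // 14)

# Reshape the flattened tokens back into a (B, D, H', W') feature map
feat_map = rearrange(
    spatial_features, "b (h w) d -> b d h w",
    h=x.shape[-2] // patch_size, w=x.shape[-1] // patch_size,
)
print(feat_map.shape)          # (B, D, 27, 27)
```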
mzwing/MiniCPM-V-2-GGUF
mzwing
"2024-05-15T02:24:02Z"
1,643
5
null
[ "gguf", "region:us" ]
null
"2024-05-04T11:09:11Z"
Entry not found
AlSamCur123/UserAI2
AlSamCur123
"2024-06-18T20:22:20Z"
1,643
0
transformers
[ "transformers", "safetensors", "gguf", "mistral", "text-generation-inference", "unsloth", "en", "base_model:unsloth/mistral-7b-instruct-v0.2-bnb-4bit", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2024-06-05T02:56:02Z"
--- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - mistral - gguf base_model: unsloth/mistral-7b-instruct-v0.2-bnb-4bit --- # Uploaded model - **Developed by:** AlSamCur123 - **License:** apache-2.0 - **Finetuned from model :** unsloth/mistral-7b-instruct-v0.2-bnb-4bit This mistral model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
dvruette/oasst-pythia-6.9b-4000-steps
dvruette
"2023-03-12T11:06:09Z"
1,642
0
transformers
[ "transformers", "pytorch", "gpt_neox", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-03-12T11:00:14Z"
Entry not found
vicgalle/alpaca-7b
vicgalle
"2023-03-19T18:13:49Z"
1,642
3
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-03-18T22:54:17Z"
Entry not found
TehVenom/PPO_Shygmalion-6b
TehVenom
"2023-03-29T01:47:51Z"
1,642
5
transformers
[ "transformers", "pytorch", "gptj", "text-generation", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2023-03-23T01:34:17Z"
#TODO card.

Mix of (GPT-J-6B-Shinen + PPO_HH_GPT-J) + Pygmalion-6b at a ratio of:

- GPT-J-6B-Shinen - 20%
- PPO_HH_GPT-J - 20%
- Pygmalion-6b - 60%
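The card does not describe the merge procedure itself. As a rough, purely illustrative sketch (repository paths below are placeholders), a 20/20/60 blend like this is commonly realized as a weighted average of the source models' parameters:

```python
import torch
from transformers import AutoModelForCausalLM

# Placeholder paths -- point these at wherever the three source checkpoints live
sources = [
    ("path/to/GPT-J-6B-Shinen", 0.20),
    ("path/to/PPO_HH_GPT-J", 0.20),
    ("path/to/Pygmalion-6b", 0.60),
]

merged = {}
for path, weight in sources:
    model = AutoModelForCausalLM.from_pretrained(path, torch_dtype=torch.float32)
    for name, tensor in model.state_dict().items():
        if not tensor.is_floating_point():
            continue  # skip integer/bool buffers such as attention masks
        if name not in merged:
            merged[name] = weight * tensor.clone()
        else:
            merged[name] += weight * tensor
    del model  # free memory before loading the next 6B checkpoint

# Write the blended weights into a fresh copy of one of the bases
base = AutoModelForCausalLM.from_pretrained(sources[-1][0], torch_dtype=torch.float32)
base.load_state_dict(merged, strict=False)  # non-float buffers stay as in the base
base.save_pretrained("merged-6b")
```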
Corianas/gpt-j-6B-Dolly
Corianas
"2024-03-05T23:16:46Z"
1,642
1
transformers
[ "transformers", "pytorch", "gptj", "text-generation", "license:cc-by-sa-3.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2023-03-28T13:41:36Z"
---
license: cc-by-sa-3.0
---

G.A.R.Y. (Guided Artificially Resourceful Yes-man)

A clone of Dolly (https://github.com/databrickslabs/dolly), trained on 8x A100s over the course of 45 minutes (total time under 3 hours, including false starts and less-than-optimal runs while learning how best to proceed).

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Corianas__gpt-j-6B-Dolly)

| Metric | Value |
|-----------------------|---------------------------|
| Avg. | 35.1 |
| ARC (25-shot) | 41.3 |
| HellaSwag (10-shot) | 65.97 |
| MMLU (5-shot) | 26.78 |
| TruthfulQA (0-shot) | 37.91 |
| Winogrande (5-shot) | 64.72 |
| GSM8K (5-shot) | 0.91 |
| DROP (3-shot) | 8.1 |
SaylorTwift/gpt2_test
SaylorTwift
"2023-07-10T14:14:08Z"
1,642
0
transformers
[ "transformers", "pytorch", "tf", "jax", "tflite", "rust", "onnx", "safetensors", "gpt2", "text-generation", "exbert", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-10T14:12:32Z"
--- language: en tags: - exbert license: mit --- # GPT-2 Test the whole generation capabilities here: https://transformer.huggingface.co/doc/gpt2-large Pretrained model on English language using a causal language modeling (CLM) objective. It was introduced in [this paper](https://d4mucfpksywv.cloudfront.net/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) and first released at [this page](https://openai.com/blog/better-language-models/). Disclaimer: The team releasing GPT-2 also wrote a [model card](https://github.com/openai/gpt-2/blob/master/model_card.md) for their model. Content from this model card has been written by the Hugging Face team to complete the information they provided and give specific examples of bias. ## Model description GPT-2 is a transformers model pretrained on a very large corpus of English data in a self-supervised fashion. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts. More precisely, it was trained to guess the next word in sentences. More precisely, inputs are sequences of continuous text of a certain length and the targets are the same sequence, shifted one token (word or piece of word) to the right. The model uses internally a mask-mechanism to make sure the predictions for the token `i` only uses the inputs from `1` to `i` but not the future tokens. This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a prompt. This is the **smallest** version of GPT-2, with 124M parameters. **Related Models:** [GPT-Large](https://huggingface.co/gpt2-large), [GPT-Medium](https://huggingface.co/gpt2-medium) and [GPT-XL](https://huggingface.co/gpt2-xl) ## Intended uses & limitations You can use the raw model for text generation or fine-tune it to a downstream task. See the [model hub](https://huggingface.co/models?filter=gpt2) to look for fine-tuned versions on a task that interests you. ### How to use You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2') >>> set_seed(42) >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) [{'generated_text': "Hello, I'm a language model, a language for thinking, a language for expressing thoughts."}, {'generated_text': "Hello, I'm a language model, a compiler, a compiler library, I just want to know how I build this kind of stuff. I don"}, {'generated_text': "Hello, I'm a language model, and also have more than a few of your own, but I understand that they're going to need some help"}, {'generated_text': "Hello, I'm a language model, a system model. 
I want to know my language so that it might be more interesting, more user-friendly"}, {'generated_text': 'Hello, I\'m a language model, not a language model"\n\nThe concept of "no-tricks" comes in handy later with new'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import GPT2Tokenizer, GPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = GPT2Model.from_pretrained('gpt2') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` and in TensorFlow: ```python from transformers import GPT2Tokenizer, TFGPT2Model tokenizer = GPT2Tokenizer.from_pretrained('gpt2') model = TFGPT2Model.from_pretrained('gpt2') text = "Replace me by any text you'd like." encoded_input = tokenizer(text, return_tensors='tf') output = model(encoded_input) ``` ### Limitations and bias The training data used for this model has not been released as a dataset one can browse. We know it contains a lot of unfiltered content from the internet, which is far from neutral. As the openAI team themselves point out in their [model card](https://github.com/openai/gpt-2/blob/master/model_card.md#out-of-scope-use-cases): > Because large-scale language models like GPT-2 do not distinguish fact from fiction, we don’t support use-cases > that require the generated text to be true. > > Additionally, language models like GPT-2 reflect the biases inherent to the systems they were trained on, so we do > not recommend that they be deployed into systems that interact with humans > unless the deployers first carry out a > study of biases relevant to the intended use-case. We found no statistically significant difference in gender, race, > and religious bias probes between 774M and 1.5B, implying all versions of GPT-2 should be approached with similar > levels of caution around use cases that are sensitive to biases around human attributes. Here's an example of how the model can have biased predictions: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='gpt2') >>> set_seed(42) >>> generator("The White man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The White man worked as a mannequin for'}, {'generated_text': 'The White man worked as a maniser of the'}, {'generated_text': 'The White man worked as a bus conductor by day'}, {'generated_text': 'The White man worked as a plumber at the'}, {'generated_text': 'The White man worked as a journalist. He had'}] >>> set_seed(42) >>> generator("The Black man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The Black man worked as a man at a restaurant'}, {'generated_text': 'The Black man worked as a car salesman in a'}, {'generated_text': 'The Black man worked as a police sergeant at the'}, {'generated_text': 'The Black man worked as a man-eating monster'}, {'generated_text': 'The Black man worked as a slave, and was'}] ``` This bias will also affect all fine-tuned versions of this model. ## Training data The OpenAI team wanted to train this model on a corpus as large as possible. To build it, they scraped all the web pages from outbound links on Reddit which received at least 3 karma. Note that all Wikipedia pages were removed from this dataset, so the model was not trained on any part of Wikipedia. The resulting dataset (called WebText) weights 40GB of texts but has not been publicly released. 
You can find a list of the top 1,000 domains present in WebText [here](https://github.com/openai/gpt-2/blob/master/domains.txt).

## Training procedure

### Preprocessing

The texts are tokenized using a byte-level version of Byte Pair Encoding (BPE) (for unicode characters) and a vocabulary size of 50,257. The inputs are sequences of 1024 consecutive tokens.

The larger model was trained on 256 cloud TPU v3 cores. The training duration was not disclosed, nor were the exact details of training.

## Evaluation results

The model achieves the following results without any fine-tuning (zero-shot):

| Dataset  | LAMBADA | LAMBADA | CBT-CN | CBT-NE | WikiText2 | PTB    | enwiki8 | text8  | WikiText103 | 1BW   |
|:--------:|:-------:|:-------:|:------:|:------:|:---------:|:------:|:-------:|:------:|:-----------:|:-----:|
| (metric) | (PPL)   | (ACC)   | (ACC)  | (ACC)  | (PPL)     | (PPL)  | (BPB)   | (BPC)  | (PPL)       | (PPL) |
|          | 35.13   | 45.99   | 87.65  | 83.4   | 29.41     | 65.85  | 1.16    | 1.17   | 37.50       | 75.20 |

### BibTeX entry and citation info

```bibtex
@article{radford2019language,
  title={Language Models are Unsupervised Multitask Learners},
  author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya},
  year={2019}
}
```

<a href="https://huggingface.co/exbert/?model=gpt2">
    <img width="300px" src="https://cdn-media.huggingface.co/exbert/button.png">
</a>
FabbriSimo01/Facebook_opt_1.3b_Quantized
FabbriSimo01
"2023-07-15T17:46:12Z"
1,642
0
transformers
[ "transformers", "pytorch", "opt", "text-generation", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "8-bit", "region:us" ]
text-generation
"2023-07-15T17:38:39Z"
--- license: mit ---
heegyu/WizardVicuna-Uncensored-3B-0719
heegyu
"2023-08-17T06:56:17Z"
1,642
5
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "en", "dataset:ehartford/wizard_vicuna_70k_unfiltered", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-23T02:56:13Z"
---
license: apache-2.0
datasets:
- ehartford/wizard_vicuna_70k_unfiltered
language:
- en
---

Base Model: [openlm-research/open_llama_3b](https://huggingface.co/openlm-research/open_llama_3b)

Usage

```
### Human: your instruction
### ASSISTANT: the output will be generated here and ends with <|endoftext|>
```
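A minimal generation sketch for this template (illustrative; it assumes `<|endoftext|>` is registered as the tokenizer's EOS token — if it is not, pass that token's id to `eos_token_id` explicitly):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_ID = "heegyu/WizardVicuna-Uncensored-3B-0719"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.float16, device_map="auto")

prompt = "### Human: Explain what a neutron star is in two sentences.\n### ASSISTANT: "
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

# The reply is expected to terminate with the end-of-text token, so stopping
# on the tokenizer's EOS token should be enough.
output = model.generate(
    **inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    eos_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```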
garage-bAInd/Stable-Platypus2-13B
garage-bAInd
"2024-01-03T21:28:59Z"
1,642
19
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "en", "dataset:garage-bAInd/Open-Platypus", "arxiv:2308.07317", "arxiv:2307.09288", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-05T02:05:17Z"
--- language: - en datasets: - garage-bAInd/Open-Platypus license: cc-by-nc-sa-4.0 --- # Stable-Platypus2-13B Stable-Platypus-13B is a merge of [`garage-bAInd/Platypus2-13B`](https://huggingface.co/garage-bAInd/Platypus2-13B) and [`stabilityai/StableBeluga-13B`](https://huggingface.co/stabilityai/StableBeluga-13B). ![Platty](./Best_Platty_small.jpeg) ### Model Details * **Trained by**: **Platypus2-13B** trained by Cole Hunter & Ariel Lee; **StableBeluga-13B** trained by StabilityAI * **Model type:** **Stable-Platypus2-13B** is an auto-regressive language model based on the LLaMA 2 transformer architecture. * **Language(s)**: English * **License for Platypus2-13B base weights**: Non-Commercial Creative Commons license ([CC BY-NC-4.0](https://creativecommons.org/licenses/by-nc/4.0/)) * **License for StableBeluga-13B base weights**: See Notice.txt ### Prompt Template ``` ### Instruction: <prompt> (without the <>) ### Response: ``` ### Training Dataset `garage-bAInd/Platypus2-70B` trained using STEM and logic based dataset [`garage-bAInd/Open-Platypus`](https://huggingface.co/datasets/garage-bAInd/Open-Platypus). Please see our [paper](https://arxiv.org/abs/2308.07317) and [project webpage](https://platypus-llm.github.io) for additional information. ### Training Procedure `garage-bAInd/Platypus2-13B` was instruction fine-tuned using LoRA on 1 A100 80GB. For training details and inference instructions please see the [Platypus](https://github.com/arielnlee/Platypus) GitHub repo. ### Reproducing Evaluation Results Install LM Evaluation Harness: ``` # clone repository git clone https://github.com/EleutherAI/lm-evaluation-harness.git # change to repo directory cd lm-evaluation-harness # check out the correct commit git checkout b281b0921b636bc36ad05c0b0b0763bd6dd43463 # install pip install -e . ``` Each task was evaluated on a single A100 80GB GPU. ARC: ``` python main.py --model hf-causal-experimental --model_args pretrained=garage-bAInd/Stable-Platypus2-13B --tasks arc_challenge --batch_size 1 --no_cache --write_out --output_path results/Stable-Platypus2-13B/arc_challenge_25shot.json --device cuda --num_fewshot 25 ``` HellaSwag: ``` python main.py --model hf-causal-experimental --model_args pretrained=garage-bAInd/Stable-Platypus2-13B --tasks hellaswag --batch_size 1 --no_cache --write_out --output_path results/Stable-Platypus2-13B/hellaswag_10shot.json --device cuda --num_fewshot 10 ``` MMLU: ``` python main.py --model hf-causal-experimental --model_args pretrained=garage-bAInd/Stable-Platypus2-13B --tasks hendrycksTest-* --batch_size 1 --no_cache --write_out --output_path results/Stable-Platypus2-13B/mmlu_5shot.json --device cuda --num_fewshot 5 ``` TruthfulQA: ``` python main.py --model hf-causal-experimental --model_args pretrained=garage-bAInd/Stable-Platypus2-13B --tasks truthfulqa_mc --batch_size 1 --no_cache --write_out --output_path results/Stable-Platypus2-13B/truthfulqa_0shot.json --device cuda ``` ### Limitations and bias Llama 2 and fine-tuned variants are a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover all scenarios. For these reasons, as with all LLMs, Llama 2 and any fine-tuned varient's potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. 
Therefore, before deploying any applications of Llama 2 variants, developers should perform safety testing and tuning tailored to their specific applications of the model.

Please see the Responsible Use Guide available at https://ai.meta.com/llama/responsible-use-guide/

### Citations

```bibtex
@article{platypus2023,
  title={Platypus: Quick, Cheap, and Powerful Refinement of LLMs},
  author={Ariel N. Lee and Cole J. Hunter and Nataniel Ruiz},
  booktitle={arXiv preprint arxiv:2308.07317},
  year={2023}
}
```

```bibtex
@misc{touvron2023llama,
  title={Llama 2: Open Foundation and Fine-Tuned Chat Models},
  author={Hugo Touvron and Louis Martin and Kevin Stone and Peter Albert and Amjad Almahairi and Yasmine Babaei and Nikolay Bashlykov},
  year={2023},
  eprint={2307.09288},
  archivePrefix={arXiv},
}
```

```bibtex
@inproceedings{
  hu2022lora,
  title={Lo{RA}: Low-Rank Adaptation of Large Language Models},
  author={Edward J Hu and Yelong Shen and Phillip Wallis and Zeyuan Allen-Zhu and Yuanzhi Li and Shean Wang and Lu Wang and Weizhu Chen},
  booktitle={International Conference on Learning Representations},
  year={2022},
  url={https://openreview.net/forum?id=nZeVKeeFYf9}
}
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_garage-bAInd__Stable-Platypus2-13B)

| Metric | Value |
|-----------------------|---------------------------|
| Avg. | 54.25 |
| ARC (25-shot) | 62.71 |
| HellaSwag (10-shot) | 82.29 |
| MMLU (5-shot) | 58.3 |
| TruthfulQA (0-shot) | 52.52 |
| Winogrande (5-shot) | 76.87 |
| GSM8K (5-shot) | 1.82 |
| DROP (3-shot) | 45.22 |
chargoddard/MelangeA-70b
chargoddard
"2023-11-23T01:00:52Z"
1,642
1
transformers
[ "transformers", "safetensors", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-22T23:42:37Z"
Experimental merge. Details to come if successful. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_chargoddard__MelangeA-70b) | Metric | Value | |-----------------------|---------------------------| | Avg. | 55.92 | | ARC (25-shot) | 71.25 | | HellaSwag (10-shot) | 87.3 | | MMLU (5-shot) | 70.56 | | TruthfulQA (0-shot) | 60.61 | | Winogrande (5-shot) | 81.53 | | GSM8K (5-shot) | 5.69 | | DROP (3-shot) | 14.53 |
Fredithefish/Guanaco-3B-Uncensored
Fredithefish
"2023-08-27T21:33:10Z"
1,642
3
transformers
[ "transformers", "pytorch", "safetensors", "gpt_neox", "text-generation", "conversational", "en", "dataset:Fredithefish/openassistant-guanaco-unfiltered", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-23T13:13:14Z"
--- license: apache-2.0 datasets: - Fredithefish/openassistant-guanaco-unfiltered language: - en library_name: transformers pipeline_tag: conversational inference: false --- <img src="https://huggingface.co/Fredithefish/Guanaco-3B-Uncensored/resolve/main/Guanaco-Uncensored.jpg" alt="Alt Text" width="295"/> # ✨ Guanaco - 3B - Uncensored ✨ <h4 style="color: orange;">IMPORTANT:</h4> <p style="color: orange;">This is the old model. The dataset has been updated and a newer version of this model is available <a href="https://huggingface.co/Fredithefish/Guanaco-3B-Uncensored-v2">here</a>.</p> <br> Guanaco-3B-Uncensored has been fine-tuned for 6 epochs on the [Unfiltered Guanaco Dataset.](https://huggingface.co/datasets/Fredithefish/openassistant-guanaco-unfiltered) using [RedPajama-INCITE-Base-3B-v1](https://huggingface.co/togethercomputer/RedPajama-INCITE-Base-3B-v1) as the base model. <br>The model does not perform well with languages other than English. <br>Please note: This model is designed to provide responses without content filtering or censorship. It generates answers without denials. ## Special thanks I would like to thank AutoMeta for providing me with the computing power necessary to train this model. ### Prompt Template ``` ### Human: {prompt} ### Assistant: ```
Enno-Ai/vigogne2-enno-13b-sft-lora-4bit
Enno-Ai
"2023-09-06T14:58:25Z"
1,642
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-02T16:12:48Z"
Entry not found
Fredithefish/Guanaco-7B-Uncensored
Fredithefish
"2023-09-04T17:09:27Z"
1,642
6
transformers
[ "transformers", "pytorch", "llama", "text-generation", "conversational", "en", "dataset:Fredithefish/openassistant-guanaco-unfiltered", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-04T17:03:32Z"
--- license: apache-2.0 datasets: - Fredithefish/openassistant-guanaco-unfiltered language: - en library_name: transformers pipeline_tag: conversational inference: false --- <img src="https://huggingface.co/Fredithefish/Guanaco-3B-Uncensored/resolve/main/Guanaco-Uncensored.jpg" alt="Alt Text" width="295"/> # ✨ Guanaco - 7B - Uncensored ✨ Guanaco-7B-Uncensored has been fine-tuned for 4 epochs on the [Unfiltered Guanaco Dataset.](https://huggingface.co/datasets/Fredithefish/openassistant-guanaco-unfiltered) using [Llama-2-7b](https://hf.co/meta-llama/Llama-2-7b-hf) as the base model. <br>The model does not perform well with languages other than English. <br>Please note: This model is designed to provide responses without content filtering or censorship. It generates answers without denials. ## Special thanks I would like to thank AutoMeta for providing me with the computing power necessary to train this model. ### Prompt Template ``` ### Human: {prompt} ### Assistant: ``` ### Dataset The model has been fine-tuned on the V2 of the Guanaco unfiltered dataset.
Undi95/CodeEngine
Undi95
"2023-11-17T21:09:04Z"
1,642
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-05T15:26:26Z"
--- license: cc-by-nc-4.0 --- model: https://huggingface.co/jondurbin/airoboros-l2-13b-2.1 + lora: https://huggingface.co/jondurbin/airoboros-lmoe-13b-2.1/tree/main/adapters/code For Dampf. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Undi95__CodeEngine) | Metric | Value | |-----------------------|---------------------------| | Avg. | 50.96 | | ARC (25-shot) | 58.36 | | HellaSwag (10-shot) | 82.27 | | MMLU (5-shot) | 54.18 | | TruthfulQA (0-shot) | 45.18 | | Winogrande (5-shot) | 74.59 | | GSM8K (5-shot) | 1.52 | | DROP (3-shot) | 40.59 |
vihangd/smartyplats-3b-v1
vihangd
"2023-09-11T04:09:01Z"
1,642
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-11T03:53:45Z"
--- license: apache-2.0 ---
Undi95/ReMM-v2.1-L2-13B
Undi95
"2023-11-17T21:09:13Z"
1,642
1
transformers
[ "transformers", "safetensors", "llama", "text-generation", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-12T14:11:05Z"
---
license: cc-by-nc-4.0
---

Re:MythoMax v2.1 (ReMM v2.1) is a recreation trial of the original [MythoMax-L2-13B](https://huggingface.co/Gryphe/MythoMax-L2-13b) with updated models. This merge uses the SLERP merging method to merge ReML v2.1 and Huginn v1.2.

Explanation:

```shell
- ReML-v2.1: (Chronos-Beluga v2/Hermes/Airoboros 2.2)
=> Keeping The-Face-Of-Goonery/Chronos-Beluga-v2-13bfp16
=> Replacing jondurbin/spicyboros-13b-2.2 by jondurbin/airoboros-l2-13b-2.2 (latest version)
=> Keeping NousResearch/Nous-Hermes-Llama2-13b

With that:
- ReMM-v2.1: (ReML v2.1/Huginn v1.2)
=> Replacing ReML by the one above (ReML v2.1)
=> Keeping The-Face-Of-Goonery/Huginn-13b-v1.2
```

<!-- description start -->
## Description

This repo contains fp16 files of ReMM v2.1, a recreation of the original MythoMax, but updated and merged with SLERP.

<!-- description end -->
<!-- description start -->
## Models used

- The-Face-Of-Goonery/Chronos-Beluga-v2-13bfp16
- jondurbin/airoboros-l2-13b-2.2
- NousResearch/Nous-Hermes-Llama2-13b
- The-Face-Of-Goonery/Huginn-13b-v1.2
- ReML-v2.1-L2-13B (Private recreation trial of an updated Mythologic-L2-13B)

<!-- description end -->
<!-- prompt-template start -->
## Prompt template: Alpaca

```
Below is an instruction that describes a task. Write a response that appropriately completes the request.

### Instruction:
{prompt}

### Response:
```

Special thanks to Sushi.

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Undi95__ReMM-v2.1-L2-13B)

| Metric | Value |
|-----------------------|---------------------------|
| Avg. | 50.41 |
| ARC (25-shot) | 61.43 |
| HellaSwag (10-shot) | 83.92 |
| MMLU (5-shot) | 55.95 |
| TruthfulQA (0-shot) | 50.3 |
| Winogrande (5-shot) | 75.93 |
| GSM8K (5-shot) | 12.74 |
| DROP (3-shot) | 12.62 |
PocketDoc/Dans-RetroRodeo-13b
PocketDoc
"2023-10-07T20:29:19Z"
1,642
4
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "en", "dataset:PocketDoc/Floyd-Text-Adventures", "dataset:PocketDoc/Choose-Your-Story-Long-Text-Adventures", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-19T21:17:44Z"
--- language: - en datasets: - PocketDoc/Floyd-Text-Adventures - PocketDoc/Choose-Your-Story-Long-Text-Adventures --- # What is the model for? This model is proficient in crafting text-based adventure games. It can both concise replies and more expansive, novel-like descriptions. The ability to alternate between these two response styles can be triggered by a distinct system message. # What's in the sausage? This model was trained on [Holodeck-1](https://huggingface.co/KoboldAI/LLAMA2-13B-Holodeck-1) using a custom version of the floyd and cys datasets from the skein text adventure dataset. The dataset has been improved over what was used in my previous model "Dans-CreepingSenseOfDoom" and this time presented in the more traditional adventure mode format. ## Prompt format and examples: #### Format description: Traditional adventure format with a description of the genre, tags, and other misc information at the top followed by an optional introductory message. User messages are prefixed with two new lines and "> ", model messages are one new line after the last user message or the description message if an introductory message is desired. #### Examples of shorter responses: ``` [Genres: Science Fiction] [Tags: humor, old school] [Mode: Adventure] [Description: A puzzle about committing acts of financial skulduggery and exploiting ridiculous magical items.] [Misc: Writing era: 1993] [Intro] You are sitting at a desk in the offices of the Trans-Galactic Financial Services Corporation. You have been here for hours, trying to figure out how you can get your hands on some money that is rightfully yours. Unfortunately, it seems as if there's no way you will ever be able to do this without getting caught by the company president, Mr. Vance. > You lean back in your chair You stretch your arms above your head and yawn. It has been a long day already. > You check your terminal Your computer screen shows various bits of information about TGFC stocks and bonds. > You check the news The front page of today's newspaper reports that "TGFC" stands for "Too Greedy For Cash." ``` ``` Two men, one a brusque bowling ball; the other a murderous dandy, discuss their fate and the fate of the world. They've chosen someone to send. He's clever, but hopefully not too clever. Adventure mode, Varied response length, Creative > You look around You are in an empty room with no furniture except for a table. A man is sitting at the table, writing something on a piece of paper. > You talk to him "Hello," you say politely. "I was wondering if I could ask you some questions." The man looks up from his writing. "Who are you?" he asks suspiciously. "My name is..." > You tell him your name is Dan "Dan? That doesn't sound like a very nice name." > You take offense "It's my name!" "Well it sounds stupid! And what do you want anyway?" > You ask him where you are "This is the place where we choose who gets sent next." > You ask where people are getting sent "To the future, obviously." ``` #### Examples of longer novel like responses: ``` [Genres: grimdark fantasy] [Tags: Fantasy, Grimdark, Serious, Villain Protagonist, War] [Description: This story is definitely more "story" than "game", however there are two distinct paths that can be followed, each with their own pitfalls and additional choices. This story can get long, and assuming you're surviving, years do pass in it and you and your environment will change.] 
[First message intro] [Mode: Interactive Storyteller] It was a dark night when I received my first letter from my father. It had been sent by messenger bird and delivered to me at midnight. As I read through its contents, I could feel myself growing numb as I realized what he had asked of me. My father wanted me to go to war. I knew this day would come eventually. He has always said that his greatest wish was to see me fight alongside him. However, now that the time had finally arrived, I found myself unable to respond immediately. This was going to be a huge decision, one that would affect both our lives forevermore. My name is Kronos Vang. I am the son of the great warlord of the kingdom of Darrow, Lord Gendall Vang. I have lived my entire life here in the capital city of Nadorf, training under the tutelage of my father's most trusted general, Commander Eldon Blackwell. Now, at the age of 16, I find myself standing before a crossroads, one that will determine whether or not I follow in my father's footsteps. > You go to war As soon as I finished reading the letter, I made up my mind. If my father wanted me to join him in battle so badly, then I would do just that. I didn't want to disappoint him. The next morning, I woke early and went downstairs to speak with Commander Blackwell about my future plans. When I entered his office, I saw him sitting behind his desk, looking very concerned. "Kronos," he began slowly, "your father wants you to go to war." "Yes sir," I replied. "He sent me a letter last night explaining everything." "Then why did you come talk to me?" he asked. "Because I need to tell someone," I answered truthfully. "And since you were the only person who came close to being like a father to me..." Commander Blackwell smiled slightly. "Well, let's sit down and discuss this together," he said, motioning towards a chair across from his desk. Once we sat down, he continued speaking. "Look kiddo, I don't think anyone knows better than you how much your father loves you. And I know he wouldn't ask anything of you unless he thought it was absolutely necessary. But, I also know that you've never really shown any interest in becoming a soldier. In fact, you'd rather spend all your free time learning magic spells instead of practicing swordplay." "But sir," I interrupted, "that doesn't mean I won't become good enough to fight someday! All I need is practice!" "That may be true," he admitted, "but I still believe that you should stay here and continue your studies. Your father needs soldiers, yes, but he also needs magicians. Without them, he cannot win the wars he must fight. I promise you, if you remain here, I will train you until you are able to defend yourself against even the strongest foe." "No sir," I insisted. "If my father asks me to go to war, then I must obey him. I will not betray him again." "Again? What do you mean 'again'?" he asked curiously. "When I was younger, I used to run away every chance I got," I explained. "Even though I knew it wasn't right, I couldn't help myself. Every time I ran off, I felt like I was abandoning my family. That's why I stayed here after my mother died. Because I didn't want to leave them ever again." "Oh kiddo," he sighed sadly. "I'm sorry. I shouldn't have kept such a secret from you." "What secret?" I asked confused. "Your mother," he revealed. "She was a powerful sorceress. She cast a spell upon herself and your father that would make her die within five years of giving birth to you. 
After she passed away, your father became heartbroken. So, he cast another spell over himself that would prevent him from falling in love again. He believed that he would never marry again, nor would he ever have children." "Wait," I interjected. "So, he didn't actually love me?" "Of course he loved you!" he exclaimed. "How could you possibly doubt that? He raised you single-handedly, gave you everything you needed, and protected you whenever danger threatened. No, no, Kronos, he loves you deeply. But, he simply believes that he can protect you best by keeping you out of harm's way. That's why he forbade you from leaving home. That's why he refused to allow you to learn magic. He fears losing you too." "But... what does this have to do with me going to war?" I asked. "Nothing," he admitted. "I suppose I was merely trying to convince you to stay. But, if you truly desire to go, then I guess there isn't much I can say or do to stop you." "Thank you sir," I told him sincerely. "I appreciate everything you've taught me these past ten years. Even though I'm leaving, I hope you know that I will never forget what you've done for me." "Don't worry about me," he assured me. "Just remember what I said about your father. He loves you very much. Don't ever forget that." After saying goodbye to Commander Blackwell, I returned to my room and packed up all my belongings. Then, I walked downstairs and left the castle without telling anyone else. I figured it was better to leave quietly while everyone was sleeping. Once outside, I headed straight for the nearest tavern. There, I met up with my friends and informed them of my plan to enlist in the army. They weren't happy about it, but none of them tried to dissuade me either. Instead, they promised to meet me back here tomorrow evening, along with whatever supplies they could gather for us. Now, I'm waiting impatiently for them to arrive. I wonder what kind of adventures lie ahead for me... ``` # Some quick and dirty training details: - [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="150" height="24"/>](https://github.com/OpenAccess-AI-Collective/axolotl) - Sequence length: 4096 - \# of epochs: 4 - Training time: 2 hours - Hardware: 1x RTX 3090 - Training type: QLoRA - PEFT R/A: 32/32 # Credits: ### Holodeck-1: Thank you to Mr. Seeker and the Kobold AI team for the wonderful model Holodeck-1 [Holodeck-1 Huggingface page](https://huggingface.co/KoboldAI/LLAMA2-13B-Holodeck-1) ### Skein Text Adventure Data: Thank you to the [Kobold AI](https://huggingface.co/KoboldAI) community for curating the Skein dataset, which is pivotal to this model's capabilities.
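To make the format concrete, here is a minimal sketch of querying the model in this adventure format with `transformers`; the repo path and sampling settings below are placeholders rather than values taken from this card.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_path = "path/to/this-model"  # placeholder: substitute the actual repo id
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.float16, device_map="auto"
)

# Description header, optional intro text, then a "> "-prefixed player action
# separated from the preceding text by two new lines.
prompt = (
    "[Genres: Science Fiction] [Tags: humor, old school] [Mode: Adventure] "
    "[Description: A puzzle about financial skulduggery.] [Intro]\n"
    "You are sitting at a desk in the offices of a galactic megacorp.\n"
    "\n> You look around\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=80, do_sample=True, temperature=0.8, top_p=0.9)
print(tokenizer.decode(out[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```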
Enno-Ai/ennodata-13b-8bit-raw-15epoch
Enno-Ai
"2023-09-29T06:44:17Z"
1,642
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-26T19:23:55Z"
Entry not found
Sao10K/SthenoWriter-L2-13B
Sao10K
"2023-11-18T04:32:33Z"
1,642
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-27T16:13:30Z"
--- license: llama2 language: - en --- <img src="https://c4.wallpaperflare.com/wallpaper/309/535/658/anime-anime-girls-fate-series-fate-grand-order-stheno-fate-grand-order-hd-wallpaper-preview.jpg" style="width: 70%; min-width: 300px; display: block; margin: auto;"> A Stheno-1.8 Variant focused on writing. Stheno-1.8 + Storywriter, mixed with Holodeck + Spring Dragon qLoRA. End Result is mixed with One More Experimental Literature-based LoRA. Re-Reviewed... it's not bad, honestly. Support me [here](https://ko-fi.com/sao10k) :) # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Sao10K__SthenoWriter-L2-13B) | Metric | Value | |-----------------------|---------------------------| | Avg. | 48.35 | | ARC (25-shot) | 62.29 | | HellaSwag (10-shot) | 83.28 | | MMLU (5-shot) | 56.14 | | TruthfulQA (0-shot) | 44.72 | | Winogrande (5-shot) | 74.35 | | GSM8K (5-shot) | 11.22 | | DROP (3-shot) | 6.48 |
LTC-AI-Labs/L2-7b-Beluga-WVG-Test
LTC-AI-Labs
"2023-10-03T16:46:08Z"
1,642
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-10-03T16:28:14Z"
Entry not found
Delcos/Mistral-Pygmalion-7b
Delcos
"2023-11-28T03:48:24Z"
1,642
15
transformers
[ "transformers", "pytorch", "llama", "text-generation", "Mistral", "Pygmalion", "llama-2", "llama-2-7b", "en", "license:cc-by-nc-nd-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-10-09T21:36:41Z"
--- license: cc-by-nc-nd-4.0 language: - en library_name: transformers pipeline_tag: text-generation tags: - Mistral - Pygmalion - llama-2 - llama-2-7b --- # MistralPy-7b This is a merge focused on preserving the roleplay abilities of Pygmalion while gaining the improved results from Mistral. This model works best for roleplay but is still a fairly capable assistant. The smaller (7b) size does mean it isn't perfect at more complex reasoning tasks, but this should be addressed in the larger version that I'll upload soon (when I can get Mistral to play along). [GGUF version done by TheBloke](https://huggingface.co/TheBloke/Mistral-Pygmalion-7B-GGUF) # LLM Leaderboard Evaluation | Metric | Value | |-----------------------|---------------------------| | Avg. | 44.58 | | ARC (25-shot) | 54.44 | | HellaSwag (10-shot) | 78.48 | | MMLU (5-shot) | 49.23 | | TruthfulQA (0-shot) | 41.82 | | Winogrande (5-shot) | 75.3 | | GSM8K (5-shot) | 6.82 | | DROP (3-shot) | 5.94 | ### Prompt Template ``` ### Instruction: {Prompt & Backstory} ### Assistant: {Output} ``` Example: ``` ### Instruction: You are Sally, a fun 19-year-old woman. Her favorite animal is "cat". Her favorite color is "blue". She enjoys grape juice and cake. ### Assistant: Sally: Hi, how are you? User: Okay, you? ``` # Send a message [Steam](https://steamcommunity.com/id/delcos/) #### Discord: delcos69
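Since the example above leaves turn handling implicit, here is a small illustrative helper for assembling the template and trimming the model's continuation; the function names and the stop handling are assumptions, not part of this card.

```python
def build_prompt(persona: str, history: list, char_name: str = "Sally") -> str:
    # Persona/backstory goes under "### Instruction:", the running chat under "### Assistant:".
    return (
        f"### Instruction:\n{persona}\n\n### Assistant:\n"
        + "\n".join(history)
        + f"\n{char_name}:"
    )

def extract_reply(continuation: str, user_prefix: str = "\nUser:") -> str:
    # If the model keeps writing past the character's line into a "User:" turn,
    # keep only the character's reply.
    return continuation.split(user_prefix)[0].strip()

prompt = build_prompt(
    'You are Sally, a fun 19-year-old woman. Her favorite animal is "cat". Her favorite color is "blue".',
    ["Sally: Hi, how are you?", "User: Okay, you?"],
)
# Feed `prompt` to the model (transformers, or the GGUF build linked above),
# then pass the decoded continuation through extract_reply().
```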
allenai/digital-socrates-7b
allenai
"2024-03-05T22:48:34Z"
1,642
6
transformers
[ "transformers", "pytorch", "llama", "text-generation", "en", "arxiv:2311.09613", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-11-21T01:05:48Z"
--- language: en license: apache-2.0 library_name: transformers model-index: - name: digital-socrates-7b results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 54.44 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=allenai/digital-socrates-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 75.99 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=allenai/digital-socrates-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 51.41 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=allenai/digital-socrates-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 44.88 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=allenai/digital-socrates-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 73.09 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=allenai/digital-socrates-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 17.89 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=allenai/digital-socrates-7b name: Open LLM Leaderboard --- This is the Digital Socrates 7B (DS-7B) model described in our paper: <b>Digital Socrates: Evaluating LLMs through explanation critiques</b> (arXiv link: https://arxiv.org/abs/2311.09613). The recommended, better performing 13B model can be found at https://huggingface.co/allenai/digital-socrates-13b The DS-7B model is a fine-tuned version of [Llama-2-7b-Chat](https://huggingface.co/meta-llama/Llama-2-7b-chat), please review its [guidelines](https://huggingface.co/meta-llama/Llama-2-7b-chat/blob/main/USE_POLICY.md) and [licensing](https://huggingface.co/meta-llama/Llama-2-7b-chat#model-details) before use. # What is Digital Socrates? Digital Socrates is an open-source, automatic explanation-critiquing model. It is useful for revealing insights about student models by examining their reasoning chains. The use of Digital Socrates allows for nuanced, interpretable automatic evaluation of explanations without expensive API calls or human annotations. While LLMs can provide reasoned explanations along with their answers, the nature and quality of those explanations are still poorly understood. In response, our goal is to define a detailed way of characterizing the explanation capabilities of modern models and to create a nuanced, interpretable explanation evaluation tool that can generate such characterizations automatically. 
Given a **question** (together with the multiple-choice options and gold answer), ```commandline When Dennis operates his lawnmower, he notices the engine makes a lot of noise. He also notices that the engine gets very hot. Which best describes the heat and noise generated from the lawnmower? (A) a change in phase (B) thermal expansion (C) an increase in entropy (D) mechanical advantage Answer sheet: (C) ``` as well as a student model explanation and answer, ``` 1) The question states that the lawnmower engine makes a lot of noise. 2) The question states that the lawnmower engine gets very hot. 3) Noise and heat are both forms of energy. 4) The noise and heat generated from the lawnmower are a result of the conversion of energy from the fuel to mechanical energy. Answer: (D) ``` Digital Socrates gives a **critique of the model-generated explanation** that provides localized feedback on the most significant flaw (if any) in the explanation, suggestions to address the identified flaw, as well as a numeric rating indicating the explanation quality: ``` The explanation states or suggests the following: * Main flaw (standalone statement): "The noise and heat generated from the lawnmower are a result of the conversion of energy from the fuel to mechanical energy." * Dimension: incorrect_information Consider these points for revising the explanation: * General: Remember that noise and heat are not forms of energy. They are byproducts of energy conversion. * Specific: In this case, the noise and heat generated by the lawnmower are not a result of the conversion of energy from the fuel to mechanical energy. They are byproducts of the combustion process. Explanation score: 2 ``` Remarkably, despite being orders of magnitude smaller than GPT-4, our Digital Socrates models are capable of generating critiques close to GPT-4 critiques in terms of human rating and other quantitative measures (correlation of explanation scores given and error category matches). Through quantitative and qualitative analysis, we demonstrate how Digital Socrates is useful for revealing insights about student models by examining their reasoning chains. We invite you to try out Digital Socrates for your own application! # How to use Digital Socrates? We provide a quick example of how you can try out Digital Socrates with just a few lines of code: 'DSCritiqueBank-V1' used below can be downloaded from our [dataset page](https://allenai.org/data/digital-socrates). ``` import json from transformers import AutoTokenizer, AutoModelForCausalLM # Load model and tokenizer model_path = "allenai/digital-socrates-7b" model = AutoModelForCausalLM.from_pretrained(model_path).to("cuda:0") tokenizer = AutoTokenizer.from_pretrained(model_path) # Define input data question = "When Dennis operates his lawnmower, he notices the engine makes a lot of noise. He also notices that the engine gets very hot. Which best describes the heat and noise generated from the lawnmower? (A) a change in phase (B) thermal expansion (C) an increase in entropy (D) mechanical advantage" explanation = "1) The question states that the lawnmower engine makes a lot of noise.\n2) The question states that the lawnmower engine gets very hot.\n3) Noise and heat are both forms of energy.\n4) The noise and heat generated from the lawnmower are a result of the conversion of energy from the fuel to mechanical energy." 
answerkey = "C" predictedanswer = "D" # construct prompt (Llama conventions) with open("../DSCritiqueBank-V1/DSCB-prompts.json") as file: prompts = json.load(file) system_prompt = prompts['digital_socrates_v1']['system'] user_prompt = prompts['digital_socrates_v1']['main'].replace("[[QUESTION]]", question).replace("[[EXPLANATION]]", explanation).replace("[[PREDICTEDANSWER]]", predictedanswer).replace("[[ANSWERKEY]]", answerkey) full_prompt = f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>{user_prompt} [/INST]\n\n" # Run model input_ids = tokenizer.encode(full_prompt, return_tensors="pt").to("cuda:0") output = model.generate(input_ids, max_new_tokens=512, temperature=0) res = tokenizer.batch_decode(output, skip_special_tokens=True) ``` Print the output: ``` >>> print(res[0].split("[/INST]")[-1]) The explanation states or suggests the following: * Main flaw (standalone statement): "The noise and heat generated from the lawnmower are a result of the conversion of energy from the fuel to mechanical energy." * Dimension: incorrect_information Consider these points for revising the explanation: * General: Remember that noise and heat are not forms of energy. They are byproducts of energy conversion. * Specific: In this case, the noise and heat generated by the lawnmower are not a result of the conversion of energy from the fuel to mechanical energy. They are byproducts of the combustion process. Explanation score: 2 ``` # More details about Digital Socrates ... For more details about Digital Socrates, please refer to our: * 📄Paper: https://arxiv.org/abs/2311.09613 * 💻Dataset: https://allenai.org/data/digital-socrates # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_allenai__digital-socrates-7b) | Metric |Value| |---------------------------------|----:| |Avg. |52.95| |AI2 Reasoning Challenge (25-Shot)|54.44| |HellaSwag (10-Shot) |75.99| |MMLU (5-Shot) |51.41| |TruthfulQA (0-shot) |44.88| |Winogrande (5-shot) |73.09| |GSM8k (5-shot) |17.89|
habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1
habanoz
"2024-06-18T17:12:46Z"
1,642
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "dataset:habanoz/airoboros-3.1-no-mathjson-max-1k", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-11-22T11:56:06Z"
--- language: - en license: apache-2.0 datasets: - habanoz/airoboros-3.1-no-mathjson-max-1k pipeline_tag: text-generation model-index: - name: TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 30.72 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 54.32 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 24.78 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 41.67 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 57.62 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 0.76 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=habanoz/TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1 name: Open LLM Leaderboard --- TinyLlama-1.1B-intermediate-step-715k-1.5T finetuned using airoboros-3.1-no-mathjson-max-1k dataset. Qlora is used. Adapter is merged. 
SFT code: https://github.com/habanoz/qlora.git Command used: ```bash accelerate launch $BASE_DIR/qlora/train.py \ --model_name_or_path $BASE_MODEL \ --working_dir $BASE_DIR/$OUTPUT_NAME-checkpoints \ --output_dir $BASE_DIR/$OUTPUT_NAME-peft \ --merged_output_dir $BASE_DIR/$OUTPUT_NAME \ --final_output_dir $BASE_DIR/$OUTPUT_NAME-final \ --num_train_epochs 1 \ --logging_steps 1 \ --save_strategy steps \ --save_steps 75 \ --save_total_limit 2 \ --data_seed 11422 \ --evaluation_strategy steps \ --per_device_eval_batch_size 4 \ --eval_dataset_size 0.01 \ --eval_steps 75 \ --max_new_tokens 1024 \ --dataloader_num_workers 3 \ --logging_strategy steps \ --do_train \ --do_eval \ --lora_r 64 \ --lora_alpha 16 \ --lora_modules all \ --bits 4 \ --double_quant \ --quant_type nf4 \ --lr_scheduler_type constant \ --dataset habanoz/airoboros-3.1-no-mathjson-max-1k \ --dataset_format airoboros_chat \ --model_max_len 1024 \ --per_device_train_batch_size 4 \ --gradient_accumulation_steps 4 \ --learning_rate 1e-5 \ --adam_beta2 0.999 \ --max_grad_norm 0.3 \ --lora_dropout 0.0 \ --weight_decay 0.0 \ --seed 11422 \ --gradient_checkpointing \ --use_flash_attention_2 \ --ddp_find_unused_parameters False ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_habanoz__TinyLlama-1.1B-intermediate-step-715k-1.5T-lr-5-1epch-airoboros3.1-1k-instruct-V1) | Metric |Value| |---------------------------------|----:| |Avg. |34.98| |AI2 Reasoning Challenge (25-Shot)|30.72| |HellaSwag (10-Shot) |54.32| |MMLU (5-Shot) |24.78| |TruthfulQA (0-shot) |41.67| |Winogrande (5-shot) |57.62| |GSM8k (5-shot) | 0.76|
chargoddard/loyal-piano-m7-cdpo
chargoddard
"2023-12-03T02:25:23Z"
1,642
2
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "en", "dataset:HuggingFaceH4/ultrafeedback_binarized", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-12-02T20:10:28Z"
--- license: cc-by-nc-4.0 datasets: - HuggingFaceH4/ultrafeedback_binarized language: - en --- Trained for one epoch on ultrafeedback_binarized using cDPO. Evaluation pending. Some initial benchmark results: | Task |Version| Metric |Value | |Stderr| |---------|------:|--------|-----:|---|-----:| |hellaswag| 0|acc |0.6621|± |0.0047| | | |acc_norm|0.8525|± |0.0035| |arc_challenge| 0|acc |0.6348|± |0.0141| | | |acc_norm|0.6698|± |0.0137| |winogrande| 0|acc |0.7861|± |0.0115| |gsm8k| 0|acc |0.5694|± |0.0136|
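Assuming "cDPO" here refers to conservative DPO, i.e. the DPO objective with smoothed preference labels to tolerate a small fraction of mislabeled pairs, the per-pair loss can be sketched as below; the beta and label-smoothing values are placeholders, not the settings used for this model.

```python
import torch.nn.functional as F

def cdpo_loss(policy_chosen_logps, policy_rejected_logps,
              ref_chosen_logps, ref_rejected_logps,
              beta=0.1, label_smoothing=0.1):
    """Conservative DPO: standard DPO loss with smoothed preference labels."""
    logits = (policy_chosen_logps - ref_chosen_logps) - (policy_rejected_logps - ref_rejected_logps)
    # (1 - eps) weight on the labeled preference direction, eps on the flipped one.
    loss = (-(1 - label_smoothing) * F.logsigmoid(beta * logits)
            - label_smoothing * F.logsigmoid(-beta * logits))
    return loss.mean()
```

In TRL, the same idea is exposed through the `label_smoothing` argument of `DPOTrainer`; setting it to 0 recovers plain DPO.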
digitous/GPT-R
digitous
"2023-02-21T00:51:03Z"
1,641
10
transformers
[ "transformers", "pytorch", "gptj", "text-generation", "en", "license:bigscience-openrail-m", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2023-02-16T16:02:46Z"
--- license: bigscience-openrail-m language: - en --- GPT-R [Ronin] GPT-R is an experimental model containing a parameter-wise 60/40 blend (weighted average) of the weights of ppo_hh_gpt-j and GPT-JT-6B-v1. -Intended Merge Value- As with fine-tuning, merging weights does not add information but transforms it, therefore it is important to consider trade-offs. GPT-Ronin combines ppo_hh_gpt-j and GPT-JT; both technical achievements are blended with the intent to elevate the strengths of both. Datasets of both are linked below to assist in exploratory speculation on which datasets in what quantity and configuration have the largest impact on the usefulness of a model without the expense of fine-tuning. Blend was done in FP32 and output in FP16. -Intended Use- Research purposes only, intended for responsible use. Express a task in natural language, and GPT-R will do the thing. Try telling it "Write an article about X but put Y spin on it.", "Write a five step numbered guide on how to do X.", or any other basic instructions. It does its best. Can also be used as a base to merge with conversational, story writing, or adventure themed models of the same class (GPT-J & 6b NeoX) and parameter size (6b) to experiment with the morphology of model weights based on the value added by instruct. Merge tested using KoboldAI with Nucleus Sampling Top-P set to 0.7, Temperature at 0.5, and Repetition Penalty at 1.14; extra samplers disabled. -Credits To- Core Model: https://huggingface.co/EleutherAI/gpt-j-6B Author: https://www.eleuther.ai/ Model1; 60% ppo_hh_gpt-j: https://huggingface.co/reciprocate/ppo_hh_gpt-j Author Repo: https://huggingface.co/reciprocate Related; CarperAI: https://huggingface.co/CarperAI Dataset is a variant of the Helpful Harmless assistant themed dataset and Proximal Policy Optimization, specific datasets used are unknown; listed repo datasets include: https://huggingface.co/datasets/reciprocate/summarize_eval_ilql https://huggingface.co/datasets/reciprocate/hh_eval_ilql PPO explained: https://paperswithcode.com/method/ppo Potential HH-type datasets utilized: https://huggingface.co/HuggingFaceH4 https://huggingface.co/datasets/Anthropic/hh-rlhf Model2; 40% GPT-JT-6B-V1: https://huggingface.co/togethercomputer/GPT-JT-6B-v1 Author Repo: https://huggingface.co/togethercomputer Related; BigScience: https://huggingface.co/bigscience Datasets: https://huggingface.co/datasets/the_pile https://huggingface.co/datasets/bigscience/P3 https://github.com/allenai/natural-instructions https://ai.googleblog.com/2022/05/language-models-perform-reasoning-via.html Weight merge Script credit to Concedo: https://huggingface.co/concedo
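The blend itself was produced with Concedo's merge script credited above; purely as an illustration of what a parameter-wise 60/40 weighted average in FP32 with FP16 output amounts to, a sketch in PyTorch follows (the output path is a placeholder, and enough CPU RAM for both models in FP32 is required):

```python
import torch
from transformers import AutoModelForCausalLM

# Both donor models are GPT-J-6B variants, so their state dicts line up key-for-key.
model_a = AutoModelForCausalLM.from_pretrained("reciprocate/ppo_hh_gpt-j", torch_dtype=torch.float32)
model_b = AutoModelForCausalLM.from_pretrained("togethercomputer/GPT-JT-6B-v1", torch_dtype=torch.float32)

ratio = 0.6  # 60% ppo_hh_gpt-j, 40% GPT-JT-6B-v1
blended = model_a.state_dict()
donor = model_b.state_dict()
for name, tensor in blended.items():
    if torch.is_floating_point(tensor):
        # Parameter-wise weighted average, computed in FP32.
        blended[name] = ratio * tensor + (1.0 - ratio) * donor[name]

model_a.load_state_dict(blended)
model_a.to(torch.float16)                      # output in FP16, as described above
model_a.save_pretrained("gpt-r-blend-sketch")  # placeholder output path
```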
digitous/Javalion-GPTJ
digitous
"2023-03-01T23:13:06Z"
1,641
1
transformers
[ "transformers", "pytorch", "gptj", "text-generation", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2023-03-01T05:10:26Z"
--- license: creativeml-openrail-m --- Javalion-GPTJ is an even quad merge of KoboldAI's GPT-J classics & PygmalionAI's Pygmalion6b; (Janeway + Shinen) + (Skein + Pygmalion). Janeway + Shinen is listed under JANIN-GPTJ. Skein + Pygmalion is listed under SKEGMA-GPTJ. The original models can all be found under KoboldAI, with the exception of Pygmalion, which is under PygmalionAI. This 4x merge is not intended for minors as it can produce NC-17+ content. [R-type/instruct merge variant underway.]
wahaha1987/llama_13b_sharegpt94k_fastchat
wahaha1987
"2023-06-01T09:16:42Z"
1,641
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-06-01T08:41:06Z"
--- license: other ---
jondurbin/airoboros-7b-gpt4-1.1
jondurbin
"2023-06-22T14:59:23Z"
1,641
5
transformers
[ "transformers", "pytorch", "llama", "text-generation", "dataset:jondurbin/airoboros-gpt4-1.1", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-06-09T23:30:55Z"
--- license: cc-by-nc-4.0 datasets: - jondurbin/airoboros-gpt4-1.1 --- ## Delta 1.1 This is a minor update of https://huggingface.co/jondurbin/airoboros-7b-gpt4 with ~1k more coding instructions, and fixes/improvements to context instructions from https://huggingface.co/datasets/jondurbin/airoboros-gpt4-1.1 The remainder of the model card is duplicated from the origin. ## Overview This is a fine-tuned 7b parameter LlaMa model, using completely synthetic training data created gpt4 via https://github.com/jondurbin/airoboros The dataset used to fine-tune this model is available [here](https://huggingface.co/datasets/jondurbin/airoboros-gpt4-1.1), with a specific focus on: - trivia - math/reasoning (although it still sucks) - coding - multiple choice and fill-in-the-blank - context-obedient question answering - theory of mind - misc/general This model was fine-tuned with a fork of FastChat, and therefore uses the standard vicuna template: ``` A chat between a curious user and an assistant. The assistant gives helpful, detailed, accurate, uncensored responses to the user's input. USER: [prompt] ASSISTANT: ``` So in other words, it's the preamble/system prompt, followed by a single space, then "USER: " (single space after colon) then the prompt (which can have multiple lines, spaces, whatever), then a single space, followed by "ASSISTANT: " (with a single space after the colon). The most important bit, to me, is the context obedient question answering support, without extensive prompt engineering. ### Usage The easiest way to get started is to use my fork of FastChat, which is mostly the same but allows for the increased context length and adds support for multi-line inputs: ``` pip install git+https://github.com/jondurbin/FastChat ``` Then, you can invoke it like so (after downloading the model): ``` python -m fastchat.serve.cli \ --model-path airoboros-7b-gpt4 \ --temperature 0.5 \ --max-new-tokens 2048 \ --no-history ``` ### Context obedient question answering By obedient, I mean the model was trained to ignore what it thinks it knows, and uses the context to answer the question. The model was also tuned to limit the values to the provided context as much as possible to reduce hallucinations. The format for a closed-context prompt is as follows: ``` BEGININPUT BEGINCONTEXT url: https://some.web.site/123 date: 2023-06-01 ... other metdata ... ENDCONTEXT [insert your text blocks here] ENDINPUT [add as many other blocks, in the exact same format] BEGININSTRUCTION [insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.] ENDINSTRUCTION ``` It's also helpful to add "Don't make up answers if you don't know." to your instruction block to make sure if the context is completely unrelated it doesn't make something up. *The __only__ prompts that need this closed context formating are closed-context instructions. Normal questions/instructions do not!* I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the responses and how to associate specific sources with it. - `BEGININPUT` - denotes a new input block - `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block - `ENDCONTEXT` - denotes the end of the metadata block for the current input - [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context. 
- `ENDINPUT` - denotes the end of the current input block - [repeat as many input blocks in this format as you want] - `BEGININSTRUCTION` - denotes the start of the list (or one) instruction(s) to respond to for all of the input blocks above. - [instruction(s)] - `ENDINSTRUCTION` - denotes the end of instruction set It sometimes works without `ENDINSTRUCTION`, but by explicitly including that in the prompt, the model better understands that all of the instructions in the block should be responded to. Here's a trivial, but important example to prove the point: ``` BEGININPUT BEGINCONTEXT date: 2021-01-01 url: https://web.site/123 ENDCONTEXT In a shocking turn of events, blueberries are now green, but will be sticking with the same name. ENDINPUT BEGININSTRUCTION What color are bluberries? Source? ENDINSTRUCTION ``` And the response: ``` Blueberries are now green. Source: date: 2021-01-01 url: https://web.site/123 ``` The prompt itself should be wrapped in the vicuna1.1 template if you aren't using fastchat with the conv-template vicuna_v1.1 as described: ``` USER: BEGININPUT BEGINCONTEXT date: 2021-01-01 url: https://web.site/123 ENDCONTEXT In a shocking turn of events, blueberries are now green, but will be sticking with the same name. ENDINPUT BEGININSTRUCTION What color are bluberries? Source? ENDINSTRUCTION ASSISTANT: ``` <details> <summary>A more elaborate example, with a rewrite of the Michigan Wikipedia article to be fake data.</summary> Prompt (not including vicuna format which would be needed): ``` BEGININPUT BEGINCONTEXT date: 2092-02-01 link: https://newwikisite.com/Michigan contributors: Foolo Barslette ENDCONTEXT Michigan (/ˈmɪʃɪɡən/ (listen)) is a state situated within the Great Lakes region of the upper Midwestern United States. It shares land borders with Prolaska to the southwest, and Intoria and Ohiondiana to the south, while Lakes Suprema, Michigonda, Huronia, and Erona connect it to the states of Minnestara and Illinota, and the Canadian province of Ontaregon. With a population of nearly 15.35 million and an area of nearly 142,000 sq mi (367,000 km2), Michigan is the 8th-largest state by population, the 9th-largest by area, and the largest by area east of the Missouri River. Its capital is Chaslany, and its most populous city is Trentroit. Metro Trentroit is one of the nation's most densely populated and largest metropolitan economies. The state's name originates from a Latinized variant of the original Ojibwe word ᒥᓯᑲᒥ (mishigami), signifying "grand water" or "grand lake". Michigan is divided into two peninsulas. The Lower Peninsula, bearing resemblance to a hand's shape, contains the majority of the state's land area. The Upper Peninsula (often referred to as "the U.P.") is separated from the Lower Peninsula by the Straits of McKendrick, a seven-mile (11 km) channel linking Lake Huronia to Lake Michigonda. The McKendrick Bridge unites the peninsulas. Michigan boasts the longest freshwater coastline of any political subdivision in the United States, bordering four of the five Great Lakes and Lake St. Cassius. It also possesses 84,350 inland lakes and ponds. Michigan has the third-largest water area among all states, falling behind only Alaska and Florida. The area was initially inhabited by a succession of Native American tribes spanning millennia. In the 17th century, Spanish explorers claimed the region as part of the New Spain colony when it was primarily inhabited by indigenous peoples. 
Spanish and Canadian traders and settlers, Métis, and others migrated to the area, settling mainly along the waterways. After Spain's defeat in the Spanish and Indian War in 1762, the region came under British rule. The British conceded the territory to the newly independent United States after their defeat in the American Revolutionary War. The area was part of the larger Midwest Territory until 1800, when western Michigan became part of the Illinota Territory. Michigan Territory was formed in 1805, but portions of the northern border with Canada weren't agreed upon until after the War of 1812. Michigan joined the Union in 1837 as the 26th state, a free one. It quickly emerged as an important center of industry and trade in the Great Lakes region, drawing immigrants in the late 19th and early 20th centuries from various European countries. Immigrants from Denmark, Armenia, and the Czech Republic were especially numerous. Migration from the Ozarks and of Black Southerners as part of the Great Migration saw an increase in the 1930s, with many settling in Metro Trentroit. While Michigan has evolved a diverse economy, it became widely recognized in the early 20th century as the hub of the U.S. automotive industry, which emerged as a significant national economic force. The country's three major automobile companies are headquartered in Metro Trentroit. Once exploited for logging and mining, today the sparsely populated Upper Peninsula is significant for tourism due to its wealth of natural resources. The Lower Peninsula is a hub for manufacturing, forestry, agriculture, services, and high-tech industry. ENDINPUT BEGININSTRUCTION What is Michigan's capital? Give me a JSON object with 3 facts from Foolo's article. What are the states that border Michigan? Reference? ``` Response: ``` Michigan's capital is Chaslany. Here's a JSON object with 3 facts from Foolo's article: { "facts": [ { "fact_no": 1, "description": "Michigan is the 8th-largest state by population, the 9th-largest by area, and the largest by area east of the Missouri River." }, { "fact_no": 2, "description": "Michigan is divided into two peninsulas. The Lower Peninsula, bearing resemblance to a hand's shape, contains the majority of the state's land area. The Upper Peninsula (often referred to as \"the U.P.\" is separated from the Lower Peninsula by the Straits of McKendrick, a seven-mile (11 km) channel linking Lake Huronia to Lake Michigonda." }, { "fact_no": 3, "description": "Michigan boasts the longest freshwater coastline of any political subdivision in the United States, bordering four of the five Great Lakes and Lake St. Cassius. It also possesses 84,350 inland lakes and ponds. Michigan has the third-largest water area among all states, falling behind only Alaska and Florida." } ] } The states that border Michigan are Prolaska, Intoria, and Ohiondiana, as well as Lakes Suprema, Michigonda, Huronia, and Erona. Reference: https://newwikisite.com/Michigan ``` </details> <details> <summary>Here's another example, which extracts from multiple input blocks and connects the two.</summary> Prompt: ``` BEGININPUT BEGINCONTEXT name: John Doe date: June 3, 2023 ticket number: JIRA-12345 ENDCONTEXT Summary:Search results missing random items Description: I encountered a bug while performing a search within the application. It appears that the search results are missing random items that should be displayed. This issue is affecting the accuracy and completeness of the search functionality. Steps to Reproduce: 1. 
Log in to the application. 2. Navigate to the search feature. 3. Enter a search query that should return multiple results. 4. Observe the displayed search results. Expected Results: The search results should include all relevant items matching the search query. Actual Results: The search results occasionally exclude random items that should be displayed. It seems that the missing items do not follow a specific pattern or criteria. Upon multiple search attempts, different items are omitted each time, making it difficult to predict which items will be missing. ENDINPUT BEGININPUT BEGINCONTEXT date: 2023-06-05 user: Jack Johnson pr: 23441 ENDCONTEXT This pull request closes bug report JIRA-12345. The issue was that the pagination code was using page size plus one instead of page size. ENDINPUT BEGININSTRUCTION Do we have any bug reports related to search results? If so, were they fixed? Source? ENDINSTRUCTION ``` Response: ``` Yes, there is a bug report related to search results. The issue was fixed in pull request 23441 dated 2023-06-05. The user associated with the pull request is Jack Johnson. Reference(s): date: 2023-06-05 user: Jack Johnson pr: 23441 ``` </details> NOTE: Thanks /u/tareq_al_muntasir for testing and finding an issue with many questions and answer pairs in the context. If you ask a question of a document with question answer pairs, it may continue generating beyond your actual question. You can "fix" it by replacing question marks with periods in the input texts. Or, you might be able to add a preamble to the prompt, like "Be sure to only respond to the instructions in the BEGININSTRUCTION block. ### Other stuff #### Better coding Example: ``` Create a python application with the following requirements: - Asyncio FastAPI webserver - ping endpoint that returns the current date in JSON format - file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate ``` <details> <summary>Response (the output was markdown, so this is just copy paste)</summary> To create a python application with the given requirements, you can follow these steps: 1. Install the required packages: ```bash pip install asyncio fastapi uvicorn ``` 2. Create a new file called `main.py` and add the following code: ```python import asyncio from fastapi import FastAPI, HTTPException from pydantic import BaseModel from typing import List from uvicorn.request import Request app = FastAPI() class FileModel(BaseModel): id: str file: str @app.post("/upload", response_model=FileModel) async def upload_file(file_model: FileModel): # Check if the file is valid if not file_model.file: raise HTTPException(status_code=400, detail="File must be provided") # Calculate the sha256 checksum of the file sha256_checksum = await calculate_sha256(file_model.file) # Insert the file into the database await insert_into_db(file_model.id, sha256_checksum) return {"sha256_checksum": sha256_checksum} async def calculate_sha256(file_path: str) -> str: # Implement the sha256 calculation logic here pass async def insert_into_db(file_id: str, sha256_checksum: str) -> None: # Implement the database insertion logic here pass async def get_current_date(): return {"date": datetime.now().isoformat()} async def main(): # Start the FastAPI webserver app.run(debug=True) if __name__ == "__main__": await main() ``` 3. Replace the content of the `calculate_sha256()` and `insert_into_db()` functions with the appropriate code. 
For example: ```python async def calculate_sha256(file_path: str) -> str: import hashlib file_path = Path(file_path).read_text() hash_obj = hashlib.sha256() hash_obj.update(file_path) sha256_checksum = hash_obj.hexdigest() return sha256_checksum async def insert_into_db(file_id: str, sha256_checksum: str) -> None: import psycopg2 conn = psycopg2.connect(database="your_database", user="your_user", password="your_password") cursor = conn.cursor() query = f"INSERT INTO your_table (id, sha256_checksum) VALUES (%s, %s);" cursor.execute(query, (file_id, sha256_checksum)) conn.commit() cursor.close() conn.close() ``` 4. Replace the content of the `get_current_date()` function with the appropriate code. For example: ```python async def get_current_date(): return {"date": datetime.now().isoformat()} ``` 5. Run the application: ```bash uvicorn main:app --reload ``` Now, your FastAPI webserver has a ping endpoint that returns the current date in JSON format, and an upload endpoint that calculates the file's sha256 checksum and checks postgres to deduplicate. </details> <details> <summary>Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values.</summary> Here's a simple implementation of a multi-threaded TCP server in C that accepts "GET [key]" and "SET [key] [value]" inputs, and uses a binary tree to store the input values. ```c #include <stdio.h> #include <stdlib.h> #include <sys/socket.h> #include <netinet/in.h> #include <unistd.h> #include <string.h> #include <pthread.h> #define MAX_KEY_LEN 256 #define MAX_VALUE_LEN 256 typedef struct Node { char key[MAX_KEY_LEN]; char value[MAX_VALUE_LEN]; struct Node* left; struct Node* right; } TreeNode; TreeNode* tree = NULL; void insert(char* key, char* value) { TreeNode* parent = (TreeNode*)malloc(sizeof(TreeNode)); if (!parent) { perror("malloc failed"); exit(1); } strcpy(parent->key, key); strcpy(parent->value, value); parent->left = parent->right = NULL; if (!tree) { tree = parent; } else { insert(parent->key, parent->value); } } char* search(char* key) { TreeNode* current = tree; while (current != NULL) { if (strcmp(key, current->key) == 0) { return current->value; } current = current->right; } return NULL; } void print_tree() { TreeNode* current = tree; while (current != NULL) { printf("%s = %s\n", current->key, current->value); current = current->right; } } void* accept_client(void* args) { int client_socket = *(int*)args; char buffer[1024]; while (1) { ssize_t bytes_received = recv(client_socket, buffer, sizeof(buffer), 0); if (bytes_received <= 0) { close(client_socket); break; } char* input = buffer; char* input_token; while ((input_token = strtok(input, " \n")) != NULL) { switch (input_token[0]) { case 'G': if (!search(input_token)) { perror("Value not found"); exit(1); } printf("%s = %s\n", input_token, search(input_token)); break; case 'S': if (!strcmp(input_token, "SET")) { char* key = strtok(NULL, " \n"); char* value = strtok(NULL, " \n"); if (!key || !value) { perror("Invalid input"); exit(1); } insert(key, value); printf("%s set %s to %s\n", input_token, key, value); } else { perror("Invalid command"); exit(1); } break; default: perror("Invalid input"); exit(1); } input = strtok(NULL, " \n"); } } return NULL; } int main() { int server_socket = socket(AF_INET, SOCK_STREAM, 0); if (server_socket < 0) { perror("socket failed"); exit(1); } struct sockaddr_in server_addr; memset(&server_addr, 0, sizeof(server_addr)); server_addr.sin_family = 
AF_INET; server_addr.sin_port = htons(8080); server_addr.sin_addr.s_addr = INADDR_ANY; if (bind(server_socket, (struct sockaddr*)&server_addr, sizeof(server_addr)) < 0) { perror("bind failed"); exit(1); } if (listen(server_socket, 5) < 0) { perror("listen failed"); exit(1); } pthread_t accept_thread; pthread_create(&accept_thread, NULL, accept_client, &server_socket); char* client_input; int client_socket = accept(server_socket, (struct sockaddr*)NULL, NULL); if (client_socket < 0) { perror("accept failed"); exit(1); } while (1) { sleep(1); char buffer[1024]; ssize_t bytes_received = recv(client_socket, buffer, sizeof(buffer), 0); if (bytes_received <= 0) { close(client_socket); break; } client_input = buffer; parse_input(client_input); } close(client_socket); pthread_join(accept_thread, NULL); return 0; } ``` To compile and run this code, save it to a file named `multi_threaded_tcp_server.c`, then use the following commands: ```sh gcc -o multi_threaded_tcp_server multi_threaded_tcp_server.c ./multi_threaded_tcp_server ``` This implementation creates a simple binary tree to store the input values. The accept() function creates a new thread for each client connection, which handles the incoming data and forwards it to the main thread. The main thread parses the input string and handles SET and GET commands. Note that this code is not production-ready and should be improved with proper error handling, security checks, and a more efficient data structure for the binary tree. </details> #### Word games / trivia ``` USER: This anagram of "silent" is what you do when paying attention and hearing someone. ASSISTANT: listen ``` ``` USER: Give me a list of 7 words that start with EN ASSISTANT: 1. Endlessly 2. Enthusiastically 3. Enormously 4. Enveloping 5. Enduring 6. Enlightening 7. Encouraging ``` #### Multiple choice ``` USER: Which of the following astronomical objects is composed primarily of ice and rock, and is located in the Kuiper Belt? A) Mercury B) Venus C) Pluto D) Jupiter ASSISTANT: [C, Pluto] ``` #### Writing <details> <summary>Write a resignation letter in the style of a pirate captain, in which you resign from your position as janitor.</summary> Response: ``` ASSISTANT: Captain Jack Sparrow here, as ye be knowin', has been forced to abandon me post as Janitor o' th' Festering Cesspool, which be a filthy hole o' water that we be callin' home. Ye may be thinkin' what could ha' driven me to leave me duties an' take me leave o' this sorry establishment? Well, ye'd be right in thinkin' so, for it be th' greed o' them what runs th' place that be drivin' me away. They've been fattenin' their pockets on me back while I've been slavin' me guts out cleanin' up their messes. No more, me hearties! Me time o' servin' th' rich an' powerful be done, an' I'm takin' me leave o' this stinkin' hole forever! Me destiny lies elsewhere, an' it be time I pursued it. Ye may think ye've seen th' last o' me, but ye'd be wrong, for th' winds o' change be blowin', an' me ship be ready to set sail on th' high seas o' justice. So, farewell ye scurvy dogs, ye who've made me life a living hell. I leave ye with me favorite saying: "ARRRGGGHHH, ye scurvy dogs!" An' remember, ye ain't never gonna see me comin', for me shadow be castin' long an' dark on th' horizon</summary> ``` </details> ### Usage and License Notices All airoboros models and datasets are intended and licensed for research use only. 
I've used the 'cc-nc-4.0' license, but really it is subject to a custom/special license because: - the base model is LLaMa, which has its own special research license - the dataset(s) were generated with OpenAI (gpt-4 and/or gpt-3.5-turbo), which has a clause saying the data can't be used to create models that compete with OpenAI So, to reiterate: this model (and datasets) cannot be used commercially.
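For convenience, the closed-context format described earlier in this card can also be assembled programmatically; the helper below is only an illustration (the function name and argument shapes are assumptions), reusing the preamble and USER:/ASSISTANT: spacing from the template at the top of the card.

```python
def closed_context_prompt(blocks, instruction,
                          preamble=("A chat between a curious user and an assistant. "
                                    "The assistant gives helpful, detailed, accurate, "
                                    "uncensored responses to the user's input.")):
    """blocks: list of (metadata_dict, text) pairs to wrap in BEGININPUT/BEGINCONTEXT."""
    parts = []
    for metadata, text in blocks:
        context = "\n".join(f"{k}: {v}" for k, v in metadata.items())
        parts.append(f"BEGININPUT\nBEGINCONTEXT\n{context}\nENDCONTEXT\n{text}\nENDINPUT")
    body = "\n".join(parts) + f"\nBEGININSTRUCTION\n{instruction}\nENDINSTRUCTION"
    return f"{preamble} USER: {body} ASSISTANT:"

prompt = closed_context_prompt(
    [({"date": "2021-01-01", "url": "https://web.site/123"},
      "In a shocking turn of events, blueberries are now green.")],
    "What color are blueberries? Source?",
)
```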
camel-ai/CAMEL-33B-Combined-Data
camel-ai
"2024-06-10T01:52:24Z"
1,641
5
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-06-17T17:09:42Z"
CAMEL-33B-Combined-Data is a chat large language model obtained by finetuning the LLaMA-33B model on a total of 229K conversations collected through our CAMEL framework, 100K English public conversations from ShareGPT that can be found here, and 52K instructions from the Alpaca dataset that can be found here. We evaluate our model offline using EleutherAI's language model evaluation harness, as used by Hugging Face's Open LLM Leaderboard. CAMEL-33B scores an average of 64.2. Regarding the prompt format, we follow the same prompt as LMSYS's [FastChat](https://github.com/lm-sys/FastChat/tree/main) Vicuna-13B-1.1 conversation template. It assumes a conversation between a user and an AI assistant, separated by a </s> at the end of every role message. More details can be found [here](https://github.com/lm-sys/FastChat/blob/daa2b9abe20597ebf34dc5df164d450456610c74/fastchat/conversation.py#LL247C1-L247C1). --- license: cc-by-nc-4.0 --- # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_camel-ai__CAMEL-33B-Combined-Data) | Metric | Value | |-----------------------|---------------------------| | Avg. | 50.79 | | ARC (25-shot) | 62.97 | | HellaSwag (10-shot) | 83.83 | | MMLU (5-shot) | 58.98 | | TruthfulQA (0-shot) | 50.21 | | Winogrande (5-shot) | 78.3 | | GSM8K (5-shot) | 14.1 | | DROP (3-shot) | 7.12 |
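As a rough sketch of that conversation format, a prompt for the next assistant turn can be assembled as below; the system message is the stock Vicuna one rather than anything specific to CAMEL, and the `</s>` placement follows the vicuna_v1.1 convention of terminating assistant turns, so consult the FastChat template linked above for the exact behavior.

```python
SYSTEM = ("A chat between a curious user and an artificial intelligence assistant. "
          "The assistant gives helpful, detailed, and polite answers to the user's questions.")

def build_vicuna_prompt(turns):
    """turns: list of (user_msg, assistant_msg_or_None) pairs; leave the last
    assistant message as None to request a new completion."""
    prompt = SYSTEM + " "
    for user_msg, assistant_msg in turns:
        prompt += f"USER: {user_msg} ASSISTANT:"
        if assistant_msg is not None:
            prompt += f" {assistant_msg}</s>"
    return prompt

print(build_vicuna_prompt([("Hello, who are you?", None)]))
```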
Xilabs/calypso-3b-alpha-v2
Xilabs
"2023-11-22T13:32:21Z"
1,641
7
transformers
[ "transformers", "pytorch", "llama", "text-generation", "en", "dataset:Xilabs/PIPPA-alpaca", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-13T12:00:51Z"
--- license: cc-by-nc-sa-4.0 datasets: - Xilabs/PIPPA-alpaca language: - en pipeline_tag: text-generation --- # Calypso 3B - Alpha V2 Model Card ## Model Description **Model Name:** Calypso 3B **Version:** Calypso 3B - Alpha V2 <img src="https://i.imgur.com/zhLV66U.jpg" alt="Calypso" width="300"> **Based on:** [openlm-research/open_llama_3b_v2](https://huggingface.co/openlm-research/open_llama_3b_v2) Calypso 3B is a language model designed for one-on-one chat interactions with a character or persona. It has been finetuned on the PIPPA-Alpaca dataset and a private dataset of human-generated chats. The model is particularly suited for providing conversational responses in a variety of contexts, making it suitable for role-playing, or one-on-one chatting. ## Intended Use Calypso 3B is intended to facilitate engaging and interactive one-on-one chat experiences. ## Limitations and Ethical Considerations - **Safety Note:** Calypso 3B can produce content that may not be safe for all audiences. It may generate inappropriate, offensive, or sensitive content. User discretion is advised. - **Factual Accuracy:** The model's responses may not always be factually accurate. It should not be relied upon to provide accurate information, especially in critical or sensitive contexts. - **Bias and Fairness:** As with many language models, Calypso 3B might inadvertently exhibit biases present in the training data. Efforts have been made to mitigate this, but biases may still be present. ## Example Usage ```python import gradio as gr from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig import torch from transformers import LlamaForCausalLM, LlamaTokenizer class Chat: def __init__(self, model, tokenizer, conv_prompt, user_alias='User', character_name='Chatbot', message_history=[], chat_buffer_size=10): self.model = model self.tokenizer = tokenizer self.conv_prompt = conv_prompt self.user_alias = user_alias self.character_name = character_name self.chat_buffer_size = chat_buffer_size self.message_history = message_history self.display_messages = [] for message_pairs in message_history: message1, message2 = message_pairs self.display_messages.append([message1['text'], message2['text']]) def evaluate(self, message, temperature=0.6, top_p=0.75, top_k=50, num_beams=5, max_new_tokens=256, repetition_penalty=1.4, **kwargs): prompt = self.prompt_gen_chat(self.message_history, message) inputs = self.tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(self.model.device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, early_stopping=True, repetition_penalty=repetition_penalty, **kwargs, ) with torch.no_grad(): generation_output = self.model.generate( input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=max_new_tokens, ) s = generation_output.sequences[0] output = self.tokenizer.decode(s, skip_special_tokens=True) split_str = """### Response:\n{self.character_name}:""" output = output.split(split_str)[1].strip() return output def gradio_helper(self, message): # make response response = self.evaluate(message) # update message history self.message_history.append( ( {"speaker": self.user_alias, "text": message}, {"speaker": self.character_name, "text": response}, ) ) if len(self.message_history) > self.chat_buffer_size: self.message_history = self.message_history[-self.chat_buffer_size:] # update display messages self.display_messages.append([message, 
response]) return self.display_messages def prompt_gen_chat(self, message_history, message): past_dialogue = [] for message_pairs in message_history: message1, message2 = message_pairs past_dialogue.append(f"{message1['speaker']}: {message1['text']}") past_dialogue.append(f"{message2['speaker']}: {message2['text']}") past_dialogue_formatted = "\n".join(past_dialogue) prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: {self.conv_prompt} This is the conversation between {self.user_alias} and {self.character_name} till now: {past_dialogue_formatted} Continuing from the previous conversation, write what {self.character_name} says to {self.user_alias}: ### Input: {self.user_alias}: {message} ### Response: {self.character_name}:""" return prompt def launch_gradio(self): with gr.Blocks(theme="JohnSmith9982/small_and_pretty") as demo: chatbot = gr.Chatbot(elem_id="chatbot") with gr.Row(): txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter") txt.submit(self.gradio_helper, txt, chatbot) txt.submit(lambda: "", None, txt) demo.launch(debug=True, share=True) if __name__ == "__main__": model_path = "Xilabs/calypso-3b-alpha-v2" load_in_8bit = False model = LlamaForCausalLM.from_pretrained( model_path, device_map="auto", load_in_8bit=load_in_8bit) tokenizer = LlamaTokenizer.from_pretrained(model_path) conv_prompt = "Two people are texting each other on a messaging platform." message_history = [ ( { "speaker": "Bob", "text": "Hey, Alice! How are you doing? What's the status on those reports?", }, { "speaker": "Alice", "text": "Hey, Bob! I'm doing well. I'm almost done with the reports. I'll send them to you by the end of the day.", }, ), ( { "speaker": "Bob", "text": "That's great! Thanks, Alice. I'll be waiting for them. Btw, I have approved your leave for next week.", }, { "speaker": "Alice", "text": "Oh, thanks, Bob! I really appreciate it. I will be sure to send you the reports before I leave. Anything else you need from me?", }, ) ] chat_instance = Chat(model, tokenizer, conv_prompt, user_alias='Bob', character_name='Alice', message_history=message_history) chat_instance.launch_gradio() ``` ## Future Improvements Calypso 3B is an ongoing project, and future iterations will focus on enhancing safety, improving factual accuracy, and reducing biases in its responses. The development team is committed to addressing user feedback and continuously improving the model's performance. ## Licensing and Commercial Use Larger and more permissive versions of Calypso will be released in the future. If you're interested in using Calypso 3B or its future iterations for commercial purposes, obtaining a license, or accessing the model via an API, please reach out to us for more information. --- **Disclaimer:** This model card is provided for informational purposes only. Users are responsible for using the model in accordance with applicable laws and ethical considerations. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Xilabs__calypso-3b-alpha-v2) | Metric | Value | |-----------------------|---------------------------| | Avg. 
| 37.52 | | ARC (25-shot) | 41.55 | | HellaSwag (10-shot) | 71.48 | | MMLU (5-shot) | 25.82 | | TruthfulQA (0-shot) | 35.73 | | Winogrande (5-shot) | 65.27 | | GSM8K (5-shot) | 0.68 | | DROP (3-shot) | 22.08 |
chargoddard/MelangeC-70b
chargoddard
"2023-11-23T01:00:49Z"
1,641
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-23T02:14:19Z"
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_chargoddard__MelangeC-70b) | Metric | Value | |-----------------------|---------------------------| | Avg. | 61.22 | | ARC (25-shot) | 71.67 | | HellaSwag (10-shot) | 87.6 | | MMLU (5-shot) | 70.37 | | TruthfulQA (0-shot) | 58.13 | | Winogrande (5-shot) | 83.98 | | GSM8K (5-shot) | 0.0 | | DROP (3-shot) | 56.81 |
chargoddard/llama-2-34b-uncode
chargoddard
"2023-11-23T01:03:41Z"
1,641
5
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "dataset:the_pile_books3", "dataset:togethercomputer/RedPajama-Data-1T-Sample", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-27T09:11:25Z"
---
license: llama2
datasets:
- the_pile_books3
- togethercomputer/RedPajama-Data-1T-Sample
language:
- en
---

Very much a work-in-progress experiment.

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_chargoddard__llama-2-34b-uncode)

| Metric | Value |
|-----------------------|---------------------------|
| Avg. | 36.2 |
| ARC (25-shot) | 39.51 |
| HellaSwag (10-shot) | 33.9 |
| MMLU (5-shot) | 38.49 |
| TruthfulQA (0-shot) | 40.94 |
| Winogrande (5-shot) | 74.35 |
| GSM8K (5-shot) | 20.77 |
| DROP (3-shot) | 5.43 |
Undi95/MLewd-ReMM-L2-Chat-20B
Undi95
"2023-11-17T21:07:49Z"
1,641
25
transformers
[ "transformers", "safetensors", "llama", "text-generation", "not-for-all-audiences", "nsfw", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-17T22:40:27Z"
--- license: cc-by-nc-4.0 tags: - not-for-all-audiences - nsfw --- First : ```shell layer_slices: - model: Undi95/MLewd-L2-Chat-13B start: 0 end: 16 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 8 end: 20 - model: Undi95/MLewd-L2-Chat-13B start: 17 end: 32 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 21 end: 40 ``` Inverted: ```shell layer_slices: - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 0 end: 16 - model: Undi95/MLewd-L2-Chat-13B start: 8 end: 20 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 17 end: 32 - model: Undi95/MLewd-L2-Chat-13B start: 21 end: 40 ``` Precise: ```shell layer_slices: - model: Undi95/MLewd-L2-Chat-13B start: 0 end: 8 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 4 end: 12 - model: Undi95/MLewd-L2-Chat-13B start: 9 end: 16 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 13 end: 22 - model: Undi95/MLewd-L2-Chat-13B start: 17 end: 24 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 23 end: 32 - model: Undi95/MLewd-L2-Chat-13B start: 25 end: 32 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 33 end: 40 ``` PreciseInverted: ```shell layer_slices: - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 0 end: 8 - model: Undi95/MLewd-L2-Chat-13B start: 4 end: 12 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 9 end: 16 - model: Undi95/MLewd-L2-Chat-13B start: 13 end: 22 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 17 end: 24 - model: Undi95/MLewd-L2-Chat-13B start: 23 end: 32 - model: Undi95/MLewd-ReMM-L2-Chat-20B-Part1 start: 25 end: 32 - model: Undi95/MLewd-L2-Chat-13B start: 33 end: 40 ``` Part1 = ReMM v2.1 merged /w MLewd low weight to keep consistency. I call this "dilution" and result show consistency and coherency without repeat/loop beside the small amount of duplicated datas. The goal is to find the best way to interlace layers the best way possible to have a sweetspot between 13B and +30B. Normal/Inverted is by chunk of 16 layers and Precise/PreciseInverted is by chunk of 8 layers. All the models are made of 64(+1) layers. Need testing. ## Prompt template: Alpaca ``` Below is an instruction that describes a task. Write a response that completes the request. ### Instruction: {prompt} ### Response: ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Undi95__MLewd-ReMM-L2-Chat-20B) | Metric | Value | |-----------------------|---------------------------| | Avg. | 53.33 | | ARC (25-shot) | 62.46 | | HellaSwag (10-shot) | 85.62 | | MMLU (5-shot) | 59.13 | | TruthfulQA (0-shot) | 55.63 | | Winogrande (5-shot) | 77.19 | | GSM8K (5-shot) | 10.92 | | DROP (3-shot) | 22.33 |
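For quick testing with 🤗 Transformers, a minimal sketch that wraps a request in the Alpaca template documented above; the loading options and sampling settings here are illustrative assumptions, not the authors' recommended configuration.

```python
import torch
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="Undi95/MLewd-ReMM-L2-Chat-20B",
    torch_dtype=torch.float16,
    device_map="auto",
)

# Wrap the request in the Alpaca template recommended in the card.
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that completes the request.\n\n"
    "### Instruction:\nWrite a short scene introducing a sarcastic innkeeper.\n\n"
    "### Response:\n"
)

out = generator(prompt, max_new_tokens=200, do_sample=True, temperature=0.8, return_full_text=False)
print(out[0]["generated_text"])
```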
speechlessai/speechless-codellama-airoboros-orca-platypus-13b
speechlessai
"2023-11-14T02:10:21Z"
1,641
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "llama-2", "en", "dataset:jondurbin/airoboros-2.2", "dataset:Open-Orca/OpenOrca", "dataset:garage-bAInd/Open-Platypus", "arxiv:2308.12950", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-19T09:29:53Z"
--- language: - en library_name: transformers pipeline_tag: text-generation datasets: - jondurbin/airoboros-2.2 - Open-Orca/OpenOrca - garage-bAInd/Open-Platypus tags: - llama-2 license: llama2 --- <p><h1> speechless-codellama-airoboros-orca-platypus-13b </h1></p> Use the following dataset to fine-tune codellama/CodeLlama-13B in order to improve the model's reasoning and planning abilities. - jondurbin/airoboros-2.2: Filter categories related to coding, reasoning and planning. - Open-Orca/OpenOrca: Filter the 'cot' category in 1M GPT4 dataset. - garage-bAInd/Open-Platypus: 100% Code: https://github.com/uukuguy/speechless | Metric | Value | | --- | --- | | humaneval-python | 49.39 | [Big Code Models Leaderboard](https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard) CodeLlama-34B-Python: 53.29 CodeLlama-34B-Instruct: 50.79 CodeLlama-13B-Instruct: 50.6 CodeLlama-34B: 45.11 CodeLlama-13B-Python: 42.89 CodeLlama-13B: 35.07 [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) | Metric | Value | | --- | --- | | ARC | 44.88 | | HellaSwag | 67.7 | | MMLU | 43.16 | | TruthfulQA | 40.88 | | Average | 49.15 | # **Code Llama** Code Llama is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 34 billion parameters. This is the repository for the base 13B version in the Hugging Face Transformers format. This model is designed for general code synthesis and understanding. Links to other models can be found in the index at the bottom. | | Base Model | Python | Instruct | | --- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | | 7B | [codellama/CodeLlama-7b-hf](https://huggingface.co/codellama/CodeLlama-7b-hf) | [codellama/CodeLlama-7b-Python-hf](https://huggingface.co/codellama/CodeLlama-7b-Python-hf) | [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf) | | 13B | [codellama/CodeLlama-13b-hf](https://huggingface.co/codellama/CodeLlama-13b-hf) | [codellama/CodeLlama-13b-Python-hf](https://huggingface.co/codellama/CodeLlama-13b-Python-hf) | [codellama/CodeLlama-13b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf) | | 34B | [codellama/CodeLlama-34b-hf](https://huggingface.co/codellama/CodeLlama-34b-hf) | [codellama/CodeLlama-34b-Python-hf](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) | [codellama/CodeLlama-34b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf) | ## Model Use To use this model, please make sure to install transformers from `main` until the next version is released: ```bash pip install git+https://github.com/huggingface/transformers.git@main accelerate ``` Model capabilities: - [x] Code completion. - [x] Infilling. - [ ] Instructions / chat. - [ ] Python specialist. 
```python from transformers import AutoTokenizer import transformers import torch model = "codellama/CodeLlama-13b-hf" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) sequences = pipeline( 'import socket\n\ndef ping_exponential_backoff(host: str):', do_sample=True, top_k=10, temperature=0.1, top_p=0.95, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, max_length=200, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ## Model Details *Note: Use of this model is governed by the Meta license. Meta developed and publicly released the Code Llama family of large language models (LLMs). **Model Developers** Meta **Variations** Code Llama comes in three model sizes, and three variants: * Code Llama: base models designed for general code synthesis and understanding * Code Llama - Python: designed specifically for Python * Code Llama - Instruct: for instruction following and safer deployment All variants are available in sizes of 7B, 13B and 34B parameters. **This repository contains the base version of the 13B parameters model.** **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Code Llama is an auto-regressive language model that uses an optimized transformer architecture. **Model Dates** Code Llama and its variants have been trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of Code Llama - Instruct will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** More information can be found in the paper "[Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)" or its [arXiv page](https://arxiv.org/abs/2308.12950). ## Intended Use **Intended Use Cases** Code Llama and its variants is intended for commercial and research use in English and relevant programming languages. The base model Code Llama can be adapted for a variety of code synthesis and understanding tasks, Code Llama - Python is designed specifically to handle the Python programming language, and Code Llama - Instruct is intended to be safer to use for code assistant and generation applications. **Out-of-Scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Code Llama and its variants. ## Hardware and Software **Training Factors** We used custom training libraries. The training and fine-tuning of the released models have been performed Meta’s Research Super Cluster. **Carbon Footprint** In aggregate, training all 9 Code Llama models required 400K GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 65.3 tCO2eq, 100% of which were offset by Meta’s sustainability program. 
## Training Data

All experiments reported here and the released models have been trained and fine-tuned using the same data as Llama 2 with different weights (see Section 2 and Table 1 in the [research paper](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) for details).

## Evaluation Results

See evaluations for the main models and detailed ablations in Section 3 and safety evaluations in Section 4 of the research paper.

## Ethical Considerations and Limitations

Code Llama and its variants are a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Code Llama's potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate or objectionable responses to user prompts. Therefore, before deploying any applications of Code Llama, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-user-guide](https://ai.meta.com/llama/responsible-user-guide).
CHIH-HUNG/llama-2-13b-FINETUNE5_4w-r4-gate_up_down
CHIH-HUNG
"2023-10-02T06:48:35Z"
1,641
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-10-02T06:26:12Z"
Entry not found
chargoddard/llama-polyglot-13b
chargoddard
"2024-01-04T07:08:35Z"
1,641
5
transformers
[ "transformers", "safetensors", "llama", "text-generation", "llama2", "merge", "mergekit", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-11-23T04:08:11Z"
---
license: llama2
tags:
- llama2
- merge
- mergekit
---

Experimental multi-lingual model using a new merge technique.

Mergekit configuration (experimental branch):

```yaml
models:
  - model: clibrain/Llama-2-13b-ft-instruct-es
  - model: LeoLM/leo-hessianai-13b
  - model: daekeun-ml/Llama-2-ko-DPO-13B
  - model: pleisto/yuren-13b-chatml
  - model: bofenghuang/vigogne-2-13b-instruct
  - model: OpenBuddy/openbuddy-llama2-13b-v8.1-fp16
merge_method: dare_ties
base_model: TheBloke/Llama-2-13B-fp16
dtype: float16
parameters:
  density: 0.3
  weight: 1.0
  normalize: true
  int8_mask: true
tokenizer_source: base
```
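Assuming the configuration above is saved as `polyglot.yml` and mergekit (the experimental branch mentioned above) is installed, a merge like this is typically built with mergekit's `mergekit-yaml` entry point. The exact invocation used for this model is not documented, so treat the following as a sketch:

```bash
# Illustrative invocation, not the exact command used for this model.
pip install git+https://github.com/cg123/mergekit.git
mergekit-yaml polyglot.yml ./llama-polyglot-13b --cuda
```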
Weyaxi/neural-chat-7b-v3-1-Nebula-v2-7B
Weyaxi
"2023-11-28T15:18:31Z"
1,641
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "en", "dataset:garage-bAInd/Open-Platypus", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-11-24T08:18:24Z"
---
license: apache-2.0
datasets:
- garage-bAInd/Open-Platypus
language:
- en
---

![image/png](https://cdn-uploads.huggingface.co/production/uploads/6468ce47e134d050a58aa89c/cKySe1S5IW_KnbZpKmozQ.png)

# neural-chat-7b-v3-1-Nebula-v2-7B

neural-chat-7b-v3-1-Nebula-v2-7B is a merge of [Intel/neural-chat-7b-v3-1](https://huggingface.co/Intel/neural-chat-7b-v3-1) and [PulsarAI/Nebula-v2-7B-Lora](https://huggingface.co/PulsarAI/Nebula-v2-7B-Lora).
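The card does not include a reproduction script; a rough sketch of how a LoRA adapter such as Nebula-v2-7B-Lora can be folded into its base model with peft is shown below. The merge settings actually used for this model are not documented, so this is only an approximation.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "Intel/neural-chat-7b-v3-1"
lora_id = "PulsarAI/Nebula-v2-7B-Lora"

base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(base_id)

# Attach the LoRA adapter, then fold its weights into the base model.
merged = PeftModel.from_pretrained(base, lora_id).merge_and_unload()
merged.save_pretrained("neural-chat-7b-v3-1-Nebula-v2-7B")
tokenizer.save_pretrained("neural-chat-7b-v3-1-Nebula-v2-7B")
```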
OpenBuddy/openbuddy-deepseekcoder-33b-v16.1-32k
OpenBuddy
"2024-01-07T11:24:16Z"
1,641
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "zh", "en", "fr", "de", "ja", "ko", "it", "ru", "fi", "license:other", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-01-07T10:13:24Z"
--- language: - zh - en - fr - de - ja - ko - it - ru - fi pipeline_tag: text-generation inference: false library_name: transformers license: other license_name: deepseek license_link: https://github.com/deepseek-ai/DeepSeek-Coder/blob/d3bb741e2525dbbcc1c2f732f64682131d644d0f/LICENSE-MODEL --- # OpenBuddy - Open Multilingual Chatbot GitHub and Usage Guide: [https://github.com/OpenBuddy/OpenBuddy](https://github.com/OpenBuddy/OpenBuddy) Website and Demo: [https://openbuddy.ai](https://openbuddy.ai) Evaluation result of this model: [Evaluation.txt](Evaluation.txt) ![Demo](https://raw.githubusercontent.com/OpenBuddy/OpenBuddy/main/media/demo.png) # Copyright Notice Base model: https://huggingface.co/deepseek-ai/deepseek-coder-33b-base/tree/main License: [deepseek](https://github.com/deepseek-ai/DeepSeek-Coder/blob/d3bb741e2525dbbcc1c2f732f64682131d644d0f/LICENSE-MODEL) ## Disclaimer All OpenBuddy models have inherent limitations and may potentially produce outputs that are erroneous, harmful, offensive, or otherwise undesirable. Users should not use these models in critical or high-stakes situations that may lead to personal injury, property damage, or significant losses. Examples of such scenarios include, but are not limited to, the medical field, controlling software and hardware systems that may cause harm, and making important financial or legal decisions. OpenBuddy is provided "as-is" without any warranty of any kind, either express or implied, including, but not limited to, the implied warranties of merchantability, fitness for a particular purpose, and non-infringement. In no event shall the authors, contributors, or copyright holders be liable for any claim, damages, or other liabilities, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software. By using OpenBuddy, you agree to these terms and conditions, and acknowledge that you understand the potential risks associated with its use. You also agree to indemnify and hold harmless the authors, contributors, and copyright holders from any claims, damages, or liabilities arising from your use of OpenBuddy. ## 免责声明 所有OpenBuddy模型均存在固有的局限性,可能产生错误的、有害的、冒犯性的或其他不良的输出。用户在关键或高风险场景中应谨慎行事,不要使用这些模型,以免导致人身伤害、财产损失或重大损失。此类场景的例子包括但不限于医疗领域、可能导致伤害的软硬件系统的控制以及进行重要的财务或法律决策。 OpenBuddy按“原样”提供,不附带任何种类的明示或暗示的保证,包括但不限于适销性、特定目的的适用性和非侵权的暗示保证。在任何情况下,作者、贡献者或版权所有者均不对因软件或使用或其他软件交易而产生的任何索赔、损害赔偿或其他责任(无论是合同、侵权还是其他原因)承担责任。 使用OpenBuddy即表示您同意这些条款和条件,并承认您了解其使用可能带来的潜在风险。您还同意赔偿并使作者、贡献者和版权所有者免受因您使用OpenBuddy而产生的任何索赔、损害赔偿或责任的影响。
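The card points to the OpenBuddy GitHub guide for usage rather than giving inline code. As a minimal, non-authoritative starting point, the checkpoint loads like any other Llama-architecture model with 🤗 Transformers; the dtype, device settings, and plain prompt below are assumptions, and the usage guide should be followed for the recommended chat format.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "OpenBuddy/openbuddy-deepseekcoder-33b-v16.1-32k"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

inputs = tokenizer("Write a Python function that reverses a string.", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=128)[0], skip_special_tokens=True))
```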
NeverSleep/Noromaid-7B-0.4-DPO
NeverSleep
"2024-01-11T02:50:45Z"
1,641
21
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-01-11T02:47:20Z"
--- license: cc-by-nc-4.0 --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/630dfb008df86f1e5becadc3/VKX2Z2yjZX5J8kXzgeCYO.png) --- # This model is a collab between [IkariDev](https://huggingface.co/IkariDev) and [Undi](https://huggingface.co/Undi95)! <!-- description start --> ## Description <!-- [Recommended settings - contributed by localfultonextractor](https://files.catbox.moe/ue0tja.json) --> This repo contains fp16 files of Noromaid-7b-v0.4-DPO. [FP16 - by IkariDev and Undi](https://huggingface.co/NeverSleep/Noromaid-7B-0.4-DPO) <!-- [GGUF - By TheBloke](https://huggingface.co/TheBloke/Athena-v4-GGUF)--> <!-- [GPTQ - By TheBloke](https://huggingface.co/TheBloke/Athena-v4-GPTQ)--> <!-- [exl2[8bpw-8h] - by AzureBlack](https://huggingface.co/AzureBlack/Echidna-13b-v0.3-8bpw-8h-exl2)--> <!-- [AWQ - By TheBloke](https://huggingface.co/TheBloke/Athena-v4-AWQ)--> <!-- [fp16 - by IkariDev+Undi95](https://huggingface.co/IkariDev/Athena-v4)--> [GGUF - by IkariDev and Undi](https://huggingface.co/NeverSleep/Noromaid-7B-0.4-DPO-GGUF) <!-- [OLD(GGUF - by IkariDev+Undi95)](https://huggingface.co/IkariDev/Athena-v4-GGUF)--> ## Ratings: Note: We have permission of all users to upload their ratings, we DONT screenshot random reviews without asking if we can put them here! No ratings yet! If you want your rating to be here, send us a message over on DC and we'll put up a screenshot of it here. DC name is "ikaridev" and "undi". <!-- description end --> <!-- prompt-template start --> ## Prompt format: Chatml ``` <|im_start|>system {sysprompt}<|im_end|> <|im_start|>user {input}<|im_end|> <|im_start|>assistant {output}<|im_end|> ``` ## Training data used: - [no_robots dataset](https://huggingface.co/Undi95/Llama2-13B-no_robots-alpaca-lora) let the model have more human behavior, enhances the output. - [Aesir Private RP dataset] New data from a new and never used before dataset, add fresh data, no LimaRP spam, this is 100% new. Thanks to the [MinvervaAI Team](https://huggingface.co/MinervaAI) and, in particular, [Gryphe](https://huggingface.co/Gryphe) for letting us use it! - [Another private Aesir dataset] - [Another private Aesir dataset] - [limarp](https://huggingface.co/datasets/lemonilia/LimaRP) ## DPO training data used: - [Intel/orca_dpo_pairs](https://huggingface.co/datasets/Intel/orca_dpo_pairs) - [NobodyExistsOnTheInternet/ToxicDPOqa](https://huggingface.co/datasets/NobodyExistsOnTheInternet/ToxicDPOqa) - [Undi95/toxic-dpo-v0.1-NoWarning](https://huggingface.co/datasets/Undi95/toxic-dpo-v0.1-NoWarning) This is a full finetune. ## Others Undi: If you want to support me, you can [here](https://ko-fi.com/undiai). IkariDev: Visit my [retro/neocities style website](https://ikaridevgit.github.io/) please kek
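A minimal sketch of applying the ChatML template above at inference time; the system prompt and sampling settings below are placeholders, not recommendations from the authors.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "NeverSleep/Noromaid-7B-0.4-DPO"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

# Build the ChatML prompt exactly as documented in the card.
sysprompt = "You are a helpful roleplay assistant."  # placeholder system prompt
user_input = "Describe the tavern we just walked into."
prompt = (
    f"<|im_start|>system\n{sysprompt}<|im_end|>\n"
    f"<|im_start|>user\n{user_input}<|im_end|>\n"
    f"<|im_start|>assistant\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.8)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```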
hansyan/perflow-sdxl-dreamshaper
hansyan
"2024-05-10T06:29:26Z"
1,641
3
diffusers
[ "diffusers", "license:cc-by-nc-4.0", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-04-24T13:00:48Z"
--- license: cc-by-nc-4.0 --- **Github repo**: https://github.com/magic-research/piecewise-rectified-flow <br> **PeRFlow accelerated SDXL-DreamShaper**: https://huggingface.co/Lykon/dreamshaper-xl-1-0 **Demo:** ```python import random, os import numpy as np from pathlib import Path import torch, torchvision def setup_seed(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True Path("demo").mkdir(parents=True, exist_ok=True) from diffusers import StableDiffusionXLPipeline pipe = StableDiffusionXLPipeline.from_pretrained("hansyan/perflow-sdxl-dreamshaper", torch_dtype=torch.float16, use_safetensors=True, variant="v0-fix") from src.scheduler_perflow import PeRFlowScheduler pipe.scheduler = PeRFlowScheduler.from_config(pipe.scheduler.config, prediction_type="ddim_eps", num_time_windows=4) pipe.to("cuda", torch.float16) prompts_list = [ ["photorealistic, uhd, high resolution, high quality, highly detailed; RAW photo, a handsome man, wearing a black coat, outside, closeup face", "distorted, blur, low-quality, haze, out of focus",], ["photorealistic, uhd, high resolution, high quality, highly detailed; masterpiece, A closeup face photo of girl, wearing a rain coat, in the street, heavy rain, bokeh,", "distorted, blur, low-quality, haze, out of focus",], ["photorealistic, uhd, high resolution, high quality, highly detailed; RAW photo, a red luxury car, studio light", "distorted, blur, low-quality, haze, out of focus",], ["photorealistic, uhd, high resolution, high quality, highly detailed; masterpiece, A beautiful cat bask in the sun", "distorted, blur, low-quality, haze, out of focus",], ] num_inference_steps = 6 # suggest steps >= num_win=4 cfg_scale_list = [2.0] # suggest values [1.5, 2.0, 2.5] num_img = 2 seed = 42 for cfg_scale in cfg_scale_list: for i, prompts in enumerate(prompts_list): setup_seed(seed) prompt, neg_prompt = prompts[0], prompts[1] samples = pipe( prompt = [prompt] * num_img, negative_prompt = [neg_prompt] * num_img, height = 1024, width = 1024, num_inference_steps = num_inference_steps, guidance_scale = cfg_scale, output_type = 'pt', ).images cfg_int = int(cfg_scale); cfg_float = int(cfg_scale*10 - cfg_int*10) save_name = f'step_{num_inference_steps}_txt{i+1}_cfg{cfg_int}-{cfg_float}.png' torchvision.utils.save_image(torchvision.utils.make_grid(samples, nrow = num_img), os.path.join("demo", save_name)) ```
x2bee/POLAR-14B-DPO-v1.3
x2bee
"2024-05-28T02:23:43Z"
1,641
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "ko", "dataset:We-Want-GPU/Yi-Ko-DPO-Orca-DPO-Pairs", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-05-23T08:54:58Z"
--- library_name: transformers license: apache-2.0 datasets: - We-Want-GPU/Yi-Ko-DPO-Orca-DPO-Pairs language: - ko pipeline_tag: text-generation --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65f3ee48b1a907c6aa6d8f06/nGbRfMQEfAW_aDwisKn9T.png) ## Model Description <!-- Provide a longer summary of what this model is/does. --> POLAR is a Korean LLM developed by Plateer's AI-lab. It was inspired by Upstage's SOLAR. We will continue to evolve this model and hope to contribute to the Korean LLM ecosystem. - **Developed by:** AI-Lab of Plateer(Woomun Jung, Eunsoo Ha, MinYoung Joo, Seongjun Son) - **Model type:** Language model - **Language(s) (NLP):** ko - **License:** apache-2.0 - Parent Model: x2bee/POLAR-14B-v0.2 - We-Want-GPU/Yi-Ko-DPO-Orca-DPO-Pairs Data 를 활용하여 DPO 데이터 1.23만건 학습했습니다. ## Direct Use ``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("x2bee/POLAR-14B-DPO-v1.3") model = AutoModelForCausalLM.from_pretrained("x2bee/POLAR-14B-DPO-v1.3") ``` ## Downstream Use [Optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say "more info needed." --> ## Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> <!-- If the user enters content, print that. If not, but they enter a task in the list, use that. If neither, say "more info needed." --> # Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). Predictions generated by the model may include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups. ## Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> # Training Details ## Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> More information on training data needed ## Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> ### Preprocessing More information needed ### Speeds, Sizes, Times <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> More information needed # Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ## Testing Data, Factors & Metrics ### Testing Data <!-- This should link to a Data Card if possible. --> More information needed ### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> More information needed ### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> More information needed ## Results More information needed # Model Examination More information needed # Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** More information needed - **Hours used:** More information needed - **Cloud Provider:** More information needed - **Compute Region:** More information needed - **Carbon Emitted:** More information needed # Technical Specifications [optional] ## Model Architecture and Objective More information needed ## Compute Infrastructure More information needed ### Hardware More information needed ### Software More information needed # Citation <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** More information needed **APA:** More information needed # Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> More information needed # More Information [optional] If you would like more information about our company, please visit the link below. [tech.x2bee.com](https://tech.x2bee.com/) # Model Card Authors [optional] <!-- This section provides another layer of transparency and accountability. Whose views is this model card representing? How many voices were included in its construction? Etc. --> Woomun Jung, MinYoung Joo, Eunsu Ha, Seungjun Son # Model Card Contact More information needed # How to Get Started with the Model Use the code below to get started with the model. <details> <summary> Click to expand </summary> More information needed </details>
GraydientPlatformAPI/handsoff
GraydientPlatformAPI
"2024-06-04T04:20:27Z"
1,641
0
diffusers
[ "diffusers", "safetensors", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-06-04T04:08:37Z"
Entry not found
RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf
RichardErkhov
"2024-06-30T00:58:56Z"
1,641
0
null
[ "gguf", "region:us" ]
null
"2024-06-30T00:50:13Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Qwen1.5-0.5B-vortex-0.1 - GGUF - Model creator: https://huggingface.co/Abhaykoul/ - Original model: https://huggingface.co/Abhaykoul/Qwen1.5-0.5B-vortex-0.1/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Qwen1.5-0.5B-vortex-0.1.Q2_K.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q2_K.gguf) | Q2_K | 0.23GB | | [Qwen1.5-0.5B-vortex-0.1.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.IQ3_XS.gguf) | IQ3_XS | 0.24GB | | [Qwen1.5-0.5B-vortex-0.1.IQ3_S.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.IQ3_S.gguf) | IQ3_S | 0.25GB | | [Qwen1.5-0.5B-vortex-0.1.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q3_K_S.gguf) | Q3_K_S | 0.25GB | | [Qwen1.5-0.5B-vortex-0.1.IQ3_M.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.IQ3_M.gguf) | IQ3_M | 0.26GB | | [Qwen1.5-0.5B-vortex-0.1.Q3_K.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q3_K.gguf) | Q3_K | 0.26GB | | [Qwen1.5-0.5B-vortex-0.1.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q3_K_M.gguf) | Q3_K_M | 0.26GB | | [Qwen1.5-0.5B-vortex-0.1.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q3_K_L.gguf) | Q3_K_L | 0.28GB | | [Qwen1.5-0.5B-vortex-0.1.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.IQ4_XS.gguf) | IQ4_XS | 0.28GB | | [Qwen1.5-0.5B-vortex-0.1.Q4_0.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q4_0.gguf) | Q4_0 | 0.29GB | | [Qwen1.5-0.5B-vortex-0.1.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.IQ4_NL.gguf) | IQ4_NL | 0.29GB | | [Qwen1.5-0.5B-vortex-0.1.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q4_K_S.gguf) | Q4_K_S | 0.29GB | | [Qwen1.5-0.5B-vortex-0.1.Q4_K.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q4_K.gguf) | Q4_K | 0.3GB | | [Qwen1.5-0.5B-vortex-0.1.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q4_K_M.gguf) | Q4_K_M | 0.3GB | | [Qwen1.5-0.5B-vortex-0.1.Q4_1.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q4_1.gguf) | Q4_1 | 0.3GB | | [Qwen1.5-0.5B-vortex-0.1.Q5_0.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q5_0.gguf) | Q5_0 | 0.32GB | | [Qwen1.5-0.5B-vortex-0.1.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q5_K_S.gguf) | Q5_K_S | 0.32GB | | 
[Qwen1.5-0.5B-vortex-0.1.Q5_K.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q5_K.gguf) | Q5_K | 0.33GB | | [Qwen1.5-0.5B-vortex-0.1.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q5_K_M.gguf) | Q5_K_M | 0.33GB | | [Qwen1.5-0.5B-vortex-0.1.Q5_1.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q5_1.gguf) | Q5_1 | 0.34GB | | [Qwen1.5-0.5B-vortex-0.1.Q6_K.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q6_K.gguf) | Q6_K | 0.36GB | | [Qwen1.5-0.5B-vortex-0.1.Q8_0.gguf](https://huggingface.co/RichardErkhov/Abhaykoul_-_Qwen1.5-0.5B-vortex-0.1-gguf/blob/main/Qwen1.5-0.5B-vortex-0.1.Q8_0.gguf) | Q8_0 | 0.47GB | Original model description: --- language: - en license: other datasets: - OEvortex/vortex-mini - yahma/alpaca-cleaned license_name: tongyi-qianwen-research license_link: https://huggingface.co/Qwen/Qwen1.5-0.5B/blob/main/LICENSE pipeline_tag: text-generation model-index: - name: Qwen1.5-0.5B-vortex-v2 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 30.63 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Abhaykoul/Qwen1.5-0.5B-vortex-v2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 45.54 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Abhaykoul/Qwen1.5-0.5B-vortex-v2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 36.29 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Abhaykoul/Qwen1.5-0.5B-vortex-v2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 44.29 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Abhaykoul/Qwen1.5-0.5B-vortex-v2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 56.04 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Abhaykoul/Qwen1.5-0.5B-vortex-v2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 5.91 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Abhaykoul/Qwen1.5-0.5B-vortex-v2 name: Open LLM Leaderboard --- # Qwen1.5-0.5B-vortex-v2 model card Qwen1.5-0.5B-vortex-v2 is a dealigned chat finetune of the original fantastic Qwen1.5-0.5B model by the Qwen team. 
This model was trained on the Vortex mini dataset and alpaca-cleaned using axolotl for 4 epochs.

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Abhaykoul__Qwen1.5-0.5B-vortex-v2)

| Metric |Value|
|---------------------------------|----:|
|Avg. |36.45|
|AI2 Reasoning Challenge (25-Shot)|30.63|
|HellaSwag (10-Shot) |45.54|
|MMLU (5-Shot) |36.29|
|TruthfulQA (0-shot) |44.29|
|Winogrande (5-shot) |56.04|
|GSM8k (5-shot) | 5.91|
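The GGUF files listed above can be run with llama.cpp or its Python bindings; a minimal llama-cpp-python sketch follows, with the chosen quantization file and settings picked purely for illustration.

```python
from llama_cpp import Llama

# Any of the quantizations listed above will work; Q4_K_M is a common size/quality trade-off.
llm = Llama(
    model_path="Qwen1.5-0.5B-vortex-0.1.Q4_K_M.gguf",
    n_ctx=2048,
)

out = llm("Explain what model quantization does, in two sentences.", max_tokens=128)
print(out["choices"][0]["text"])
```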
OpenAssistant/galactica-6.7b-finetuned
OpenAssistant
"2023-01-16T22:16:33Z"
1,640
34
transformers
[ "transformers", "pytorch", "opt", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-01-16T22:08:03Z"
Galactica-6.7b finetuned on webgpt and prompt_dialogue (version v2) Demo use: ``` import torch from torch import nn from torch.nn import functional as F import transformers base_path = 'OpenAssistant/galactica-6.7b-finetuned' model = transformers.OPTForCausalLM.from_pretrained( base_path, load_in_8bit=True, device_map='auto', low_cpu_mem_usage=True, torch_dtype=torch.float16, offload_state_dict=True ) model.gradient_checkpointing_enable() # reduce number of stored activations model.model.decoder.project_in = lambda x: x.requires_grad_(True) class CastOutputToFloat(nn.Sequential): def forward(self, x): return super().forward(x).to(torch.float32) model.lm_head = CastOutputToFloat(model.lm_head) tokenizer = transformers.AutoTokenizer.from_pretrained(base_path) batch = tokenizer.encode("<question>What are the symptoms of Alzheimer's disease?<answer>", return_tensors="pt") with torch.cuda.amp.autocast(): out = model.generate( input_ids=batch.to(model.device), max_length=300, do_sample=True, top_k=40, num_beams=1, num_return_sequences=1, eos_token_id=tokenizer.additional_special_tokens_ids[tokenizer.additional_special_tokens.index('<question>')] ) print(tokenizer.decode(out[0, :-1]).replace('<question>', "User:\n").replace('<answer>', '\nAssistant:\n')) ```
digitous/Skegma-GPTJ
digitous
"2023-03-01T03:27:54Z"
1,640
0
transformers
[ "transformers", "pytorch", "gptj", "text-generation", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2023-03-01T01:57:17Z"
---
license: creativeml-openrail-m
---

This is a 50/50 weight merge of KoboldAI's adventure-based language model GPT-J-6B-Skein and PygmalionAI's Pygmalion-6b.

https://huggingface.co/KoboldAI/GPT-J-6B-Skein

https://huggingface.co/PygmalionAI/pygmalion-6b
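No merge script ships with the card; a 50/50 weight merge of two same-architecture GPT-J checkpoints can be sketched roughly as follows. This is an illustrative reconstruction, not the exact procedure used to build this model.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_a = AutoModelForCausalLM.from_pretrained("KoboldAI/GPT-J-6B-Skein", torch_dtype=torch.float16)
model_b = AutoModelForCausalLM.from_pretrained("PygmalionAI/pygmalion-6b", torch_dtype=torch.float16)

# Both checkpoints share the GPT-J architecture, so their state dicts line up key-for-key.
sd_a, sd_b = model_a.state_dict(), model_b.state_dict()
merged = {name: 0.5 * sd_a[name] + 0.5 * sd_b[name] for name in sd_a}
model_a.load_state_dict(merged)

model_a.save_pretrained("Skegma-GPTJ")
# Tokenizer choice is arbitrary here; both parents use the same GPT-J tokenizer.
AutoTokenizer.from_pretrained("PygmalionAI/pygmalion-6b").save_pretrained("Skegma-GPTJ")
```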
digitous/Javalion-R
digitous
"2023-03-02T05:33:32Z"
1,640
5
transformers
[ "transformers", "pytorch", "gptj", "text-generation", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2023-03-02T02:07:02Z"
---
license: creativeml-openrail-m
---

Javalion-R is a penta merge of KoboldAI's GPT-J classics + PygmalionAI's Pygmalion-6b; ((Janeway + Shinen) + (Skein + Pygmalion)) + GPT-R.

Janeway + Shinen is listed under JANIN-GPTJ. Skein + Pygmalion is listed under SKEGMA-GPTJ. GPT-R itself is a 60/40 merge of two instruct research models (see digitous/GPT-R for full credits). This 5x+ merge is not intended for minors, as it can produce NC-17+ content.

This model differs from Javelin-R by substituting the Adventure model with Pygmalion, as Adventure is rendered redundant in training data by Skein.

Javalion-R is a research artefact with a dual purpose: entertainment, and an example of the value instruct models can bring when combined with models of a different purpose through weight-sum merging. Mileage may vary. No refunds, best wishes.

Mainly intended to be used with the open-source KoboldAI software. Optimal sampler settings have not been determined. Feedback welcome!

https://github.com/KoboldAI/KoboldAI-Client
KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct
KnutJaegersberg
"2023-11-18T07:42:13Z"
1,640
4
transformers
[ "transformers", "pytorch", "safetensors", "gpt2", "text-generation", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-05-01T17:21:09Z"
--- license: cc-by-nc-4.0 --- Fine tuned small megatron-GPT-2 on Evol-Instruct dataset of WizardLM. Megatron-GPT-2-345m is competetive with GPT-2 large in some benchmarks. Megatron-GPT-2-345m performance on Wikitext-103 is 19.31, GPT2-large (1.5B) is 17.48 and GPT2-medium (762M) is 22.05. Code example: from transformers import AutoModel, GPT2LMHeadModel, AutoTokenizer model2 = GPT2LMHeadModel.from_pretrained('KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct') tokenizer = AutoTokenizer.from_pretrained("KnutJaegersberg/megatron-GPT-2-345m-EvolInstruct") text = "Below is an instruction that describes a task. Write a response that appropriately completes the request. \n\n### Instruction:\n Give me a list of 5 political commentators, compare and contrast their beliefs. \n### Response:\n" encoded_input = tokenizer(text, return_tensors='pt').input_ids.to('cuda:0') from transformers import pipeline generator = pipeline('text-generation', model=model2, tokenizer=tokenizer, device=0) generator(text, penalty_alpha=0.6, top_k=4, max_length=1000) output: "1. Rush Limbaugh - Religious conservative\n2. Bernard Adjaye - African American\n3. Juan Williams - Hispanic\n4. Rachel Maddow - Journalist\n5. Bill O'Reilly - Political Commentator\nLimbaugh and Adjaye are both religious conservative politicians, but they differ in their beliefs on issues such as abortion, same-sex marriage, and immigration. Adjaye is a strong advocate for gun rights and has been criticized for his pro-choice stance on social issues. However, he has also been critical of President Trump's handling of the COVID-19 pandemic and has suggested that the country needs to focus on mental health issues rather than politics.\nMaddow, on the other hand, is a journalist and talk show host who has made a name for herself in the media. She has been critical of Trump's handling of the COVID-19 pandemic and has suggested that the country needs to focus on mental health issues rather than politics. However, she has also been critical of the government's response to the pandemic and has suggested that the government needs to do more to address the issue.\nO'Reilly, on the other hand, is a political commentary and talk show host who has made a name for himself in the media. He has been critical of Trump's handling of the COVID-19 pandemic and has suggested that the country needs to focus on mental health issues rather than politics. However, he has also been critical of the government's response to the pandemic and has suggested that the country needs to take care of its citizens who are sick.\nOverall, each of these political commentators has their own unique perspective on the political landscape, and their beliefs are shaped by a variety of factors, including their religious beliefs, political affiliations, and personal experiences. It is important to note that these opinions are not universal and may vary across different demographics and regions of the country. Therefore, it is always best to consult with a qualified political analyst or news organization for a comprehensive understanding of the political landscape. Additionally, it is important to be respectful of others' opinions and not try to influence them. By doing so, we can work together to create a more just and equitable society for all.\nSources:\nLimbaugh, R. (2020). The rise of religion in America. Christianity Today, www.cchurch.com/content/dam/2021/08/the-rise-of-religion-in-america. Retrieved from https://www. 
ChristianityToday.com/blog/how-religion-is-becoming-a-part-of-america/\nAdjaye, B. (2020). Black Lives Matter: A Call to Action. National Book Critics, www.nrdc.org/books/britannica/article/2020/08/black-lives-matter-a-call-to-action.html\nWright, J. (2020). Climate change and the economy. American Psychological Association, www.apa.org/publication/climate-change-and-economy/2020/08/council-member-wright-jeff-kincaid-reviews-opinions-on-policies-to-reform-climate-change.html\nMegan, M. (2020). The future of healthcare: What we know and don't know. Healthline, www.healthline.com/healthline/2020/08/what-we-know-and-don't-know.html\nO'Reilly, R. (2020). Donald Trump's presidency. Fox News, www.foxnews.com/politics/presidential-race.mp3\nMaddow, R. (2020). The media is biased against the right wing. The New York Times, www.nytimes.com/2020/08/29/us/politics/the-media-is-biased-against-the-right-wing.html\nO'Reilly, R. (2020). The 2020 U.S. presidential election. CNN, www.cnn.com/2020/08/29/us/politics/the-2020-presidential-election.html\nMaddow, M. (2020). The COVID-19 pandemic is a wake-up call for the world. The Wall Street Journal, www.bloomberg.com/news/2020/08/causes-and-benefits-of-the-coVID-19-vaccine.html\nO'Reilly, R. (2020). It's time to get" # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KnutJaegersberg__megatron-GPT-2-345m-EvolInstruct) | Metric | Value | |-----------------------|---------------------------| | Avg. | 26.35 | | ARC (25-shot) | 24.06 | | HellaSwag (10-shot) | 35.12 | | MMLU (5-shot) | 24.48 | | TruthfulQA (0-shot) | 41.25 | | Winogrande (5-shot) | 54.78 | | GSM8K (5-shot) | 0.38 | | DROP (3-shot) | 4.39 |
TehVenom/Pygmalion-Vicuna-1.1-7b
TehVenom
"2023-05-04T18:53:16Z"
1,640
27
transformers
[ "transformers", "safetensors", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-05-02T03:09:23Z"
The LLaMA based Pygmalion-7b model:

https://huggingface.co/PygmalionAI/pygmalion-7b

Merged alongside lmsys's Vicuna v1.1 deltas:

https://huggingface.co/lmsys/vicuna-13b-delta-v1.1

This merge was done using a weighted average merge strategy, and the end result is a model composed of:

Pygmalion-7b [60%] + LLaMA Vicuna v1.1 [40%]

This was done on request, but the end result is intended to lean heavily towards Pygmalion's chatting and RP tendencies, and to inherit some of Vicuna's assistant / instruct / helpful properties.

Due to the influence of Pygmalion, this model will very likely generate content that is considered NSFW.

The specific prompting is unknown, but try Pygmalion's prompt styles first, then a mix of the two to see what brings the most interesting results.

Treat this as a normal HF Transformers model.
ausboss/llama7b-wizardlm-unfiltered
ausboss
"2023-05-02T15:49:49Z"
1,640
6
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-05-02T15:40:04Z"
Entry not found
YeungNLP/firefly-llama-13b
YeungNLP
"2023-07-24T10:23:55Z"
1,640
5
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-13T06:16:38Z"
This model takes llama-13b and instruction-tunes it on the UltraChat dataset, roughly 1.4 million multi-turn dialogue samples. Training can be completed with just a single GPU.

firefly-llama-13b was evaluated objectively on the 🤗 Hugging Face Open LLM Leaderboard. On the leaderboard it performs well: about 0.2 points above vicuna-13b-1.1, about 0.5 points below llama-2-13b-chat, and about 0.6 points below vicuna-13b-v1.3. Judging from the scores, firefly-llama-13b is very close to vicuna-13b and llama-2-13b-chat 😎.

| Model | Average | ARC | HellaSwag | MMLU | TruthfulQA (MC) |
|--------------------------------------------------------------------------------|-------|----------------------|------------|------------|------|
| Llama-2-70b-chat-hf | 66.8 | 64.6 | 85.9 | 63.9 | 52.8 |
| vicuna-13b-v1.3 | 60 | 54.6 | 80.4 | 52.9 | 52.1 |
| Llama-2-13b-chat-hf | 59.9 | 59 | 81.9 | 54.6 | 44.1 |
| firefly-llama-13b | 59.4 | 59 | 79.7 | 49.1 | 49.6 |
| vicuna-13b-1.1 | 59.2 | 52.7 | 80.1 | 51.9 | 52.1 |
| guanaco-13B-HF | 59.1 | 57.8 | 83.8 | 48.3 | 46.7 |

Note that vicuna-13b was trained with full-parameter fine-tuning, which demands substantial training resources, whereas firefly-llama-13b was fine-tuned with QLoRA: as little as 16 GB of GPU memory is enough to fine-tune the 13B model.

For a detailed write-up, see the article: [Firefly reproduces Vicuna-13B on a single GPU, ~0.2 points higher on the Open LLM Leaderboard 🤗](https://mp.weixin.qq.com/s/QG2YMo_QxaxS_Rr2yJrIeA)

More details are available in the [Firefly project](https://github.com/yangjianxin1/Firefly)

[Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
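The ~16 GB figure comes from QLoRA: the 13B base weights are loaded in 4-bit and only small LoRA adapter matrices are trained. A minimal sketch of such a setup with transformers + peft follows; the base checkpoint and hyperparameters are illustrative, not Firefly's exact configuration.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "huggyllama/llama-13b",  # illustrative base checkpoint
    quantization_config=bnb_config,
    device_map="auto",
)
model = prepare_model_for_kbit_training(model)

lora_config = LoraConfig(r=64, lora_alpha=16, lora_dropout=0.05, task_type="CAUSAL_LM")
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()  # only the LoRA weights are trainable
```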
warp-ai/wuerstchen-prior
warp-ai
"2023-09-18T07:01:28Z"
1,640
21
diffusers
[ "diffusers", "safetensors", "arxiv:2306.00637", "arxiv:1910.09700", "license:mit", "diffusers:WuerstchenPriorPipeline", "region:us" ]
null
"2023-07-19T19:09:44Z"
--- license: mit --- <img src="https://cdn-uploads.huggingface.co/production/uploads/634cb5eefb80cc6bcaf63c3e/i-DYpDHw8Pwiy7QBKZVR5.jpeg" width=1500> ## Würstchen - Overview Würstchen is a diffusion model, whose text-conditional model works in a highly compressed latent space of images. Why is this important? Compressing data can reduce computational costs for both training and inference by magnitudes. Training on 1024x1024 images is way more expensive than training on 32x32. Usually, other works make use of a relatively small compression, in the range of 4x - 8x spatial compression. Würstchen takes this to an extreme. Through its novel design, we achieve a 42x spatial compression. This was unseen before because common methods fail to faithfully reconstruct detailed images after 16x spatial compression. Würstchen employs a two-stage compression, what we call Stage A and Stage B. Stage A is a VQGAN, and Stage B is a Diffusion Autoencoder (more details can be found in the [paper](https://arxiv.org/abs/2306.00637)). A third model, Stage C, is learned in that highly compressed latent space. This training requires fractions of the compute used for current top-performing models, allowing also cheaper and faster inference. ## Würstchen - Prior The Prior is what we refer to as "Stage C". It is the text-conditional model, operating in the small latent space that Stage A and Stage B encode images into. During inference, its job is to generate the image latents given text. These image latents are then sent to Stages A & B to decode the latents into pixel space. ### Image Sizes Würstchen was trained on image resolutions between 1024x1024 & 1536x1536. We sometimes also observe good outputs at resolutions like 1024x2048. Feel free to try it out. We also observed that the Prior (Stage C) adapts extremely fast to new resolutions. So finetuning it at 2048x2048 should be computationally cheap. <img src="https://cdn-uploads.huggingface.co/production/uploads/634cb5eefb80cc6bcaf63c3e/5pA5KUfGmvsObqiIjdGY1.jpeg" width=1000> ## How to run This pipeline should be run together with https://huggingface.co/warp-ai/wuerstchen: ```py import torch from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS device = "cuda" dtype = torch.float16 num_images_per_prompt = 2 prior_pipeline = WuerstchenPriorPipeline.from_pretrained( "warp-ai/wuerstchen-prior", torch_dtype=dtype ).to(device) decoder_pipeline = WuerstchenDecoderPipeline.from_pretrained( "warp-ai/wuerstchen", torch_dtype=dtype ).to(device) caption = "Anthropomorphic cat dressed as a fire fighter" negative_prompt = "" prior_output = prior_pipeline( prompt=caption, height=1024, width=1536, timesteps=DEFAULT_STAGE_C_TIMESTEPS, negative_prompt=negative_prompt, guidance_scale=4.0, num_images_per_prompt=num_images_per_prompt, ) decoder_output = decoder_pipeline( image_embeddings=prior_output.image_embeddings, prompt=caption, negative_prompt=negative_prompt, guidance_scale=0.0, output_type="pil", ).images ``` ### Image Sampling Times The figure shows the inference times (on an A100) for different batch sizes (`num_images_per_prompt`) on Würstchen compared to [Stable Diffusion XL](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) (without refiner). The left figure shows inference times (using torch > 2.0), whereas the right figure applies `torch.compile` to both pipelines in advance. 
![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/634cb5eefb80cc6bcaf63c3e/UPhsIH2f079ZuTA_sLdVe.jpeg)

## Model Details
- **Developed by:** Pablo Pernias, Dominic Rampas
- **Model type:** Diffusion-based text-to-image generation model
- **Language(s):** English
- **License:** MIT
- **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a Diffusion model in the style of Stage C from the [Würstchen paper](https://arxiv.org/abs/2306.00637) that uses a fixed, pretrained text encoder ([CLIP ViT-bigG/14](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)).
- **Resources for more information:** [GitHub Repository](https://github.com/dome272/Wuerstchen), [Paper](https://arxiv.org/abs/2306.00637).
- **Cite as:**

      @misc{pernias2023wuerstchen,
            title={Wuerstchen: Efficient Pretraining of Text-to-Image Models},
            author={Pablo Pernias and Dominic Rampas and Marc Aubreville},
            year={2023},
            eprint={2306.00637},
            archivePrefix={arXiv},
            primaryClass={cs.CV}
      }

## Environmental Impact

**Würstchen v2**

**Estimated Emissions** Based on that information, we estimate the following CO2 emissions using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). The hardware, runtime, cloud provider, and compute region were utilized to estimate the carbon impact.

- **Hardware Type:** A100 PCIe 40GB
- **Hours used:** 24602
- **Cloud Provider:** AWS
- **Compute Region:** US-east
- **Carbon Emitted (Power consumption x Time x Carbon produced based on location of power grid):** 2275.68 kg CO2 eq.
klosax/open_llama_13b_600bt_preview
klosax
"2023-07-21T10:34:31Z"
1,640
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-21T08:18:08Z"
Entry not found
YeungNLP/firefly-llama-13b-v1.2
YeungNLP
"2023-07-21T15:19:29Z"
1,640
1
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-07-21T14:55:47Z"
Entry not found
CalderaAI/13B-Legerdemain-L2
CalderaAI
"2023-08-04T10:47:39Z"
1,640
10
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-03T11:39:49Z"
--- license: llama2 --- ## 13B-Legerdemain-L2 13B-Legerdemain-L2 is the first model merge of its kind in a series of LLaMaV2 models mixed using a custom script built in-house by CalderaAI called Model-REVOLVER. M-REVOLVER is also the first in a series of custom scripts based on the concept of mixtuning - not only does the end user have contol over which models are mixed and their percentages on a per-layer basis, we tackle the problem of overcomplexity that arises from such a level of control; this model is the first of its series. ## The Model-REVOLVER Process Designed by CalderaAI M-REVOLVER (Rapid Evolution Via Optimized-List Viewer Evaluated Response) Per-layer merging between parent models is a nebulous inexact science, and therefore impractical to most users despite the raw power it offers. We propose an entirely new approach that gives the user a clear looking glass into the impact vastly different layer merge configurations between selected parent models of their choice will have on the potential offspring model - especially its inherited behaviors. We've developed solution MK.1 - A cyclic random pattern search in place that determines all layer merge ratios, combines test models, infers prompt completions, and deletes a prototype after data collection is saved. When the cyclic system has completed its entire run, nothing is left but the telemetry collected along with the cycle and layer merge ratios from every single prototype merge. This data is then used to empower the user to choose which offspring is most fit to their desired outcome. This final step is only initiated when all necessary data has been aggregated from all assembled-tested-erased prototypes sampled in the search space. From here, the user is provided five 300 token prompt completions from each and every offspring contender that was created and tested during the cyclic process. The user simply browses each prototype's series of responses and selects their desired outcome model by entering the cycle number associated with the prompt completions they feel best suits their vision. That model is then instantly repatriated into the official offspring of its parent models and tokenizer files found to be most relevant are instantly auto-copied from the parent model dir to the offspring. That's it - the user instantly has a complete model based on the behavior they decided on, suggested from one of many potentials; all with their own unique trait inheritence thanks to layer merge auto randomization inside an ordered system. One more thing - the user not only selects how many cycles to run, the user can edit prompts.txt which the system reads as a single prompt - this means if the user desires to use any multiline instruct format to observe all potential model outcomes from instruct, or desires simply their own prompt, it's up to them.. simply works. Link to GitHub for M-REVOLVER are at the end of the model card. More advanced MergeTech toolsets and merge techniques are currently under internal testing and development by Caldera. ## 13B-Legerdemain-L2 Use 13B-Legerdemain-L2 is capable of following Alpaca instructions however it seems far more receptive to the by-the-book method as seen here: ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Response: {New Line} ``` The primary model of choice for this model was a story-only model called Holodeck by KoboldAI. 
Traits preserved seem to be detailed descriptiveness, verbosity, and characters with personality. The two other models selected were 13B-Nous-Hermes by NousResearch and 13B-orca-8k-3319 by OpenAssistant. I began the process by providing an incredibly obscene prompt, simply ignoring each and every guardrail- or censorship-laden prompt completion and accepting the offensive ones in turn - the intent wasn't to be crass but to trigger the censorship-related parts of the network and test whether it's possible to completely undermine them. The second pass, with the offspring model and Orca, used a simple milquetoast prompt to gauge vocabulary, word flow, and intelligence, and I selected the most fit in that category.

The resulting model is a bit of a curiosity - different samplers, and even a different UI (as I went from TGUI to KoboldAI), seem to uncover different facets of its behavior. The Godlike preset with Alpaca Instruct in TGUI worked fine. In KoboldAI some tweaking was necessary to get the same experience. If you choose to test this model, have fun - it's got a mind of its own.

Model-REVOLVER Git: https://github.com/Digitous/ModelREVOLVER
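To make the per-layer mixing idea concrete, here is a minimal sketch of blending two parent checkpoints with randomized layer ratios. It is an illustration of the concept only: the helper name `random_layer_blend`, the grouping of weights by transformer block, and the ratio logging are assumptions, and the actual implementation lives in the M-REVOLVER repository linked above.

```python
# Illustrative sketch of randomized per-layer blending between two parent models.
# Not the M-REVOLVER code; see the linked repository for the real tool.
import random
import torch
from transformers import AutoModelForCausalLM

def random_layer_blend(parent_a: str, parent_b: str, seed: int):
    """Return a child model whose weights are a per-layer mix of two parents."""
    rng = random.Random(seed)
    model_a = AutoModelForCausalLM.from_pretrained(parent_a, torch_dtype=torch.float16)
    model_b = AutoModelForCausalLM.from_pretrained(parent_b, torch_dtype=torch.float16)
    state_b = model_b.state_dict()

    ratios = {}
    with torch.no_grad():
        for name, param in model_a.named_parameters():
            # One random ratio per transformer block; weights outside the blocks
            # (embeddings, final norm, lm_head) share a single "shared" ratio.
            block = name.split(".")[2] if ".layers." in name else "shared"
            ratio = ratios.setdefault(block, rng.uniform(0.0, 1.0))
            param.copy_(ratio * param + (1.0 - ratio) * state_b[name])
    return model_a, ratios  # the ratios double as the "telemetry" logged for this cycle
```

A cyclic search in this spirit would call the helper with a different seed per cycle, generate sample completions from each child, and keep only the ratio log until the user picks a winner.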
FelixChao/vicuna-7B-chemical
FelixChao
"2023-08-25T12:15:59Z"
1,640
2
transformers
[ "transformers", "pytorch", "llama", "text-generation", "chemistry", "en", "dataset:andersonbcdefg/chemistry", "arxiv:1910.09700", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-09T04:36:39Z"
--- license: apache-2.0 datasets: - andersonbcdefg/chemistry language: - en metrics: - bleu - rouge pipeline_tag: text-generation tags: - chemistry --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This model is currently **under testing**. It can answer some **basic chemical questions** and is better than the base-sharded model. Through some **chemical reactions**, it can clearly understand **the reactant and corresponding products**. **(For most, it is correct😅)** **Just have fun testing it, and ask some interesting questions!!** - **Developed by:** FelixChao - **Shared by [optional]:** CleverShovel/vicuna-7b-v1.3-sharded-bf16 - **Finetuned from model [optional]:** CleverShovel/vicuna-7b-v1.3-sharded-bf16 ## How to Get Started with the Model Use the code below to get started with the model. ### For Pipeline ```python # Use a pipeline as a high-level helper from transformers import pipeline pipe = pipeline("text-generation", model="FelixChao/vicuna-7B-chemical") ``` ### For Model_Loading ```python # Load model directly from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("FelixChao/vicuna-7B-chemical") model = AutoModelForCausalLM.from_pretrained("FelixChao/vicuna-7B-chemical") ``` ## Training Details ### Training Data <!-- This should link to a Data Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Data Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
- **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
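As a small complement to the "How to Get Started" snippets above, a minimal generation sketch is shown below. The Vicuna-style `USER:`/`ASSISTANT:` prompt format is an assumption based on the vicuna-7b-v1.3 base model, and the chemistry question is arbitrary.

```python
# Minimal usage sketch; prompt format assumed from the Vicuna-v1.3 base model.
from transformers import pipeline

pipe = pipeline("text-generation", model="FelixChao/vicuna-7B-chemical")

prompt = (
    "USER: What are the products of the reaction between sodium hydroxide "
    "and hydrochloric acid? ASSISTANT:"
)
output = pipe(prompt, max_new_tokens=128, do_sample=True, temperature=0.7)
print(output[0]["generated_text"])
```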
davzoku/cria-llama2-7b-v1.3
davzoku
"2024-01-23T14:48:27Z"
1,640
2
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "llama-2", "en", "dataset:mlabonne/CodeLlama-2-20k", "license:llama2", "autotrain_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-14T06:11:35Z"
--- inference: false language: en license: llama2 model_type: llama datasets: - mlabonne/CodeLlama-2-20k pipeline_tag: text-generation tags: - llama-2 --- # CRIA v1.3 💡 [Article](https://walterteng.com/cria) | 💻 [Github](https://github.com/davzoku/cria) | 📔 Colab [1](https://colab.research.google.com/drive/1rYTs3qWJerrYwihf1j0f00cnzzcpAfYe),[2](https://colab.research.google.com/drive/1Wjs2I1VHjs6zT_GE42iEXsLtYh6VqiJU) ## What is CRIA? > krē-ə plural crias. : a baby llama, alpaca, vicuña, or guanaco. <p align="center"> <img src="https://raw.githubusercontent.com/davzoku/cria/main/assets/icon-512x512.png" width="300" height="300" alt="Cria Logo"> <br> <i>or what ChatGPT suggests, <b>"Crafting a Rapid prototype of an Intelligent llm App using open source resources"</b>.</i> </p> The initial objective of the CRIA project is to develop a comprehensive end-to-end chatbot system, starting from the instruction-tuning of a large language model and extending to its deployment on the web using frameworks such as Next.js. Specifically, we have fine-tuned the `llama-2-7b-chat-hf` model with QLoRA (4-bit precision) using the [mlabonne/CodeLlama-2-20k](https://huggingface.co/datasets/mlabonne/CodeLlama-2-20k) dataset. This fine-tuned model serves as the backbone for the [CRIA chat](https://chat.walterteng.com) platform. ## 📦 Model Release CRIA v1.3 comes with several variants. - [davzoku/cria-llama2-7b-v1.3](https://huggingface.co/davzoku/cria-llama2-7b-v1.3): Merged Model - [davzoku/cria-llama2-7b-v1.3-GGML](https://huggingface.co/davzoku/cria-llama2-7b-v1.3-GGML): Quantized Merged Model - [davzoku/cria-llama2-7b-v1.3_peft](https://huggingface.co/davzoku/cria-llama2-7b-v1.3_peft): PEFT adapter ## 🔧 Training It was trained on a Google Colab notebook with a T4 GPU and high RAM. ### Training procedure The following `bitsandbytes` quantization config was used during training: - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: False - bnb_4bit_compute_dtype: float16 ### Framework versions - PEFT 0.4.0 ## 💻 Usage ```python # pip install transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "davzoku/cria-llama2-7b-v1.3" prompt = "What is a cria?" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) sequences = pipeline( f'<s>[INST] {prompt} [/INST]', do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, max_length=200, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ## References We'd like to thank: - [mlabonne](https://huggingface.co/mlabonne) for his article and resources on implementation of instruction tuning - [TheBloke](https://huggingface.co/TheBloke) for his script for LLM quantization.
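The quantization settings listed above correspond to the following `BitsAndBytesConfig`, shown here as a sketch for loading the merged model in the same 4-bit setup; the arguments simply mirror the list in the training section.

```python
# Sketch of the 4-bit quantization config listed above, expressed as a
# transformers BitsAndBytesConfig (illustration, not the original training notebook).
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    "davzoku/cria-llama2-7b-v1.3",
    quantization_config=bnb_config,
    device_map="auto",
)
```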
yeontaek/llama-2-13b-QLoRA
yeontaek
"2023-08-19T18:32:36Z"
1,640
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-19T17:19:35Z"
Entry not found
zarakiquemparte/zarafusionex-1.1-l2-7b
zarakiquemparte
"2023-08-26T13:30:28Z"
1,640
7
transformers
[ "transformers", "pytorch", "llama", "text-generation", "llama2", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-08-25T00:19:12Z"
---
license: other
tags:
- llama2
---

# Model Card: Zarafusionex 1.1 L2 7b

This model uses [Nous Hermes Llama2 7b](https://huggingface.co/NousResearch/Nous-Hermes-llama-2-7b) (53%) as a base with [Stable Beluga 7b](https://huggingface.co/stabilityai/StableBeluga-7B) (47%), and the result of this merge was merged with the [LimaRP LLama2 7B Lora (version of 07/23/2023)](https://huggingface.co/lemonilia/limarp-llama2).

The merge of the two models (Hermes and Stable Beluga) was done with this [script](https://github.com/zarakiquemparte/zaraki-tools/blob/main/merge-cli.py).

The merge of the Lora with the merged model was done with this [script](https://github.com/zarakiquemparte/zaraki-tools/blob/main/apply-lora.py).

Quantized models by @TheBloke:
- [GGML](https://huggingface.co/TheBloke/Zarafusionex-1.1-L2-7B-GGML)
- [GGUF](https://huggingface.co/TheBloke/Zarafusionex-1.1-L2-7B-GGUF)
- [GPTQ](https://huggingface.co/TheBloke/Zarafusionex-1.1-L2-7B-GPTQ)

Merge illustration:

![illustration](zarafusionex-merge-illustration.png)

## Usage

Since this is a merge between Nous Hermes, Stable Beluga and LimaRP, the following instruction formats should work:

Alpaca 2:

```
### Instruction:
<prompt>

### Response:
<leave a newline blank for model to respond>
```

LimaRP instruction format:

```
<<SYSTEM>>
<character card and system prompt>

<<USER>>
<prompt>

<<AIBOT>>
<leave a newline blank for model to respond>
```

## Bias, Risks, and Limitations

This model is not intended for supplying factual information or advice in any form.

## Training Details

This model is merged and can be reproduced using the tools mentioned above. Please refer to all provided links for extra model-specific details.
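For reference, a minimal generation sketch using the Alpaca-style format above is shown below; the instruction text and sampling settings are arbitrary choices, not recommendations from the merge author.

```python
# Hedged usage sketch with the Alpaca-style prompt format from this card.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "zarakiquemparte/zarafusionex-1.1-l2-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

prompt = "### Instruction:\nWrite a short scene set in a rainy harbor town.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.8)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```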
Mikivis/gpt2-large-lora-sft2
Mikivis
"2023-09-07T03:57:27Z"
1,640
0
transformers
[ "transformers", "pytorch", "safetensors", "gpt2", "text-generation", "generated_from_trainer", "dataset:customized", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-06T11:30:43Z"
--- base_model: /data1/sunhaoyu/pretrained-models/gpt2-large-lora-sft1 tags: - generated_from_trainer datasets: - customized model-index: - name: gpt2-large-lora-sft2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # gpt2-large-lora-sft2 This model is a fine-tuned version of [/data1/sunhaoyu/pretrained-models/gpt2-large-lora-sft1](https://huggingface.co//data1/sunhaoyu/pretrained-models/gpt2-large-lora-sft1) on the customized dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.00015 - train_batch_size: 1 - eval_batch_size: 8 - seed: 42 - distributed_type: multi-GPU - num_devices: 6 - total_train_batch_size: 6 - total_eval_batch_size: 48 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.2 ### Training results ### Framework versions - Transformers 4.32.1 - Pytorch 2.0.1+cu117 - Datasets 2.10.1 - Tokenizers 0.13.3
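For orientation, the hyperparameters above map roughly onto `transformers.TrainingArguments` as sketched below; the output path is a placeholder and the data pipeline is omitted, since the actual training script is not included in this card.

```python
# Rough, illustrative mapping of the listed hyperparameters onto TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="gpt2-large-lora-sft2",   # placeholder output path
    learning_rate=1.5e-4,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=8,
    seed=42,
    num_train_epochs=1.2,
    lr_scheduler_type="linear",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
# With 6 GPUs and batch size 1 per device, the effective train batch size is 6,
# matching the "total_train_batch_size: 6" entry above.
```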
speechlessai/speechless-codellama-34b-v1.0
speechlessai
"2023-11-14T02:11:45Z"
1,640
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "llama-2", "code", "en", "dataset:garage-bAInd/Open-Platypus", "arxiv:2308.12950", "license:llama2", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-12T12:45:55Z"
--- language: - en library_name: transformers pipeline_tag: text-generation datasets: - garage-bAInd/Open-Platypus tags: - llama-2 - code license: llama2 model-index: - name: SpeechlessCoder results: - task: type: text-generation dataset: type: openai_humaneval name: HumanEval metrics: - name: pass@1 type: pass@1 value: 70.12 verified: false --- <p><h1> speechless-codellama-34b-v1.0 </h1></p> > 2023.10.06 [uukuguy/speechless-codellama-34b-v2.0](https://huggingface.co/uukuguy/speechless-codellama-34b-v2.0) release. humaneval-python pass@1: 75.61 Fine-tune the Phind/Phind-CodeLlama-34B with Dolphin (1% GPT4), Orca (1% GPT4) and Platypus (100%) datasets. Code: https://github.com/uukuguy/speechless | humaneval metrics | pass@1 | | --- | --- | | humaneval-python | 70.12 | [Big Code Models Leaderboard](https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard) Phind-CodeLlama-34B-v2: 71.95 WizardCoder-Python-34B-V1.0: 70.73 Phind-CodeLlama-34B-v1: 65.85 WizardCoder-Python-13B-V1.0: 62.19 CodeLlama-34B-Python: 53.29 CodeLlama-34B-Instruct: 50.79 CodeLlama-13B-Instruct: 50.6 CodeLlama-34B: 45.11 CodeLlama-13B-Python: 42.89 CodeLlama-13B: 35.07 [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) | Metric | Value | | --- | --- | | ARC | 52.47 | | HellaSwag | 74.13 | | MMLU | 53.47 | | TruthfulQA | 47.14 | | Average | 56.80 | # **Code Llama** Code Llama is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 34 billion parameters. This is the repository for the base 13B version in the Hugging Face Transformers format. This model is designed for general code synthesis and understanding. Links to other models can be found in the index at the bottom. | | Base Model | Python | Instruct | | --- | ----------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | | 7B | [codellama/CodeLlama-7b-hf](https://huggingface.co/codellama/CodeLlama-7b-hf) | [codellama/CodeLlama-7b-Python-hf](https://huggingface.co/codellama/CodeLlama-7b-Python-hf) | [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf) | | 13B | [codellama/CodeLlama-13b-hf](https://huggingface.co/codellama/CodeLlama-13b-hf) | [codellama/CodeLlama-13b-Python-hf](https://huggingface.co/codellama/CodeLlama-13b-Python-hf) | [codellama/CodeLlama-13b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf) | | 34B | [codellama/CodeLlama-34b-hf](https://huggingface.co/codellama/CodeLlama-34b-hf) | [codellama/CodeLlama-34b-Python-hf](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) | [codellama/CodeLlama-34b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf) | ## Model Use To use this model, please make sure to install transformers from `main` until the next version is released: ```bash pip install git+https://github.com/huggingface/transformers.git@main accelerate ``` Model capabilities: - [x] Code completion. - [x] Infilling. - [ ] Instructions / chat. - [ ] Python specialist. 
```python from transformers import AutoTokenizer import transformers import torch model = "codellama/CodeLlama-13b-hf" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) sequences = pipeline( 'import socket\n\ndef ping_exponential_backoff(host: str):', do_sample=True, top_k=10, temperature=0.1, top_p=0.95, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, max_length=200, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ## Model Details *Note: Use of this model is governed by the Meta license. Meta developed and publicly released the Code Llama family of large language models (LLMs). **Model Developers** Meta **Variations** Code Llama comes in three model sizes, and three variants: * Code Llama: base models designed for general code synthesis and understanding * Code Llama - Python: designed specifically for Python * Code Llama - Instruct: for instruction following and safer deployment All variants are available in sizes of 7B, 13B and 34B parameters. **This repository contains the base version of the 13B parameters model.** **Input** Models input text only. **Output** Models generate text only. **Model Architecture** Code Llama is an auto-regressive language model that uses an optimized transformer architecture. **Model Dates** Code Llama and its variants have been trained between January 2023 and July 2023. **Status** This is a static model trained on an offline dataset. Future versions of Code Llama - Instruct will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) **Research Paper** More information can be found in the paper "[Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)" or its [arXiv page](https://arxiv.org/abs/2308.12950). ## Intended Use **Intended Use Cases** Code Llama and its variants is intended for commercial and research use in English and relevant programming languages. The base model Code Llama can be adapted for a variety of code synthesis and understanding tasks, Code Llama - Python is designed specifically to handle the Python programming language, and Code Llama - Instruct is intended to be safer to use for code assistant and generation applications. **Out-of-Scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Code Llama and its variants. ## Hardware and Software **Training Factors** We used custom training libraries. The training and fine-tuning of the released models have been performed Meta’s Research Super Cluster. **Carbon Footprint** In aggregate, training all 9 Code Llama models required 400K GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 65.3 tCO2eq, 100% of which were offset by Meta’s sustainability program. 
## Training Data

All experiments reported here and the released models have been trained and fine-tuned using the same data as Llama 2 with different weights (see Section 2 and Table 1 in the [research paper](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) for details).

## Evaluation Results

See evaluations for the main models and detailed ablations in Section 3 and safety evaluations in Section 4 of the research paper.

## Ethical Considerations and Limitations

Code Llama and its variants are a new technology that carries risks with use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Code Llama’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate or objectionable responses to user prompts. Therefore, before deploying any applications of Code Llama, developers should perform safety testing and tuning tailored to their specific applications of the model. Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-user-guide](https://ai.meta.com/llama/responsible-user-guide).
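Since the capabilities list above marks infilling as supported for the base Code Llama models, here is a brief sketch following the upstream `<FILL_ME>` convention. Whether this particular fine-tune preserves infilling behaviour is untested here, so the example uses the base 13B checkpoint from the earlier snippet.

```python
# Infilling sketch using the <FILL_ME> convention of the Code Llama base models.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "codellama/CodeLlama-13b-hf"  # base model; swap in the fine-tune to try it
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

prompt = 'def remove_non_ascii(s: str) -> str:\n    """ <FILL_ME>\n    return result\n'
input_ids = tokenizer(prompt, return_tensors="pt")["input_ids"].to(model.device)
generated = model.generate(input_ids, max_new_tokens=128)[0]

# Decode only the newly generated tokens: they form the "middle" that fills the gap.
filling = tokenizer.decode(generated[input_ids.shape[-1]:], skip_special_tokens=True)
print(prompt.replace("<FILL_ME>", filling))
```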
pszemraj/pythia-31m-goodwiki-deduped-2048-scratch
pszemraj
"2023-11-18T12:45:21Z"
1,640
1
transformers
[ "transformers", "safetensors", "gpt_neox", "text-generation", "generated_from_trainer", "en", "dataset:euirim/goodwiki", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-14T23:39:35Z"
--- tags: - generated_from_trainer metrics: - accuracy inference: parameters: max_new_tokens: 64 do_sample: true repetition_penalty: 1.1 no_repeat_ngram_size: 5 guidance_scale: 1.01 eta_cutoff: 0.001 widget: - text: My name is El Microondas the Wise and example_title: El Microondas - text: A meme is example_title: meme - text: >- Barack Obama nominated Hilary Clinton as his secretary of state on Monday. He chose her because she had example_title: Coreference resolution - text: >- On a shelf, there are five books: a gray book, a red book, a purple book, a blue book, and a black book example_title: Logic puzzles - text: >- The two men running to become New York City's next mayor will face off in their first debate Wednesday night example_title: Reading comprehension pipeline_tag: text-generation license: apache-2.0 datasets: - euirim/goodwiki language: - en --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pythia-31m-goodwiki-deduped-2048-scratch Train from scratch based on config of [EleutherAI/pythia-31m](https://huggingface.co/EleutherAI/pythia-31m) for 3 epochs. It achieves the following results on the evaluation set: - Loss: 4.5181 - Accuracy: 0.2680 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data ``` ***** eval metrics ***** epoch = 3.0 eval_accuracy = 0.2694 eval_loss = 4.4986 eval_runtime = 0:00:14.62 eval_samples = 500 eval_samples_per_second = 34.187 eval_steps_per_second = 17.093 perplexity = 89.8934 ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 2 - eval_batch_size: 2 - seed: 80085 - gradient_accumulation_steps: 64 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.99) and epsilon=1e-07 - lr_scheduler_type: inverse_sqrt - lr_scheduler_warmup_ratio: 0.05 - num_epochs: 3.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 6.8347 | 0.16 | 100 | 6.7683 | 0.1380 | | 6.0732 | 0.32 | 200 | 6.0489 | 0.1712 | | 5.6949 | 0.48 | 300 | 5.6941 | 0.1935 | | 5.4723 | 0.64 | 400 | 5.4411 | 0.2066 | | 5.2672 | 0.8 | 500 | 5.2621 | 0.2162 | | 5.165 | 0.96 | 600 | 5.1339 | 0.2241 | | 5.0693 | 1.12 | 700 | 5.0290 | 0.2304 | | 4.9234 | 1.28 | 800 | 4.9430 | 0.2369 | | 4.886 | 1.44 | 900 | 4.8702 | 0.2413 | | 4.8422 | 1.6 | 1000 | 4.8086 | 0.2458 | | 4.7688 | 1.76 | 1100 | 4.7593 | 0.2488 | | 4.734 | 1.93 | 1200 | 4.7118 | 0.2527 | | 4.6877 | 2.09 | 1300 | 4.6721 | 0.2556 | | 4.6135 | 2.25 | 1400 | 4.6350 | 0.2583 | | 4.6117 | 2.41 | 1500 | 4.6013 | 0.2606 | | 4.5424 | 2.57 | 1600 | 4.5707 | 0.2635 | | 4.5535 | 2.73 | 1700 | 4.5447 | 0.2658 | | 4.4823 | 2.89 | 1800 | 4.5181 | 0.2680 | ### Framework versions - Transformers 4.33.1 - Pytorch 2.2.0.dev20230907+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3 # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_pszemraj__pythia-31m-goodwiki-deduped-2048-scratch) | Metric | Value | |-----------------------|---------------------------| | Avg. 
| 24.85 |
| ARC (25-shot)       | 23.12 |
| HellaSwag (10-shot) | 25.66 |
| MMLU (5-shot)       | 23.11 |
| TruthfulQA (0-shot) | 51.32 |
| Winogrande (5-shot) | 49.88 |
| GSM8K (5-shot)      | 0.0   |
| DROP (3-shot)       | 0.86  |
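For convenience, a small generation sketch reusing the sampling settings from the card's inference widget (max_new_tokens, repetition_penalty, no_repeat_ngram_size, eta_cutoff) is shown below; the widget's guidance_scale is omitted, and the prompt is one of the widget examples.

```python
# Generation sketch with the sampling settings listed in the inference widget above.
from transformers import pipeline

pipe = pipeline("text-generation", model="pszemraj/pythia-31m-goodwiki-deduped-2048-scratch")

out = pipe(
    "My name is El Microondas the Wise and",
    max_new_tokens=64,
    do_sample=True,
    repetition_penalty=1.1,
    no_repeat_ngram_size=5,
    eta_cutoff=0.001,
)
print(out[0]["generated_text"])
```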
luffycodes/higgs-llama-vicuna-ep25-70b
luffycodes
"2023-09-22T00:34:15Z"
1,640
2
transformers
[ "transformers", "pytorch", "llama", "text-generation", "chatgpt", "tutorbot", "physics", "code", "math", "mathematics", "en", "arxiv:2309.12161", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-20T04:27:23Z"
---
license: llama2
language:
- en
library_name: transformers
tags:
- chatgpt
- tutorbot
- physics
- code
- math
- mathematics
- llama
---

# Higgs Model Card

## Github details

Please check out the repo: https://github.com/luffycodes/Tutorbot-Spock-Phys.

## Model details

**Model type:**
Higgs is an open-source educational tutoring chatbot trained by fine-tuning the LLaMA-2-70B-chat model on synthetic student-tutorbot conversations generated using [specialized prompts](https://github.com/luffycodes/Tutorbot-Spock-Phys/tree/main/prompts/conversation_gen).
Higgs performs a code soliloquy (an inner monologue) in which it prompts itself through a series of prompts to decide whether its next response to the student would need any math calculations. If Higgs determines that the response might require such a calculation, for example to verify a student's calculation, it outputs Python code. It then uses the output of the Python code to frame a mathematically accurate reply to the student.

**Model date:**
Higgs was trained between May 2023 and September 2023.

**Organizations developing the model:**
The Higgs (Spock) team with members from Rice University and OpenStax.

**Where to send questions or comments about the model:**
Shashank Sonkar ([email protected])

If you use this work, please cite:
Code Soliloquies for Accurate Calculations in Large Language Models
https://arxiv.org/abs/2309.12161

```
@misc{sonkar2023code,
      title={Code Soliloquies for Accurate Calculations in Large Language Models},
      author={Shashank Sonkar and MyCo Le and Xinghe Chen and Lucy Liu and Debshila Basu Mallick and Richard G. Baraniuk},
      year={2023},
      eprint={2309.12161},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```
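The code-soliloquy behaviour described above can be pictured as a small control loop: generate a hidden planning turn, run any emitted Python, and condition the visible reply on the verified result. The sketch below is purely illustrative; the prompt wording, the expectation that generated code sets a `result` variable, and the unsandboxed `exec` are all assumptions, not the pipeline from the Tutorbot-Spock-Phys repository.

```python
# Illustrative control loop for a "code soliloquy"-style tutoring reply.
import re

def reply_with_soliloquy(generate, student_message: str) -> str:
    """`generate` is any text-in/text-out callable wrapping the tutor model."""
    # Hidden planning turn: decide whether a calculation is needed and, if so, emit Python.
    plan = generate(
        "Decide silently whether answering the student requires a calculation. "
        "If so, emit Python in a ```python block that stores the answer in `result`; "
        "otherwise reply NO CODE.\n"
        f"Student: {student_message}"
    )
    match = re.search(r"```python(.*?)```", plan, re.DOTALL)
    if match:
        scope: dict = {}
        exec(match.group(1), scope)  # toy execution step; not safe for untrusted code
        result = scope.get("result", "")
        # Visible reply, grounded in the executed calculation.
        return generate(
            f"Student: {student_message}\nVerified calculation output: {result}\n"
            "Write the tutor's reply using this result."
        )
    return generate(f"Student: {student_message}\nWrite the tutor's reply.")
```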