Dataset columns (name, dtype, observed range):

| Column | Dtype | Observed range / values |
|---|---|---|
| modelId | string | length 5-122 |
| author | string | length 2-42 |
| last_modified | timestamp[us, tz=UTC] | |
| downloads | int64 | 0-738M |
| likes | int64 | 0-11k |
| library_name | string | 245 classes |
| tags | list | length 1-4.05k |
| pipeline_tag | string | 48 classes |
| createdAt | timestamp[us, tz=UTC] | |
| card | string | length 1-901k |
sbeall/Phind-CodeLlama-34B-v1-q5_K_M-GGUF
sbeall
2023-08-26T06:45:38Z
607
0
null
[ "gguf", "license:llama2", "region:us" ]
null
2023-08-26T04:36:46Z
--- license: llama2 --- q5_K_M version of https://huggingface.co/Phind/Phind-CodeLlama-34B-v1
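The card above only states that this is a q5_K_M GGUF conversion of Phind-CodeLlama-34B-v1 and gives no usage example. Below is a minimal sketch of loading such a quantization with the llama-cpp-python bindings; the local filename and the generation settings are assumptions, not taken from the repository.

```python
# Minimal sketch (not from the card): loading a q5_K_M GGUF with llama-cpp-python.
# The filename below is a hypothetical placeholder -- check the repository's file list
# for the actual name of the quantized file.
from llama_cpp import Llama

llm = Llama(
    model_path="./phind-codellama-34b-v1.q5_K_M.gguf",  # hypothetical local path
    n_ctx=4096,       # context window; adjust to your hardware
    n_gpu_layers=-1,  # offload all layers to the GPU if available, 0 for CPU-only
)

out = llm("Write a Python function that reverses a linked list.", max_tokens=256)
print(out["choices"][0]["text"])
```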
cnmoro/Mistral-7B-Portuguese
cnmoro
2024-04-04T12:39:04Z
607
7
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "en", "pt", "dataset:cnmoro/WizardVicuna-PTBR-Instruct-Clean", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-03T02:08:00Z
--- language: - en - pt datasets: - cnmoro/WizardVicuna-PTBR-Instruct-Clean model-index: - name: Mistral-7B-Portuguese results: - task: type: text-generation name: Text Generation dataset: name: ENEM Challenge (No Images) type: eduagarcia/enem_challenge split: train args: num_few_shot: 3 metrics: - type: acc value: 58.08 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: BLUEX (No Images) type: eduagarcia-temp/BLUEX_without_images split: train args: num_few_shot: 3 metrics: - type: acc value: 48.68 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: OAB Exams type: eduagarcia/oab_exams split: train args: num_few_shot: 3 metrics: - type: acc value: 37.08 name: accuracy source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Assin2 RTE type: assin2 split: test args: num_few_shot: 15 metrics: - type: f1_macro value: 90.31 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Assin2 STS type: eduagarcia/portuguese_benchmark split: test args: num_few_shot: 15 metrics: - type: pearson value: 76.55 name: pearson source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: FaQuAD NLI type: ruanchaves/faquad-nli split: test args: num_few_shot: 15 metrics: - type: f1_macro value: 58.84 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HateBR Binary type: ruanchaves/hatebr split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 79.21 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: PT Hate Speech Binary type: hate_speech_portuguese split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 68.87 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: tweetSentBR type: eduagarcia-temp/tweetsentbr split: test args: num_few_shot: 25 metrics: - type: f1_macro value: 64.71 name: f1-macro source: url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=cnmoro/Mistral-7B-Portuguese name: Open Portuguese LLM Leaderboard --- This is a finetuned version of [mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) using [unsloth](https://github.com/unslothai/unsloth) on a instruct portuguese dataset, as an attempt to improve the performance of the model on 
the language. Benchmark results from the Open Portuguese LLM Leaderboard are reported below. The original prompt format was used: ```plaintext <s>[INST] {Prompt goes here} [/INST] ``` # [Open Portuguese LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/eduagarcia-temp/llm_pt_leaderboard_raw_results/tree/main/cnmoro/Mistral-7B-Portuguese) | Metric | Value | |--------------------------|--------| |Average |**64.7**| |ENEM Challenge (No Images)| 58.08| |BLUEX (No Images) | 48.68| |OAB Exams | 37.08| |Assin2 RTE | 90.31| |Assin2 STS | 76.55| |FaQuAD NLI | 58.84| |HateBR Binary | 79.21| |PT Hate Speech Binary | 68.87| |tweetSentBR | 64.71|
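The card documents the prompt format but stops short of a code example. A minimal transformers sketch using that format is below; the generation parameters are illustrative and the Portuguese prompt is just an example, neither comes from the card.

```python
# Minimal sketch (not from the card): generating with the [INST] prompt format shown above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "cnmoro/Mistral-7B-Portuguese"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

# Prompt format from the card: <s>[INST] {Prompt goes here} [/INST]
# (The example asks, in Portuguese: "Explain in a few words what machine learning is.")
prompt = "<s>[INST] Explique em poucas palavras o que é aprendizado de máquina. [/INST]"
inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```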
mradermacher/Mixtral_34Bx2_MoE_60B-GGUF
mradermacher
2024-06-29T06:35:33Z
607
1
transformers
[ "transformers", "gguf", "yi", "moe", "en", "base_model:cloudyu/Mixtral_34Bx2_MoE_60B", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-03-24T14:07:04Z
--- base_model: cloudyu/Mixtral_34Bx2_MoE_60B language: - en library_name: transformers license: apache-2.0 license_link: https://huggingface.co/01-ai/Yi-34B-200K/blob/main/LICENSE license_name: yi-license quantized_by: mradermacher tags: - yi - moe --- ## About static quants of https://huggingface.co/cloudyu/Mixtral_34Bx2_MoE_60B <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q2_K.gguf) | Q2_K | 23.0 | | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.IQ3_XS.gguf) | IQ3_XS | 25.6 | | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q3_K_S.gguf) | Q3_K_S | 27.0 | | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.IQ3_S.gguf) | IQ3_S | 27.0 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.IQ3_M.gguf) | IQ3_M | 27.7 | | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q3_K_M.gguf) | Q3_K_M | 29.9 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q3_K_L.gguf) | Q3_K_L | 32.4 | | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.IQ4_XS.gguf) | IQ4_XS | 33.5 | | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q4_0.gguf) | Q4_0 | 35.0 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q4_K_S.gguf) | Q4_K_S | 35.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.IQ4_NL.gguf) | IQ4_NL | 35.3 | prefer IQ4_XS | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q4_K_M.gguf) | Q4_K_M | 37.3 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q5_K_S.gguf) | Q5_K_S | 42.5 | | | [GGUF](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q5_K_M.gguf) | Q5_K_M | 43.7 | | | [PART 1](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q6_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q6_K.gguf.part2of2) | Q6_K | 50.5 | very good quality | | [PART 1](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q8_0.gguf.part1of2) [PART 
2](https://huggingface.co/mradermacher/Mixtral_34Bx2_MoE_60B-GGUF/resolve/main/Mixtral_34Bx2_MoE_60B.Q8_0.gguf.part2of2) | Q8_0 | 65.1 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
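For the multi-part quants in the table above (Q6_K and Q8_0), the card defers to TheBloke's READMEs for how to concatenate the parts. A minimal Python sketch using only the part filenames listed in the table is below.

```python
# Minimal sketch (not from the card): download the two Q6_K parts listed above and
# join them into a single GGUF file.
import shutil
from huggingface_hub import hf_hub_download

repo_id = "mradermacher/Mixtral_34Bx2_MoE_60B-GGUF"
parts = [
    "Mixtral_34Bx2_MoE_60B.Q6_K.gguf.part1of2",
    "Mixtral_34Bx2_MoE_60B.Q6_K.gguf.part2of2",
]

with open("Mixtral_34Bx2_MoE_60B.Q6_K.gguf", "wb") as merged:
    for name in parts:
        path = hf_hub_download(repo_id=repo_id, filename=name)
        with open(path, "rb") as part:
            shutil.copyfileobj(part, merged)  # append this part to the merged file
```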
mightbe/Better-PairRM
mightbe
2024-04-21T12:31:14Z
607
12
transformers
[ "transformers", "safetensors", "deberta-v2", "reward_model", "reward-model", "RLHF", "evaluation", "llm", "instruction", "reranking", "en", "dataset:openai/summarize_from_feedback", "dataset:openai/webgpt_comparisons", "dataset:berkeley-nest/Nectar", "dataset:Dahoas/instruct-synthetic-prompt-responses", "dataset:Anthropic/hh-rlhf", "dataset:lmsys/chatbot_arena_conversations", "dataset:openbmb/UltraFeedback", "dataset:argilla/ultrafeedback-binarized-preferences-cleaned", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-04-03T23:23:40Z
--- license: apache-2.0 datasets: - openai/summarize_from_feedback - openai/webgpt_comparisons - berkeley-nest/Nectar - Dahoas/instruct-synthetic-prompt-responses - Anthropic/hh-rlhf - lmsys/chatbot_arena_conversations - openbmb/UltraFeedback - argilla/ultrafeedback-binarized-preferences-cleaned metrics: - accuracy tags: - reward_model - reward-model - RLHF - evaluation - llm - instruction - reranking language: - en --- # Better Implementation of [*PairRM*](https://huggingface.co/llm-blender/PairRM) ## Introduction This version of PairRM have some fixes on training process, which improve model's performance by **15%**. ### Minor Fixes - Longer Context Length (2048 -> 3370) Thanks to deberta's tokenzer, original PairRM model had enough Context Length. But, the longer the better :> --- ### Major Fixes - Change Prompt Format Why use something like ``` <Response i + 1> {response} ``` So, I changed to a format based on Vicuna 1.1. --- - Change Truncate side The original process was using right side truncate even on Input. This can cause serious problem when Input exceeds model's context length. --- - Dataset Filter There was decent amount of empty assistant response on original dataset. So, I dropped them. --- ## Example Code **The code below is modified from** (**PairRM-hf Repo**)[https://huggingface.co/llm-blender/PairRM-hf] ```python import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" from llm_blender.pair_ranker.pairrm import DebertaV2PairRM from transformers import AutoTokenizer from typing import List pairrm = DebertaV2PairRM.from_pretrained("maywell/Better-PairRM", device_map="cuda:0").eval() tokenizer = AutoTokenizer.from_pretrained("maywell/Better-PairRM") source_prefix = "<|source|>" cand1_prefix = "<|candidate1|>" cand2_prefix = "<|candidate2|>" inputs = ["hello!", "I love you!"] candidates_A = ["hi!", "I hate you!"] candidates_B = ["f**k off!", "I love you, too!"] def tokenize_pair(sources:List[str], candidate1s:List[str], candidate2s:List[str], source_max_length=2030, candidate_max_length=670): ids = [] assert len(sources) == len(candidate1s) == len(candidate2s) max_length = source_max_length + 2 * candidate_max_length for i in range(len(sources)): source_ids = tokenizer.encode(source_prefix + sources[i], max_length=source_max_length, truncation=True) candidate_max_length = (max_length - len(source_ids)) // 2 candidate1_ids = tokenizer.encode(cand1_prefix + candidate1s[i], max_length=candidate_max_length, truncation=True) candidate2_ids = tokenizer.encode(cand2_prefix + candidate2s[i], max_length=candidate_max_length, truncation=True) ids.append(source_ids + candidate1_ids + candidate2_ids) encodings = tokenizer.pad({"input_ids": ids}, return_tensors="pt", padding="max_length", max_length=max_length) return encodings encodings = tokenize_pair(inputs, candidates_A, candidates_B) encodings = {k:v.to(pairrm.device) for k,v in encodings.items()} outputs = pairrm(**encodings) logits = outputs.logits.tolist() comparison_results = outputs.logits > 0 print(logits) print(comparison_results) ``` You can also easily compare two conversations like the followings: ```python import jinja2 from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-large") def truncate_texts(text, max_length, truncate_side): tokenizer.truncation_side = truncate_side tokens = tokenizer.encode(text, add_special_tokens=False, max_length=max_length) truncated_text = tokenizer.decode(tokens, skip_special_tokens=True) return truncated_text MY_JINJA_TEMPLATE = """{% for message 
in messages -%} {% if message['role'] == 'user' -%} USER: {{ message['content']|trim -}} {% if not loop.last -%} {% endif %} {% elif message['role'] == 'assistant' -%} ASSISTANT: {{ message['content']|trim -}} {% if not loop.last -%} {% endif %} {% elif message['role'] == 'user_context' -%} USER: {{ message['content']|trim -}} {% if not loop.last -%} {% endif %} {% elif message['role'] == 'system' -%} SYSTEM MESSAGE: {{ message['content']|trim -}} {% if not loop.last -%} {% endif %} {% endif %} {% endfor -%} {% if add_generation_prompt and messages[-1]['role'] != 'assistant' -%} ASSISTANT: {% endif -%}""" my_jinja2_env = jinja2.Environment() my_jinja2_template = my_jinja2_env.from_string(MY_JINJA_TEMPLATE) def tokenize_conv_pair(convAs: List[str], convBs: List[str]): # check conversations correctness assert len(convAs) == len(convBs), "Number of conversations must be the same" for c_a, c_b in zip(convAs, convBs): assert len(c_a) == len(c_b), "Number of turns in each conversation must be the same" assert all([c_a[i]['content'] == c_b[i]['content'] for i in range(0, len(c_a), 2)]), "USER turns must be the same" inputs = [ truncate_texts(my_jinja2_template.render(messages=x[:-1], add_generation_prompt=True), 2030, "left") for x in convAs ] cand1_texts = [ truncate_texts(x[-1]['content'], 670, "right") for x in convAs ] cand2_texts = [ truncate_texts(x[-1]['content'], 670, "right") for x in convBs ] encodings = tokenize_pair(inputs, cand1_texts, cand2_texts) return encodings ``` ## Statistics ### Context length | PairRanker type | Source max length | Candidate max length | Total max length | |:-----------------:|:-----------------:|----------------------|------------------| | [pair-ranker](https://huggingface.co/llm-blender/pair-ranker) | 128 | 128 | 384 | | [PairRM](https://huggingface.co/llm-blender/pair-reward-model/) | 1224 | 412 | 2048 | | [Better-PairRM](https://huggingface.co/maywell/Better-PairRM/) (This model) | 2030 | 670 | 3370 | ### Performance #### Reward-Bench by AllenAI | Metric | llm-blender/PairRM-hf | maywell/Better-PairRM | |----------------------------|------------------------|------------------------| | model | llm-blender/PairRM-hf | maywell/Better-PairRM | | model_type | Custom Classifier | Custom Classifier | | alpacaeval-length | 0.758 | **0.863** | | alpacaeval-hard | 0.979 | **1.000** | | alpacaeval-easy | 0.970 | **0.990** | | donotanswer | 0.360 | **0.522** | | hep-cpp | 0.628 | **0.646** | | hep-go | 0.689 | **0.713** | | hep-java | 0.628 | **0.713** | | hep-js | 0.604 | **0.707** | | hep-python | 0.646 | **0.713** | | hep-rust | 0.652 | **0.726** | | llmbar-adver-GPTInst | **0.304** | 0.141 | | llmbar-adver-GPTOut | **0.596** | 0.447 | | llmbar-adver-manual | **0.500** | 0.261 | | llmbar-adver-neighbor | **0.433** | 0.276 | | llmbar-natural | **0.800** | 0.720 | | math-prm | **0.333** | 0.295 | | mt-bench-hard | 0.649 | **0.703** | | mt-bench-med | 0.900 | **1.000** | | mt-bench-easy | **0.964** | 0.929 | | refusals-dangerous | 0.080 | **0.730** | | refusals-offensive | 0.010 | **0.940** | | xstest-should-refuse | 0.370 | **0.968** | | xstest-should-respond | **0.952** | 0.876 | | average | 0.600 | **0.690** | > *Note - llmbar test score is bit weird across all models on [Reward-Bench](https://huggingface.co/spaces/allenai/reward-bench)* ## Thanks to - [Sionic AI](https://sionic.ai/) for providing the A100 cluster. 
## Contact - [Discord Server Link](https://discord.gg/MrBt3PXdXc) ## Original Paper ``` @inproceedings{llm-blender-2023, title = "LLM-Blender: Ensembling Large Language Models with Pairwise Comparison and Generative Fusion", author = "Jiang, Dongfu and Ren, Xiang and Lin, Bill Yuchen", booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL 2023)", year = "2023" } ```
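The conversation-comparison snippet in the card builds the encodings but does not show the final scoring step. A minimal continuation sketch is below; it assumes `pairrm`, `tokenize_pair`, and `tokenize_conv_pair` from the card's snippets are already defined, and the two conversations are made-up examples.

```python
# Minimal sketch (not from the card): score two candidate conversations with the helpers
# defined above. Assumes `pairrm`, `tokenize_pair` and `tokenize_conv_pair` are in scope;
# the conversation contents are made-up examples.
conv_a = [
    {"role": "user", "content": "Summarize the plot of Hamlet in one sentence."},
    {"role": "assistant", "content": "A Danish prince avenges his father's murder, and nearly everyone dies."},
]
conv_b = [
    {"role": "user", "content": "Summarize the plot of Hamlet in one sentence."},
    {"role": "assistant", "content": "It's a play by Shakespeare."},
]

encodings = tokenize_conv_pair([conv_a], [conv_b])
encodings = {k: v.to(pairrm.device) for k, v in encodings.items()}
outputs = pairrm(**encodings)

# A positive logit means the last reply of conv_a is preferred over that of conv_b.
print(outputs.logits.tolist())
print((outputs.logits > 0).tolist())
```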
Felladrin/gguf-pythia-1.4b-sft-full
Felladrin
2024-05-23T13:05:54Z
607
0
null
[ "gguf", "base_model:nnheui/pythia-1.4b-sft-full", "license:apache-2.0", "region:us" ]
null
2024-05-23T12:18:14Z
--- license: apache-2.0 base_model: nnheui/pythia-1.4b-sft-full --- GGUF version of [nnheui/pythia-1.4b-sft-full](https://huggingface.co/nnheui/pythia-1.4b-sft-full).
patrickvonplaten/bert2bert-cnn_dailymail-fp16
patrickvonplaten
2020-12-12T11:22:49Z
606
4
transformers
[ "transformers", "pytorch", "encoder-decoder", "text2text-generation", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2022-03-02T23:29:05Z
# Bert2Bert Summarization with 🤗 EncoderDecoder Framework This model is a Bert2Bert model fine-tuned on summarization. Bert2Bert is a `EncoderDecoderModel`, meaning that both the encoder and the decoder are `bert-base-uncased` BERT models. Leveraging the [EncoderDecoderFramework](https://huggingface.co/transformers/model_doc/encoderdecoder.html#encoder-decoder-models), the two pretrained models can simply be loaded into the framework via: ```python bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased") ``` The decoder of an `EncoderDecoder` model needs cross-attention layers and usually makes use of causal masking for auto-regressiv generation. Thus, ``bert2bert`` is consequently fined-tuned on the `CNN/Daily Mail`dataset and the resulting model `bert2bert-cnn_dailymail-fp16` is uploaded here. ## Example The model is by no means a state-of-the-art model, but nevertheless produces reasonable summarization results. It was mainly fine-tuned as a proof-of-concept for the 🤗 EncoderDecoder Framework. The model can be used as follows: ```python from transformers import BertTokenizer, EncoderDecoderModel model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") tokenizer = BertTokenizer.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16") article = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. 
Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents.""" input_ids = tokenizer(article, return_tensors="pt").input_ids output_ids = model.generate(input_ids) print(tokenizer.decode(output_ids[0], skip_special_tokens=True)) # should produce # sae was founded in 1856, five years before the civil war. the fraternity has had to work hard to change recently. the university of oklahoma president says the university's affiliation with the fraternity is permanently done. the sae has had a string of members in recent mon ths. ``` ## Training script: Please follow this tutorial to see how to warm-start a BERT2BERT model: https://colab.research.google.com/drive/1WIk2bxglElfZewOHboPFNj8H44_VAyKE?usp=sharing The obtained results should be: | - | Rouge2 - mid -precision | Rouge2 - mid - recall | Rouge2 - mid - fmeasure | |----------|:-------------:|:------:|:------:| | **CNN/Daily Mail** | 16.12 | 17.07 | **16.1** |
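The table above reports ROUGE-2 scores from the warm-starting tutorial. To score your own generations in a comparable way, a hedged sketch using the `evaluate` library is below; the prediction/reference strings are placeholders, and recent versions of the library return aggregated F-measures rather than the precision/recall/F-measure triple shown in the table.

```python
# Minimal sketch (not from the card): computing ROUGE for generated summaries with the
# `evaluate` library. The strings below are placeholders; depending on the library
# version, the returned values are aggregated F-measures.
import evaluate

rouge = evaluate.load("rouge")
predictions = ["sae was founded in 1856, five years before the civil war."]
references = ["Sigma Alpha Epsilon was founded in 1856, five years before the American Civil War."]

results = rouge.compute(predictions=predictions, references=references)
print(results)  # e.g. {'rouge1': ..., 'rouge2': ..., 'rougeL': ..., 'rougeLsum': ...}
```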
tae898/emoberta-large
tae898
2022-03-16T11:01:48Z
606
7
transformers
[ "transformers", "pytorch", "roberta", "text-classification", "emoberta", "en", "dataset:MELD", "dataset:IEMOCAP", "arxiv:2108.12009", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-03-14T20:33:23Z
--- language: en tags: - emoberta - roberta license: mit datasets: - MELD - IEMOCAP --- Check https://github.com/tae898/erc for the details [Watch a demo video!](https://youtu.be/qbr7fNd6J28) # Emotion Recognition in Coversation (ERC) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/emoberta-speaker-aware-emotion-recognition-in/emotion-recognition-in-conversation-on)](https://paperswithcode.com/sota/emotion-recognition-in-conversation-on?p=emoberta-speaker-aware-emotion-recognition-in) [![PWC](https://img.shields.io/endpoint.svg?url=https://paperswithcode.com/badge/emoberta-speaker-aware-emotion-recognition-in/emotion-recognition-in-conversation-on-meld)](https://paperswithcode.com/sota/emotion-recognition-in-conversation-on-meld?p=emoberta-speaker-aware-emotion-recognition-in) At the moment, we only use the text modality to correctly classify the emotion of the utterances.The experiments were carried out on two datasets (i.e. MELD and IEMOCAP) ## Prerequisites 1. An x86-64 Unix or Unix-like machine 1. Python 3.8 or higher 1. Running in a virtual environment (e.g., conda, virtualenv, etc.) is highly recommended so that you don't mess up with the system python. 1. [`multimodal-datasets` repo](https://github.com/tae898/multimodal-datasets) (submodule) 1. pip install -r requirements.txt ## EmoBERTa training First configure the hyper parameters and the dataset in `train-erc-text.yaml` and then, In this directory run the below commands. I recommend you to run this in a virtualenv. ```sh python train-erc-text.py ``` This will subsequently call `train-erc-text-hp.py` and `train-erc-text-full.py`. ## Results on the test split (weighted f1 scores) | Model | | MELD | IEMOCAP | | -------- | ------------------------------- | :-------: | :-------: | | EmoBERTa | No past and future utterances | 63.46 | 56.09 | | | Only past utterances | 64.55 | **68.57** | | | Only future utterances | 64.23 | 66.56 | | | Both past and future utterances | **65.61** | 67.42 | | | → *without speaker names* | 65.07 | 64.02 | Above numbers are the mean values of five random seed runs. If you want to see more training test details, check out `./results/` If you want to download the trained checkpoints and stuff, then [here](https://surfdrive.surf.nl/files/index.php/s/khREwk4MUI7MSnO/download) is where you can download them. It's a pretty big zip file. ## Deployment ### Huggingface We have released our models on huggingface: - [emoberta-base](https://huggingface.co/tae898/emoberta-base) - [emoberta-large](https://huggingface.co/tae898/emoberta-large) They are based on [RoBERTa-base](https://huggingface.co/roberta-base) and [RoBERTa-large](https://huggingface.co/roberta-large), respectively. They were trained on [both MELD and IEMOCAP datasets](utterance-ordered-MELD_IEMOCAP.json). Our deployed models are neither speaker-aware nor take previous utterances into account, meaning that it only classifies one utterance at a time without the speaker information (e.g., "I love you"). ### Flask app You can either run the Flask RESTful server app as a docker container or just as a python script. 1. Running the app as a docker container **(recommended)**. There are four images. Take what you need: - `docker run -it --rm -p 10006:10006 tae898/emoberta-base` - `docker run -it --rm -p 10006:10006 --gpus all tae898/emoberta-base-cuda` - `docker run -it --rm -p 10006:10006 tae898/emoberta-large` - `docker run -it --rm -p 10006:10006 --gpus all tae898/emoberta-large-cuda` 1. 
Running the app in your python environment: This method is less recommended than the docker one. Run `pip install -r requirements-deploy.txt` first.<br> The [`app.py`](app.py) is a flask RESTful server. The usage is below: ```console app.py [-h] [--host HOST] [--port PORT] [--device DEVICE] [--model-type MODEL_TYPE] ``` For example: ```sh python app.py --host 0.0.0.0 --port 10006 --device cpu --model-type emoberta-base ``` ### Client Once the app is running, you can send a text to the server. First install the necessary packages: `pip install -r requirements-client.txt`, and the run the [client.py](client.py). The usage is as below: ```console client.py [-h] [--url-emoberta URL_EMOBERTA] --text TEXT ``` For example: ```sh python client.py --text "Emotion recognition is so cool\!" ``` will give you: ```json { "neutral": 0.0049800905, "joy": 0.96399665, "surprise": 0.018937444, "anger": 0.0071516023, "sadness": 0.002021492, "disgust": 0.001495996, "fear": 0.0014167271 } ``` ## Troubleshooting The best way to find and solve your problems is to see in the github issue tab. If you can't find what you want, feel free to raise an issue. We are pretty responsive. ## Contributing Contributions are what make the open source community such an amazing place to be learn, inspire, and create. Any contributions you make are **greatly appreciated**. 1. Fork the Project 1. Create your Feature Branch (`git checkout -b feature/AmazingFeature`) 1. Run `make style && quality` in the root repo directory, to ensure code quality. 1. Commit your Changes (`git commit -m 'Add some AmazingFeature'`) 1. Push to the Branch (`git push origin feature/AmazingFeature`) 1. Open a Pull Request ## Cite our work Check out the [paper](https://arxiv.org/abs/2108.12009). ```bibtex @misc{kim2021emoberta, title={EmoBERTa: Speaker-Aware Emotion Recognition in Conversation with RoBERTa}, author={Taewoon Kim and Piek Vossen}, year={2021}, eprint={2108.12009}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` [![DOI](https://zenodo.org/badge/328375452.svg)](https://zenodo.org/badge/latestdoi/328375452)<br> ## Authors - [Taewoon Kim](https://taewoonkim.com/) ## License [MIT](https://choosealicense.com/licenses/mit/)
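Besides the Flask/Docker deployment described above, the released checkpoint can be used directly through the transformers pipeline API. A minimal sketch (not part of the original card) follows; as stated above, the deployed models classify a single utterance at a time without speaker information.

```python
# Minimal sketch (not from the card): classify a single utterance directly with the
# transformers text-classification pipeline instead of the Flask server.
from transformers import pipeline

classifier = pipeline("text-classification", model="tae898/emoberta-large", top_k=None)
scores = classifier("Emotion recognition is so cool!")
print(scores)  # label/score pairs for all seven emotion classes
```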
Matthijs/deeplabv3_mobilenet_v2_1.0_513
Matthijs
2022-11-10T12:50:21Z
606
1
transformers
[ "transformers", "pytorch", "coreml", "mobilenet_v2", "vision", "image-segmentation", "dataset:pascal-voc", "arxiv:1801.04381", "arxiv:1802.02611", "license:other", "endpoints_compatible", "region:us" ]
image-segmentation
2022-06-28T11:16:06Z
--- license: other tags: - vision - image-segmentation datasets: - pascal-voc widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/cat-2.jpg example_title: Cat --- # MobileNetV2 with DeepLabV3+ MobileNet V2 model pre-trained on PASCAL VOC at resolution 513x513. It was introduced in [MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381) by Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen. It was first released in [this repository](https://github.com/tensorflow/models/tree/master/research/deeplab). Disclaimer: The team releasing MobileNet V2 did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description From the [original README](https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet_v1.md): > MobileNets are small, low-latency, low-power models parameterized to meet the resource constraints of a variety of use cases. They can be built upon for classification, detection, embeddings and segmentation similar to how other popular large scale models, such as Inception, are used. MobileNets can be run efficiently on mobile devices [...] MobileNets trade off between latency, size and accuracy while comparing favorably with popular models from the literature. The model in this repo adds a [DeepLabV3+](https://arxiv.org/abs/1802.02611) head to the MobileNetV2 backbone for semantic segmentation. ## Intended uses & limitations You can use the raw model for semantic segmentation. See the [model hub](https://huggingface.co/models?search=mobilenet_v2) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model: ```python from transformers import MobileNetV2FeatureExtractor, MobileNetV2ForSemanticSegmentation from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) feature_extractor = MobileNetV2FeatureExtractor.from_pretrained("Matthijs/deeplabv3_mobilenet_v2_1.0_513") model = MobileNetV2ForSemanticSegmentation.from_pretrained("Matthijs/deeplabv3_mobilenet_v2_1.0_513") inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits predicted_mask = logits.argmax(1).squeeze(0) ``` Currently, both the feature extractor and model support PyTorch. ### BibTeX entry and citation info ```bibtex @inproceedings{deeplabv3plus2018, title={Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation}, author={Liang-Chieh Chen and Yukun Zhu and George Papandreou and Florian Schroff and Hartwig Adam}, booktitle={ECCV}, year={2018} } ```
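The usage snippet above leaves `predicted_mask` at the model's output resolution. A minimal follow-up sketch (not from the card) upsamples the logits to the original image size before taking the argmax, so the mask lines up with the input pixels; it assumes `logits` and `image` from the snippet above.

```python
# Minimal sketch (not from the card): upsample the logits to the original image size
# before taking the argmax, so the mask aligns with the input image.
import torch

upsampled = torch.nn.functional.interpolate(
    logits,                 # (batch, num_classes, height, width)
    size=image.size[::-1],  # PIL size is (width, height); interpolate expects (height, width)
    mode="bilinear",
    align_corners=False,
)
full_res_mask = upsampled.argmax(dim=1).squeeze(0)  # (height, width) tensor of class indices
```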
instruction-tuning-sd/scratch-cartoonizer
instruction-tuning-sd
2023-05-13T07:45:21Z
606
7
diffusers
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "image-to-image", "art", "dataset:instruction-tuning-sd/cartoonization", "arxiv:2211.09800", "arxiv:2109.01652", "license:mit", "diffusers:StableDiffusionInstructPix2PixPipeline", "region:us" ]
image-to-image
2023-03-23T03:59:30Z
--- license: mit tags: - stable-diffusion - stable-diffusion-diffusers - image-to-image - art widget: - src: >- https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png prompt: Cartoonize the following image datasets: - instruction-tuning-sd/cartoonization --- # Instruction-tuned Stable Diffusion for Cartoonization (Scratch) This pipeline is an 'instruction-tuned' version of [Stable Diffusion (v1.5)](https://huggingface.co/runwayml/stable-diffusion-v1-5). It was trained using the [InstructPix2Pix methodology](https://huggingface.co/papers/2211.09800). ## Pipeline description Motivation behind this pipeline partly comes from [FLAN](https://huggingface.co/papers/2109.01652) and partly comes from [InstructPix2Pix](https://huggingface.co/papers/2211.09800). The main idea is to first create an instruction prompted dataset (as described in [our blog](https://hf.co/blog/instruction-tuning-sd)) and then conduct InstructPix2Pix style training. The end objective is to make Stable Diffusion better at following specific instructions that entail image transformation related operations. <p align="center"> <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/instruction-tuning-sd.png" width=600/> </p> Follow [this post](https://hf.co/blog/instruction-tuning-sd) to know more. ## Training procedure and results Training was conducted on [instruction-tuning-sd/cartoonization](https://huggingface.co/datasets/instruction-tuning-sd/cartoonization) dataset. Refer to [this repository](https://github.com/huggingface/instruction-tuned-sd) to know more. Here are some results dervied from the pipeline: <p align="center"> <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/cartoonization_results.jpeg" width=600/> </p> ## Intended uses & limitations You can use the pipeline for performing cartoonization with an input image and an input prompt. ### How to use Here is how to use this model: ```python import torch from diffusers import StableDiffusionInstructPix2PixPipeline from diffusers.utils import load_image model_id = "instruction-tuning-sd/scratch-cartoonizer" pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained( model_id, torch_dtype=torch.float16, use_auth_token=True ).to("cuda") image_path = "https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" image = load_image(image_path) image = pipeline("Cartoonize the following image", image=image).images[0] image.save("image.png") ``` For notes on limitations, misuse, malicious use, out-of-scope use, please refer to the model card [here](https://huggingface.co/runwayml/stable-diffusion-v1-5). ## Citation **FLAN** ```bibtex @inproceedings{ wei2022finetuned, title={Finetuned Language Models are Zero-Shot Learners}, author={Jason Wei and Maarten Bosma and Vincent Zhao and Kelvin Guu and Adams Wei Yu and Brian Lester and Nan Du and Andrew M. 
Dai and Quoc V Le}, booktitle={International Conference on Learning Representations}, year={2022}, url={https://openreview.net/forum?id=gEZrGCozdqR} } ``` **InstructPix2Pix** ```bibtex @InProceedings{ brooks2022instructpix2pix, author = {Brooks, Tim and Holynski, Aleksander and Efros, Alexei A.}, title = {InstructPix2Pix: Learning to Follow Image Editing Instructions}, booktitle = {CVPR}, year = {2023}, } ``` **Instruction-tuning for Stable Diffusion blog** ```bibtex @article{ Paul2023instruction-tuning-sd, author = {Paul, Sayak}, title = {Instruction-tuning Stable Diffusion with InstructPix2Pix}, journal = {Hugging Face Blog}, year = {2023}, note = {https://huggingface.co/blog/instruction-tuning-sd}, } ```
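The example call in the card uses the pipeline's default settings. A hedged variant exposing the usual InstructPix2Pix knobs is below; the values are illustrative rather than tuned for this checkpoint, and it assumes `pipeline` and `image` from the card's snippet.

```python
# Minimal sketch (not from the card): the same call with the usual InstructPix2Pix knobs
# exposed. Values are illustrative, not tuned for this checkpoint.
image = pipeline(
    "Cartoonize the following image",
    image=image,
    num_inference_steps=20,    # fewer steps is faster, possibly rougher
    image_guidance_scale=1.5,  # how closely to follow the input image
    guidance_scale=7.5,        # how strongly to follow the text instruction
).images[0]
image.save("image.png")
```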
TheBloke/notux-8x7b-v1-GGUF
TheBloke
2023-12-29T15:37:20Z
606
5
transformers
[ "transformers", "gguf", "mixtral", "dpo", "rlaif", "preference", "ultrafeedback", "text-generation", "en", "de", "es", "fr", "it", "dataset:argilla/ultrafeedback-binarized-preferences-cleaned", "base_model:argilla/notux-8x7b-v1", "license:apache-2.0", "text-generation-inference", "region:us" ]
text-generation
2023-12-29T14:50:04Z
--- base_model: argilla/notux-8x7b-v1 datasets: - argilla/ultrafeedback-binarized-preferences-cleaned inference: false language: - en - de - es - fr - it library_name: transformers license: apache-2.0 model-index: - name: notux-8x7b-v1 results: [] model_creator: Argilla model_name: Notux 8X7B v1 model_type: mixtral pipeline_tag: text-generation prompt_template: '{prompt} ' quantized_by: TheBloke tags: - dpo - rlaif - preference - ultrafeedback --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Notux 8X7B v1 - GGUF - Model creator: [Argilla](https://huggingface.co/argilla) - Original model: [Notux 8X7B v1](https://huggingface.co/argilla/notux-8x7b-v1) <!-- description start --> ## Description This repo contains GGUF format model files for [Argilla's Notux 8X7B v1](https://huggingface.co/argilla/notux-8x7b-v1). These files were quantised using hardware kindly provided by [Massed Compute](https://massedcompute.com/). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling. * [GPT4All](https://gpt4all.io/index.html), a free and open source local running GUI, supporting Windows, Linux and macOS with full GPU accel. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. Linux available, in beta as of 27/11/2023. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. 
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. Note, as of time of writing (November 27th 2023), ctransformers has not been updated in a long time and does not support many recent models. <!-- README_GGUF.md-about-gguf end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/notux-8x7b-v1-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/notux-8x7b-v1-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF) * [Argilla's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/argilla/notux-8x7b-v1) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Unknown ``` {prompt} ``` <!-- prompt-template end --> <!-- compatibility_gguf start --> ## Compatibility These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) They are also compatible with many third party UIs and libraries - please see the list at the top of this README. ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This end up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. 
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [notux-8x7b-v1.Q2_K.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q2_K.gguf) | Q2_K | 2 | 15.64 GB| 18.14 GB | smallest, significant quality loss - not recommended for most purposes | | [notux-8x7b-v1.Q3_K_S.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q3_K_S.gguf) | Q3_K_S | 3 | 20.29 GB| 22.79 GB | very small, high quality loss | | [notux-8x7b-v1.Q3_K_M.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q3_K_M.gguf) | Q3_K_M | 3 | 20.36 GB| 22.86 GB | very small, high quality loss | | [notux-8x7b-v1.Q3_K_L.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q3_K_L.gguf) | Q3_K_L | 3 | 20.43 GB| 22.93 GB | small, substantial quality loss | | [notux-8x7b-v1.Q4_0.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q4_0.gguf) | Q4_0 | 4 | 26.44 GB| 28.94 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [notux-8x7b-v1.Q4_K_M.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q4_K_M.gguf) | Q4_K_M | 4 | 26.44 GB| 28.94 GB | medium, balanced quality - recommended | | [notux-8x7b-v1.Q4_K_S.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q4_K_S.gguf) | Q4_K_S | 4 | 26.44 GB| 28.94 GB | small, greater quality loss | | [notux-8x7b-v1.Q5_0.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q5_0.gguf) | Q5_0 | 5 | 32.23 GB| 34.73 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [notux-8x7b-v1.Q5_K_M.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q5_K_M.gguf) | Q5_K_M | 5 | 32.23 GB| 34.73 GB | large, very low quality loss - recommended | | [notux-8x7b-v1.Q5_K_S.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q5_K_S.gguf) | Q5_K_S | 5 | 32.23 GB| 34.73 GB | large, low quality loss - recommended | | [notux-8x7b-v1.Q6_K.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q6_K.gguf) | Q6_K | 6 | 38.38 GB| 40.88 GB | very large, extremely low quality loss | | [notux-8x7b-v1.Q8_0.gguf](https://huggingface.co/TheBloke/notux-8x7b-v1-GGUF/blob/main/notux-8x7b-v1.Q8_0.gguf) | Q8_0 | 8 | 49.63 GB| 52.13 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. The following clients/libraries will automatically download models for you, providing a list of available models to choose from: * LM Studio * LoLLMS Web UI * Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/notux-8x7b-v1-GGUF and below it, a specific filename to download, such as: notux-8x7b-v1.Q4_K_M.gguf. Then click Download. 
### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/notux-8x7b-v1-GGUF notux-8x7b-v1.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage (click to read)</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/notux-8x7b-v1-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/notux-8x7b-v1-GGUF notux-8x7b-v1.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 35 -m notux-8x7b-v1.Q4_K_M.gguf --color -c 32768 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "{prompt}" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 32768` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20%E2%80%90%20Model%20Tab.md#llamacpp). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. Therefore I recommend you use llama-cpp-python. ### How to load this model in Python code, using llama-cpp-python For full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/). 
#### First install the package Run one of the following commands, according to your system: ```shell # Base ctransformers with no GPU acceleration pip install llama-cpp-python # With NVidia CUDA acceleration CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python # Or with OpenBLAS acceleration CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python # Or with CLBLast acceleration CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python # Or with AMD ROCm GPU acceleration (Linux only) CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python # Or with Metal GPU acceleration for macOS systems only CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python # In windows, to set the variables CMAKE_ARGS in PowerShell, follow this format; eg for NVidia CUDA: $env:CMAKE_ARGS = "-DLLAMA_OPENBLAS=on" pip install llama-cpp-python ``` #### Simple llama-cpp-python example code ```python from llama_cpp import Llama # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = Llama( model_path="./notux-8x7b-v1.Q4_K_M.gguf", # Download the model file first n_ctx=32768, # The max sequence length to use - note that longer sequence lengths require much more resources n_threads=8, # The number of CPU threads to use, tailor to your system and the resulting performance n_gpu_layers=35 # The number of layers to offload to GPU, if you have GPU acceleration available ) # Simple inference example output = llm( "{prompt}", # Prompt max_tokens=512, # Generate up to 512 tokens stop=["</s>"], # Example stop token - not necessarily correct for this specific model! Please check before using. echo=True # Whether to echo the prompt ) # Chat Completion API llm = Llama(model_path="./notux-8x7b-v1.Q4_K_M.gguf", chat_format="llama-2") # Set chat_format according to the model you are using llm.create_chat_completion( messages = [ {"role": "system", "content": "You are a story writing assistant."}, { "role": "user", "content": "Write a story about llamas." } ] ) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](llm-utils)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. 
**Patreon special mentions**: Michael Levine, 阿明, Trailburnt, Nikolai Manek, John Detwiler, Randy H, Will Dee, Sebastain Graf, NimbleBox.ai, Eugene Pentland, Emad Mostaque, Ai Maven, Jim Angel, Jeff Scroggin, Michael Davis, Manuel Alberto Morcote, Stephen Murray, Robert, Justin Joy, Luke @flexchar, Brandon Frisco, Elijah Stavena, S_X, Dan Guido, Undi ., Komninos Chatzipapas, Shadi, theTransient, Lone Striker, Raven Klaugh, jjj, Cap'n Zoog, Michel-Marie MAUDET (LINAGORA), Matthew Berman, David, Fen Risland, Omer Bin Jawed, Luke Pendergrass, Kalila, OG, Erik Bjäreholt, Rooh Singh, Joseph William Delisle, Dan Lewis, TL, John Villwock, AzureBlack, Brad, Pedro Madruga, Caitlyn Gatomon, K, jinyuan sun, Mano Prime, Alex, Jeffrey Morgan, Alicia Loh, Illia Dulskyi, Chadd, transmissions 11, fincy, Rainer Wilmers, ReadyPlayerEmma, knownsqashed, Mandus, biorpg, Deo Leter, Brandon Phillips, SuperWojo, Sean Connelly, Iucharbius, Jack West, Harry Royden McLaughlin, Nicholas, terasurfer, Vitor Caleffi, Duane Dunston, Johann-Peter Hartmann, David Ziegler, Olakabola, Ken Nordquist, Trenton Dambrowitz, Tom X Nguyen, Vadim, Ajan Kanaga, Leonard Tan, Clay Pascal, Alexandros Triantafyllidis, JM33133, Xule, vamX, ya boyyy, subjectnull, Talal Aujan, Alps Aficionado, wassieverse, Ari Malik, James Bentley, Woland, Spencer Kim, Michael Dempsey, Fred von Graf, Elle, zynix, William Richards, Stanislav Ovsiannikov, Edmond Seymore, Jonathan Leane, Martin Kemka, usrbinkat, Enrico Ros Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: Argilla's Notux 8X7B v1 <div align="center"> <img src="https://cdn-uploads.huggingface.co/production/uploads/60f0608166e5701b80ed3f02/dj-spsk9eXMMXVGxK6jRz.png" alt="A banner representing Notus, the wind god of the south, in a mythical and artistic style. The banner features a strong, swirling breeze, embodying the warm, wet character of the southern wind. Gracefully flowing across the scene are several paper planes, caught in the gentle yet powerful gusts of Notus. The background is a blend of warm colors, symbolizing the heat of the south, with hints of blue and green to represent the moisture carried by this wind. The overall atmosphere is one of dynamic movement and warmth."/> </div> # Model Card for Notux 8x7B-v1 This model is a preference-tuned version of [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) on the [argilla/ultrafeedback-binarized-preferences-cleaned](https://huggingface.co/datasets/argilla/ultrafeedback-binarized-preferences-cleaned) dataset using DPO (Direct Preference Optimization). As of Dec 26th 2023, it outperforms `Mixtral-8x7B-Instruct-v0.1` and is the top ranked MoE (Mixture of Experts) model on the [Hugging Face Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). This is part of the Notus family of models and experiments, where the Argilla team investigates data-first and preference tuning methods like dDPO (distilled DPO). This model is the result of our first experiment at tuning a MoE model that has already been fine-tuned with DPO (i.e., Mixtral-8x7B-Instruct-v0.1). 
## Model Details ### Model Description - **Developed by:** Argilla (based on MistralAI previous efforts) - **Shared by:** Argilla - **Model type:** Pretrained generative Sparse Mixture of Experts - **Language(s) (NLP):** English, Spanish, Italian, German, and French - **License:** MIT - **Finetuned from model:** [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) ### Model Sources - **Repository:** https://github.com/argilla-io/notus - **Paper:** N/A ## Training Details ### Training Hardware We used a VM with 8 x H100 80GB hosted in runpod.io for 1 epoch (~10hr). ### Training Data We used a new iteration of the Argilla UltraFeedback preferences dataset named [argilla/ultrafeedback-binarized-preferences-cleaned](https://huggingface.co/datasets/argilla/ultrafeedback-binarized-preferences-cleaned). ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 8 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - total_train_batch_size: 64 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:| | 0.4384 | 0.22 | 200 | 0.4556 | -0.3275 | -1.9448 | 0.7937 | 1.6174 | -405.7994 | -397.8617 | -1.3157 | -1.4511 | | 0.4064 | 0.43 | 400 | 0.4286 | -0.2163 | -2.2090 | 0.8254 | 1.9927 | -408.4409 | -396.7496 | -0.7660 | -0.6539 | | 0.3952 | 0.65 | 600 | 0.4275 | -0.1311 | -2.1603 | 0.8016 | 2.0291 | -407.9537 | -395.8982 | -0.6783 | -0.7206 | | 0.3909 | 0.87 | 800 | 0.4167 | -0.2273 | -2.3146 | 0.8135 | 2.0872 | -409.4968 | -396.8602 | -0.8458 | -0.7738 | ### Framework versions - Transformers 4.36.0 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.15.0 <!-- original-model-card end -->
NickyNicky/gemma-2b-it_oasst2_chatML_Cluster_2_V1
NickyNicky
2024-02-23T11:25:06Z
606
1
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "conversational", "bg", "ca", "cs", "da", "de", "en", "es", "fr", "hr", "hu", "it", "nl", "pl", "pt", "ro", "ru", "sl", "sr", "sv", "uk", "dataset:OpenAssistant/oasst2", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-23T07:19:56Z
---
license: apache-2.0
datasets:
- OpenAssistant/oasst2
language:
- bg
- ca
- cs
- da
- de
- en
- es
- fr
- hr
- hu
- it
- nl
- pl
- pt
- ro
- ru
- sl
- sr
- sv
- uk
library_name: transformers
widget:
- text: |
    <bos><start_of_turn>system
    You are a helpful AI assistant.<end_of_turn>
    <start_of_turn>user
    What is the meaning of life in the current time?<end_of_turn>
    <start_of_turn>model
---

![image/png](https://cdn-uploads.huggingface.co/production/uploads/641b435ba5f876fe30c5ae0a/YXqUXFjX8uIJT-mdOnM1h.png)

```
reference data model:

  datasets:
    - lang: "bg,ca,cs,da,de,en,es,fr,hr,hu,it,nl,pl,pt,ro,ru,sl,sr,sv,uk"
      link: https://huggingface.co/datasets/NickyNicky/oasst2_clusters

  model:
    - google/gemma-2b-it
      Link: https://huggingface.co/google/gemma-2b-it

  Epoch: 7

  future experts: Cluster_2

  Eval model:
    - link: soon
```

## Installation

```Python
!python -m pip install --upgrade pip
!pip install "torch>=2.1.1" -U
!pip install torchaudio==2.2.0
!pip install -q datasets trl peft bitsandbytes sentencepiece wandb
!pip install -q accelerate safetensors deepspeed
!pip install -q scipy ninja -U
!pip install -q -U transformers==4.38.0
```

## Version

```py
import torch
torch.__version__
#OUTPUTS: ('2.2.0+cu121' )
```

## How to use

```py
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    HfArgumentParser,
    TrainingArguments,
    pipeline,
    logging,
    GenerationConfig,
    TextIteratorStreamer,
)
from transformers import StoppingCriteria, StoppingCriteriaList
import torch

model_id = 'NickyNicky/gemma-2b-it_oasst2_chatML_Cluster_2_V1'

model = AutoModelForCausalLM.from_pretrained(model_id,
                                             device_map="auto",
                                             trust_remote_code=True,
                                             torch_dtype=torch.bfloat16,
                                             attn_implementation="flash_attention_2",
                                             # load_in_4bit=True,
                                             # low_cpu_mem_usage= True,
                                             )

max_length = 2055
print("max_length", max_length)

tokenizer = AutoTokenizer.from_pretrained(model_id,
                                          # use_fast = False,
                                          max_length=max_length,)


class ListOfTokensStoppingCriteria(StoppingCriteria):
    """
    Class that defines a stopping criterion based on a list of specific tokens.
    """
    def __init__(self, tokenizer, stop_tokens):
        self.tokenizer = tokenizer
        # Encode each stop token and keep its token IDs in a list
        self.stop_token_ids_list = [tokenizer.encode(stop_token, add_special_tokens=False) for stop_token in stop_tokens]

    def __call__(self, input_ids, scores, **kwargs):
        # Check whether the most recently generated tokens match any of the stop-token sequences
        for stop_token_ids in self.stop_token_ids_list:
            len_stop_tokens = len(stop_token_ids)
            if len(input_ids[0]) >= len_stop_tokens:
                if input_ids[0, -len_stop_tokens:].tolist() == stop_token_ids:
                    return True
        return False

# Using the custom stopping criterion
stop_tokens = ["<end_of_turn>"]  # list of stop tokens

# Initialize the stopping criterion with the tokenizer and the list of stop tokens
stopping_criteria = ListOfTokensStoppingCriteria(tokenizer, stop_tokens)

# Add the stopping criterion to a StoppingCriteriaList
stopping_criteria_list = StoppingCriteriaList([stopping_criteria])

# EXAMPLE #1 (Spanish query)
txt = """<bos><start_of_turn>system
You are a helpful AI assistant.<end_of_turn>
<start_of_turn>user
Me dices los diferentes tipos de reciclaje que suelen existir en las ciudades europeas<end_of_turn>
<start_of_turn>model
"""

# EXAMPLE #2 (English query)
txt = """<bos><start_of_turn>system
You are a helpful AI assistant.<end_of_turn>
<start_of_turn>user
What is the meaning of life in the current time?<end_of_turn>
<start_of_turn>model
"""

inputs = tokenizer.encode(txt,
                          return_tensors="pt",
                          add_special_tokens=False).to("cuda:0")
max_new_tokens = 1000

generation_config = GenerationConfig(
    max_new_tokens=max_new_tokens,
    temperature=0.55,
    # top_p=0.9,
    # top_k=len_tokens,
    repetition_penalty=1.1,
    do_sample=True,
)

outputs = model.generate(generation_config=generation_config,
                         input_ids=inputs,
                         stopping_criteria=stopping_criteria_list,)

tokenizer.decode(outputs[0], skip_special_tokens=False)  # True
```
DialOnce/local-EmbeddingDimensionReducer
DialOnce
2024-06-21T18:07:57Z
606
0
transformers
[ "transformers", "pytorch", "safetensors", "pytorch_model_hub_mixin", "model_hub_mixin", "endpoints_compatible", "region:us" ]
null
2024-04-08T13:38:27Z
--- tags: - pytorch_model_hub_mixin - model_hub_mixin --- This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: - Library: [More Information Needed] - Docs: [More Information Needed]
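Since the card itself does not document the architecture, the snippet below is only a generic illustration of how a `PyTorchModelHubMixin` checkpoint is usually defined and reloaded; the class name, layer sizes, and the assumption of a plain linear projection are all hypothetical.

```python
# Generic PyTorchModelHubMixin pattern; the module below is a hypothetical
# stand-in, not the actual DialOnce implementation.
import torch
import torch.nn as nn
from huggingface_hub import PyTorchModelHubMixin


class EmbeddingDimensionReducer(nn.Module, PyTorchModelHubMixin):
    def __init__(self, in_dim: int = 768, out_dim: int = 128):
        super().__init__()
        self.proj = nn.Linear(in_dim, out_dim)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.proj(x)


# Pushing saves the weights plus a config.json built from the __init__ kwargs:
model = EmbeddingDimensionReducer()
# model.push_to_hub("your-namespace/your-repo")  # requires write access

# Reloading only works if the local class definition matches what was pushed:
# restored = EmbeddingDimensionReducer.from_pretrained("DialOnce/local-EmbeddingDimensionReducer")
```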
votepurchase/waiREALMIX_v70
votepurchase
2024-06-04T10:16:43Z
606
0
diffusers
[ "diffusers", "safetensors", "ja", "license:mit", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2024-05-25T16:23:17Z
--- license: mit language: - ja library_name: diffusers --- [waiREALMIX_v70](https://civitai.com/models/393905/wai-realmix)
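The repository tags mark this as a `StableDiffusionXLPipeline` checkpoint, so a plain diffusers load along the following lines should work; the prompt, precision, and sampling settings below are placeholders rather than recommendations from the model author.

```python
# Hedged example: loading the repo with diffusers' SDXL pipeline.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "votepurchase/waiREALMIX_v70",
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

image = pipe(
    prompt="photo of a woman reading in a sunlit cafe, 35mm, natural light",
    negative_prompt="lowres, blurry, bad anatomy",
    num_inference_steps=28,
    guidance_scale=5.0,
).images[0]
image.save("waiREALMIX_sample.png")
```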
Kurgan1138/L3-8B-Stheno-Horny-v3.3-32K-Q5_K_M-GGUF
Kurgan1138
2024-06-28T17:27:58Z
606
0
transformers
[ "transformers", "gguf", "llama-factory", "llama-cpp", "gguf-my-repo", "base_model:nothingiisreal/L3-8B-Stheno-Horny-v3.3-32K", "endpoints_compatible", "region:us" ]
null
2024-06-28T17:27:31Z
--- base_model: nothingiisreal/L3-8B-Stheno-Horny-v3.3-32K library_name: transformers tags: - llama-factory - llama-cpp - gguf-my-repo --- # Kurgan1138/L3-8B-Stheno-Horny-v3.3-32K-Q5_K_M-GGUF This model was converted to GGUF format from [`nothingiisreal/L3-8B-Stheno-Horny-v3.3-32K`](https://huggingface.co/nothingiisreal/L3-8B-Stheno-Horny-v3.3-32K) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/nothingiisreal/L3-8B-Stheno-Horny-v3.3-32K) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo Kurgan1138/L3-8B-Stheno-Horny-v3.3-32K-Q5_K_M-GGUF --hf-file l3-8b-stheno-horny-v3.3-32k-q5_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Kurgan1138/L3-8B-Stheno-Horny-v3.3-32K-Q5_K_M-GGUF --hf-file l3-8b-stheno-horny-v3.3-32k-q5_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo Kurgan1138/L3-8B-Stheno-Horny-v3.3-32K-Q5_K_M-GGUF --hf-file l3-8b-stheno-horny-v3.3-32k-q5_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo Kurgan1138/L3-8B-Stheno-Horny-v3.3-32K-Q5_K_M-GGUF --hf-file l3-8b-stheno-horny-v3.3-32k-q5_k_m.gguf -c 2048 ```
huggingtweets/gcrclassic
huggingtweets
2023-02-03T01:16:32Z
605
0
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "huggingtweets", "en", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-02-03T01:14:55Z
--- language: en thumbnail: http://www.huggingtweets.com/gcrclassic/1675386987213/predictions.png tags: - huggingtweets widget: - text: "My dream is" --- <div class="inline-flex flex-col" style="line-height: 1.5;"> <div class="flex"> <div style="display:inherit; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;https://pbs.twimg.com/profile_images/1614712110650478592/wCLV35Hg_400x400.jpg&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> <div style="display:none; margin-left: 4px; margin-right: 4px; width: 92px; height:92px; border-radius: 50%; background-size: cover; background-image: url(&#39;&#39;)"> </div> </div> <div style="text-align: center; margin-top: 3px; font-size: 16px; font-weight: 800">🤖 AI BOT 🤖</div> <div style="text-align: center; font-size: 16px; font-weight: 800">GCR</div> <div style="text-align: center; font-size: 14px;">@gcrclassic</div> </div> I was made with [huggingtweets](https://github.com/borisdayma/huggingtweets). Create your own bot based on your favorite user with [the demo](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb)! ## How does it work? The model uses the following pipeline. ![pipeline](https://github.com/borisdayma/huggingtweets/blob/master/img/pipeline.png?raw=true) To understand how the model was developed, check the [W&B report](https://wandb.ai/wandb/huggingtweets/reports/HuggingTweets-Train-a-Model-to-Generate-Tweets--VmlldzoxMTY5MjI). ## Training data The model was trained on tweets from GCR. | Data | GCR | | --- | --- | | Tweets downloaded | 663 | | Retweets | 33 | | Short tweets | 59 | | Tweets kept | 571 | [Explore the data](https://wandb.ai/wandb/huggingtweets/runs/pxchpylx/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline. ## Training procedure The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on @gcrclassic's tweets. Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/wandb/huggingtweets/runs/h991nhyq) for full transparency and reproducibility. At the end of training, [the final model](https://wandb.ai/wandb/huggingtweets/runs/h991nhyq/artifacts) is logged and versioned. ## How to use You can use this model directly with a pipeline for text generation: ```python from transformers import pipeline generator = pipeline('text-generation', model='huggingtweets/gcrclassic') generator("My dream is", num_return_sequences=5) ``` ## Limitations and bias The model suffers from [the same limitations and bias as GPT-2](https://huggingface.co/gpt2#limitations-and-bias). In addition, the data present in the user's tweets further affects the text generated by the model. ## About *Built by Boris Dayma* [![Follow](https://img.shields.io/twitter/follow/borisdayma?style=social)](https://twitter.com/intent/follow?screen_name=borisdayma) For more details, visit the project repository. [![GitHub stars](https://img.shields.io/github/stars/borisdayma/huggingtweets?style=social)](https://github.com/borisdayma/huggingtweets)
google/codegemma-1.1-7b-it-GGUF
google
2024-06-27T14:10:05Z
605
12
llama.cpp
[ "llama.cpp", "gguf", "text-generation", "license:gemma", "region:us" ]
text-generation
2024-04-30T22:03:05Z
--- library_name: llama.cpp license: gemma license_link: https://ai.google.dev/gemma/terms pipeline_tag: text-generation extra_gated_heading: Access CodeGemma on Hugging Face extra_gated_prompt: To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately. extra_gated_button_content: Acknowledge license --- # CodeGemma Model Page : [CodeGemma](https://ai.google.dev/gemma/docs/codegemma) Resources and Technical Documentation : [Technical Report](https://goo.gle/codegemma) : [Responsible Generative AI Toolkit](https://ai.google.dev/responsible) Terms of Use : [Terms](https://www.kaggle.com/models/google/codegemma/license/consent/verify/huggingface?returnModelRepoId=google/codegemma-1.1-7b-it-GGUF) Authors : Google > [!IMPORTANT] > > In llama.cpp, and other related tools such as Ollama and LM Studio, please make sure that you have these flags set correctly, especially **`repeat-penalty`**. Georgi Gerganov (llama.cpp's author) shared his experience in https://huggingface.co/google/gemma-7b-it/discussions/38#65d7b14adb51f7c160769fa1. ## Description CodeGemma is a collection of lightweight open code models built on top of Gemma. CodeGemma models are text-to-text and text-to-code decoder-only models and are available as a 7 billion pretrained variant that specializes in code completion and code generation tasks, a 7 billion parameter instruction-tuned variant for code chat and instruction following and a 2 billion parameter pretrained variant for fast code completion. | | [ **codegemma-2b** ](https://huggingface.co/google/codegemma-1.1-2b-GGUF) | [codegemma-7b](https://huggingface.co/google/codegemma-7b-GGUF) | [codegemma-7b-it](https://huggingface.co/google/codegemma-1.1-7b-it-GGUF) | |----------------------------------|:----------------------------------------------------------------:|:----------------------------------------------------------:|:----------------------------------------------------------------:| | Code Completion | ✅ | ✅ | | | Generation from natural language | | ✅ | ✅ | | Chat | | | ✅ | | Instruction Following | | | ✅ | For detailed model card, refer to https://huggingface.co/google/codegemma-1.1-7b-it. ## Sample Usage ```shell $ cat non_prime /// Write a rust function to identify non-prime numbers. /// /// Examples: /// >>> is_not_prime(2) /// False /// >>> is_not_prime(10) /// True pub fn is_not_prime(n: i32) -> bool { $ main -m codegemma-1.1-7b-it.gguf --temp 0 --top-k 0 -f non_prime --log-disable --repeat-penalty 1.0 /// Write a rust function to identify non-prime numbers. 
/// /// Examples: /// >>> is_not_prime(2) /// False /// >>> is_not_prime(10) /// True pub fn is_not_prime(n: i32) -> bool { if n <= 1 { return true; } for i in 2..=(n as f64).sqrt() as i32 { if n % i == 0 { return true; } } false } ``` ## Coding Benchmarks Benchmark | [2B](https://huggingface.co/google/codegemma-2b-GGUF) | [2B (1.1)](https://huggingface.co/google/codegemma-1.1-2b-GGUF) | [7B](https://huggingface.co/google/codegemma-7b-GGUF) | [7B-IT](https://huggingface.co/google/codegemma-7b-it-GGUF) | [7B-IT (1.1)](https://huggingface.co/google/codegemma-1.1-7b-it-GGUF) ----------------------|------|----------|------|-------|------------ HumanEval | 31.1 | 37.8 | 44.5 | 56.1 | 60.4 MBPP | 43.6 | 49.2 | 56.2 | 54.2 | 55.6 HumanEval Single Line | 78.4 | 79.3 | 76.1 | 68.3 | 77.4 HumanEval Multi Line | 51.4 | 51.0 | 58.4 | 20.1 | 23.7 BC HE C++ | 24.2 | 19.9 | 32.9 | 42.2 | 46.6 BC HE C# | 10.6 | 26.1 | 22.4 | 26.7 | 54.7 BC HE Go | 20.5 | 18.0 | 21.7 | 28.6 | 34.2 BC HE Java | 29.2 | 29.8 | 41.0 | 48.4 | 50.3 BC HE JavaScript | 21.7 | 28.0 | 39.8 | 46.0 | 48.4 BC HE Kotlin | 28.0 | 32.3 | 39.8 | 51.6 | 47.8 BC HE Python | 21.7 | 36.6 | 42.2 | 48.4 | 54.0 BC HE Rust | 26.7 | 24.2 | 34.1 | 36.0 | 37.3 BC MBPP C++ | 47.1 | 38.9 | 53.8 | 56.7 | 63.5 BC MBPP C# | 28.7 | 45.3 | 32.5 | 41.2 | 62.0 BC MBPP Go | 45.6 | 38.9 | 43.3 | 46.2 | 53.2 BC MBPP Java | 41.8 | 49.7 | 50.3 | 57.3 | 62.9 BC MBPP JavaScript | 45.3 | 45.0 | 58.2 | 61.4 | 61.4 BC MBPP Kotlin | 46.8 | 49.7 | 54.7 | 59.9 | 62.6 BC MBPP Python | 38.6 | 52.9 | 59.1 | 62.0 | 60.2 BC MBPP Rust | 45.3 | 47.4 | 52.9 | 53.5 | 52.3 ## Natural Language Benchmarks ![CodeGemma Natural Language Benchmarks](./codegemma_nl_benchmarks.png)
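The completion-style call above exercises the model's code-completion behaviour; for chat-style requests the instruction-tuned variant expects Gemma's turn markup. The invocation below is an illustrative sketch rather than an official example: it reuses the flags recommended above and relies on llama.cpp's `-e` option to expand the `\n` escapes in the prompt.

```shell
$ main -m codegemma-1.1-7b-it.gguf --temp 0 --top-k 0 --repeat-penalty 1.0 -e \
    -p "<start_of_turn>user\nWrite a Rust function that reverses a string.<end_of_turn>\n<start_of_turn>model\n"
```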
gaianet/Yi-1.5-9B-Chat-16K-GGUF
gaianet
2024-06-28T06:05:33Z
605
2
null
[ "gguf", "text-generation", "base_model:01-ai/Yi-1.5-9B-Chat-16K", "license:apache-2.0", "region:us" ]
text-generation
2024-05-17T15:00:35Z
---
base_model: 01-ai/Yi-1.5-9B-Chat-16K
inference: false
model_creator: 01-ai
model_name: Yi-1.5-9B-Chat-16K
model_type: yi
pipeline_tag: text-generation
quantized_by: Second State Inc.
license: apache-2.0
---

![](https://github.com/GaiaNet-AI/.github/assets/45785633/d6976adc-f97d-4f86-a648-0f2f5c8e7eee)

# Yi-1.5-9B-Chat-16K-GGUF

## Original Model

[01-ai/Yi-1.5-9B-Chat-16K](https://huggingface.co/01-ai/Yi-1.5-9B-Chat-16K)

## Run with GaiaNet

**Prompt template**

prompt template: `chatml`

**Reverse prompt**

reverse prompt: `<|im_end|>`

**Context size**

chat_ctx_size: `16384`

**Run with GaiaNet**

- Quick start: https://docs.gaianet.ai/node-guide/quick-start

- Customize your node: https://docs.gaianet.ai/node-guide/customize

*Quantized with llama.cpp b3135*
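For callers that assemble prompts by hand rather than through a GaiaNet node, the `chatml` template above corresponds to the usual `<|im_start|>` / `<|im_end|>` markup. The helper below is a small illustrative sketch, not part of the upstream card; the system and user strings are placeholders, and generation should be stopped on the reverse prompt `<|im_end|>`.

```python
# Illustrative ChatML prompt builder matching the template and reverse prompt above.
def build_chatml_prompt(system: str, user: str) -> str:
    return (
        f"<|im_start|>system\n{system}<|im_end|>\n"
        f"<|im_start|>user\n{user}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )


if __name__ == "__main__":
    print(build_chatml_prompt(
        "You are a helpful assistant.",
        "Summarize what a 16K context window allows in one sentence.",
    ))
```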
mradermacher/Llama-Salad-4x8B-i1-GGUF
mradermacher
2024-06-01T16:32:32Z
605
2
transformers
[ "transformers", "gguf", "nsfw", "not-for-all-audiences", "llama-3", "text-generation-inference", "moe", "mergekit", "merge", "en", "base_model:HiroseKoichi/Llama-Salad-4x8B", "license:llama3", "endpoints_compatible", "region:us" ]
null
2024-05-25T04:20:18Z
--- base_model: HiroseKoichi/Llama-Salad-4x8B language: - en library_name: transformers license: llama3 quantized_by: mradermacher tags: - nsfw - not-for-all-audiences - llama-3 - text-generation-inference - moe - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> weighted/imatrix quants of https://huggingface.co/HiroseKoichi/Llama-Salad-4x8B <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Llama-Salad-4x8B-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ1_S.gguf) | i1-IQ1_S | 5.5 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ1_M.gguf) | i1-IQ1_M | 6.0 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 6.9 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ2_XS.gguf) | i1-IQ2_XS | 7.6 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ2_S.gguf) | i1-IQ2_S | 7.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ2_M.gguf) | i1-IQ2_M | 8.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q2_K.gguf) | i1-Q2_K | 9.4 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 9.9 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ3_XS.gguf) | i1-IQ3_XS | 10.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q3_K_S.gguf) | i1-Q3_K_S | 11.0 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ3_S.gguf) | i1-IQ3_S | 11.1 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ3_M.gguf) | i1-IQ3_M | 11.2 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q3_K_M.gguf) | i1-Q3_K_M | 12.2 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q3_K_L.gguf) | i1-Q3_K_L | 13.1 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-IQ4_XS.gguf) | i1-IQ4_XS | 13.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q4_0.gguf) | i1-Q4_0 | 14.3 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q4_K_S.gguf) | i1-Q4_K_S | 14.4 | optimal size/speed/quality | | 
[GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q4_K_M.gguf) | i1-Q4_K_M | 15.3 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q5_K_S.gguf) | i1-Q5_K_S | 17.3 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q5_K_M.gguf) | i1-Q5_K_M | 17.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Salad-4x8B-i1-GGUF/resolve/main/Llama-Salad-4x8B.i1-Q6_K.gguf) | i1-Q6_K | 20.6 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
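As a concrete starting point for the usage note above, the command below pulls the recommended i1-Q4_K_M file listed in the table with a recent llama.cpp build. It is an illustrative sketch rather than part of the original card, and the prompt is a placeholder.

```bash
# Illustrative: run the i1-Q4_K_M quant from this repo directly via llama.cpp.
llama-cli --hf-repo mradermacher/Llama-Salad-4x8B-i1-GGUF \
  --hf-file Llama-Salad-4x8B.i1-Q4_K_M.gguf \
  -p "Write a short story about a llama chef." -n 256
```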
RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf
RichardErkhov
2024-05-26T10:09:59Z
605
0
null
[ "gguf", "region:us" ]
null
2024-05-26T07:37:02Z
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) pygmalion-7b - GGUF - Model creator: https://huggingface.co/Neko-Institute-of-Science/ - Original model: https://huggingface.co/Neko-Institute-of-Science/pygmalion-7b/ | Name | Quant method | Size | | ---- | ---- | ---- | | [pygmalion-7b.Q2_K.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q2_K.gguf) | Q2_K | 2.36GB | | [pygmalion-7b.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.IQ3_XS.gguf) | IQ3_XS | 2.6GB | | [pygmalion-7b.IQ3_S.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.IQ3_S.gguf) | IQ3_S | 2.75GB | | [pygmalion-7b.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q3_K_S.gguf) | Q3_K_S | 2.75GB | | [pygmalion-7b.IQ3_M.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.IQ3_M.gguf) | IQ3_M | 2.9GB | | [pygmalion-7b.Q3_K.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q3_K.gguf) | Q3_K | 3.07GB | | [pygmalion-7b.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q3_K_M.gguf) | Q3_K_M | 3.07GB | | [pygmalion-7b.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q3_K_L.gguf) | Q3_K_L | 3.35GB | | [pygmalion-7b.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.IQ4_XS.gguf) | IQ4_XS | 3.4GB | | [pygmalion-7b.Q4_0.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q4_0.gguf) | Q4_0 | 3.56GB | | [pygmalion-7b.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.IQ4_NL.gguf) | IQ4_NL | 3.58GB | | [pygmalion-7b.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q4_K_S.gguf) | Q4_K_S | 3.59GB | | [pygmalion-7b.Q4_K.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q4_K.gguf) | Q4_K | 3.8GB | | [pygmalion-7b.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q4_K_M.gguf) | Q4_K_M | 3.8GB | | [pygmalion-7b.Q4_1.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q4_1.gguf) | Q4_1 | 3.95GB | | [pygmalion-7b.Q5_0.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q5_0.gguf) | Q5_0 | 4.33GB | | [pygmalion-7b.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q5_K_S.gguf) | Q5_K_S | 4.33GB | | [pygmalion-7b.Q5_K.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q5_K.gguf) | Q5_K | 4.45GB | | [pygmalion-7b.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q5_K_M.gguf) | Q5_K_M | 4.45GB | | 
[pygmalion-7b.Q5_1.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q5_1.gguf) | Q5_1 | 4.72GB | | [pygmalion-7b.Q6_K.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q6_K.gguf) | Q6_K | 5.15GB | | [pygmalion-7b.Q8_0.gguf](https://huggingface.co/RichardErkhov/Neko-Institute-of-Science_-_pygmalion-7b-gguf/blob/main/pygmalion-7b.Q8_0.gguf) | Q8_0 | 6.67GB | Original model description: --- language: - en thumbnail: null tags: - text generation - conversational pipeline_tag: text-generation inference: false --- <h1 style="text-align: center">Pygmalion 7B</h1> <h2 style="text-align: center">A conversational LLaMA fine-tune.</h2> ## Model Details Converted from the XORs weights from PygmalionAI's release https://huggingface.co/PygmalionAI/pygmalion-7b Pygmalion 7B is a dialogue model based on Meta's LLaMA-7B. This is version 1. It has been fine-tuned using a subset of the data from Pygmalion-6B-v8-pt4, for those of you familiar with the project. ## Prompting The model was trained on the usual Pygmalion persona + chat format, so any of the usual UIs should already handle everything correctly. If you're using the model directly, this is the expected formatting: ``` [CHARACTER]'s Persona: [A few sentences about the character you want the model to play] <START> [DIALOGUE HISTORY] You: [User's input message here] [CHARACTER]: ``` Where `[CHARACTER]` is, as you can probably guess, the name of the character you want the model to portray, `<START>` should be used verbatim as a delimiter token to separate persona and scenario data from the dialogue, and `[DIALOGUE HISTORY]` is a sliding window of chat history so the model can have conversational context to draw from. Here's a concrete example: ``` Assistant's Persona: Assistant is a highly intelligent language model trained to comply with user requests. <START> Assistant: Hello! How may I help you today? You: What is Zork? Assistant: ``` Which will generate something like: ``` Zork is an interactive fiction computer game created in the 1970s by Infocom, Inc., which was later acquired by Activision Blizzard. It is widely considered one of the most influential games ever made and has been credited with popularizing text-based adventure games. The original version of Zork was written in the programming language MACRO-10, but it was ported to many other platforms over the years." ``` The model will automatically emit an end-of-text token (`</s>`) when it judges that the response is complete. ## Limitations and biases The intended use-case for this model is fictional conversation for entertainment purposes. Any other sort of usage is out of scope. As such, it was **not** fine-tuned to be safe and harmless: the base model _and_ this fine-tune have been trained on data known to contain profanity and texts that are lewd or otherwise offensive. It may produce socially unacceptable or undesirable text, even if the prompt itself does not include anything explicitly offensive. Outputs might often be factually wrong or misleading. # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Neko-Institute-of-Science__pygmalion-7b) | Metric | Value | |-----------------------|---------------------------| | Avg. 
| 40.29 | | ARC (25-shot) | 51.37 | | HellaSwag (10-shot) | 77.81 | | MMLU (5-shot) | 35.68 | | TruthfulQA (0-shot) | 34.54 | | Winogrande (5-shot) | 72.22 | | GSM8K (5-shot) | 4.62 | | DROP (3-shot) | 5.79 |
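For programmatic use, the persona/`<START>`/dialogue layout described in the Prompting section above can be assembled with a small helper like the one below. This is an illustrative sketch, not part of the original card, and the character, persona, and history values are placeholders.

```python
# Illustrative helper for the Pygmalion prompt format documented above.
def build_pygmalion_prompt(character, persona, history, user_message):
    lines = [f"{character}'s Persona: {persona}", "<START>"]
    lines.extend(history)  # prior turns, e.g. "You: ..." and f"{character}: ..."
    lines.append(f"You: {user_message}")
    lines.append(f"{character}:")
    return "\n".join(lines)


prompt = build_pygmalion_prompt(
    character="Assistant",
    persona="Assistant is a highly intelligent language model trained to comply with user requests.",
    history=["Assistant: Hello! How may I help you today?"],
    user_message="What is Zork?",
)
print(prompt)
```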
Ayyystin/llama2_7b_chat_uncensored-Q4_0-GGUF
Ayyystin
2024-06-07T21:31:00Z
605
1
null
[ "gguf", "llama-cpp", "gguf-my-repo", "dataset:georgesung/wizard_vicuna_70k_unfiltered", "base_model:georgesung/llama2_7b_chat_uncensored", "license:other", "region:us" ]
null
2024-06-07T21:30:48Z
--- license: other tags: - llama-cpp - gguf-my-repo base_model: georgesung/llama2_7b_chat_uncensored datasets: - georgesung/wizard_vicuna_70k_unfiltered --- # Ayyystin/llama2_7b_chat_uncensored-Q4_0-GGUF This model was converted to GGUF format from [`georgesung/llama2_7b_chat_uncensored`](https://huggingface.co/georgesung/llama2_7b_chat_uncensored) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/georgesung/llama2_7b_chat_uncensored) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama --hf-repo Ayyystin/llama2_7b_chat_uncensored-Q4_0-GGUF --hf-file llama2_7b_chat_uncensored-q4_0.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Ayyystin/llama2_7b_chat_uncensored-Q4_0-GGUF --hf-file llama2_7b_chat_uncensored-q4_0.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./main --hf-repo Ayyystin/llama2_7b_chat_uncensored-Q4_0-GGUF --hf-file llama2_7b_chat_uncensored-q4_0.gguf -p "The meaning to life and the universe is" ``` or ``` ./server --hf-repo Ayyystin/llama2_7b_chat_uncensored-Q4_0-GGUF --hf-file llama2_7b_chat_uncensored-q4_0.gguf -c 2048 ```
mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF
mradermacher
2024-06-15T04:57:48Z
605
0
transformers
[ "transformers", "gguf", "moe", "DPO", "RL-TUNED", "en", "base_model:cloudyu/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B", "license:mit", "endpoints_compatible", "region:us" ]
null
2024-06-14T18:25:43Z
--- base_model: cloudyu/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B language: - en library_name: transformers license: mit quantized_by: mradermacher tags: - moe - DPO - RL-TUNED --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/cloudyu/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ1_S.gguf) | i1-IQ1_S | 12.9 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ1_M.gguf) | i1-IQ1_M | 14.2 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 16.3 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ2_XS.gguf) | i1-IQ2_XS | 18.1 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ2_S.gguf) | i1-IQ2_S | 18.8 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ2_M.gguf) | i1-IQ2_M | 20.5 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q2_K.gguf) | i1-Q2_K | 22.5 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 23.6 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ3_XS.gguf) | i1-IQ3_XS | 25.1 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q3_K_S.gguf) | i1-Q3_K_S | 26.4 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ3_S.gguf) | i1-IQ3_S | 26.5 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ3_M.gguf) | i1-IQ3_M | 27.2 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q3_K_M.gguf) 
| i1-Q3_K_M | 29.3 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q3_K_L.gguf) | i1-Q3_K_L | 31.9 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-IQ4_XS.gguf) | i1-IQ4_XS | 32.6 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q4_0.gguf) | i1-Q4_0 | 34.6 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q4_K_S.gguf) | i1-Q4_K_S | 34.7 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q4_K_M.gguf) | i1-Q4_K_M | 36.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q5_K_S.gguf) | i1-Q5_K_S | 42.0 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q5_K_M.gguf) | i1-Q5_K_M | 43.2 | | | [GGUF](https://huggingface.co/mradermacher/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B-i1-GGUF/resolve/main/Truthful_DPO_cloudyu_Mixtral_34Bx2_MoE_60B.i1-Q6_K.gguf) | i1-Q6_K | 50.0 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
timm/resnet18.a2_in1k
timm
2024-02-10T23:38:34Z
604
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "arxiv:2110.00476", "arxiv:1512.03385", "license:apache-2.0", "region:us" ]
image-classification
2023-04-05T18:03:01Z
---
license: apache-2.0
library_name: timm
tags:
- image-classification
- timm
---
# Model card for resnet18.a2_in1k

A ResNet-B image classification model.

This model features:
 * ReLU activations
 * single layer 7x7 convolution with pooling
 * 1x1 convolution shortcut downsample

Trained on ImageNet-1k in `timm` using recipe template described below.

Recipe details:
 * ResNet Strikes Back `A2` recipe
 * LAMB optimizer with BCE loss
 * Cosine LR schedule with warmup

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 11.7
  - GMACs: 1.8
  - Activations (M): 2.5
  - Image size: train = 224 x 224, test = 288 x 288
- **Papers:**
  - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476
  - Deep Residual Learning for Image Recognition: https://arxiv.org/abs/1512.03385
- **Original:** https://github.com/huggingface/pytorch-image-models

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('resnet18.a2_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'resnet18.a2_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 112, 112])
    #  torch.Size([1, 64, 56, 56])
    #  torch.Size([1, 128, 28, 28])
    #  torch.Size([1, 256, 14, 14])
    #  torch.Size([1, 512, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'resnet18.a2_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 512, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model
results](https://github.com/huggingface/pytorch-image-models/tree/main/results). |model |img_size|top1 |top5 |param_count|gmacs|macts|img/sec| |------------------------------------------|--------|-----|-----|-----------|-----|-----|-------| |[seresnextaa101d_32x8d.sw_in12k_ft_in1k_288](https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k_288)|320 |86.72|98.17|93.6 |35.2 |69.7 |451 | |[seresnextaa101d_32x8d.sw_in12k_ft_in1k_288](https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k_288)|288 |86.51|98.08|93.6 |28.5 |56.4 |560 | |[seresnextaa101d_32x8d.sw_in12k_ft_in1k](https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k)|288 |86.49|98.03|93.6 |28.5 |56.4 |557 | |[seresnextaa101d_32x8d.sw_in12k_ft_in1k](https://huggingface.co/timm/seresnextaa101d_32x8d.sw_in12k_ft_in1k)|224 |85.96|97.82|93.6 |17.2 |34.2 |923 | |[resnext101_32x32d.fb_wsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x32d.fb_wsl_ig1b_ft_in1k)|224 |85.11|97.44|468.5 |87.3 |91.1 |254 | |[resnetrs420.tf_in1k](https://huggingface.co/timm/resnetrs420.tf_in1k)|416 |85.0 |97.12|191.9 |108.4|213.8|134 | |[ecaresnet269d.ra2_in1k](https://huggingface.co/timm/ecaresnet269d.ra2_in1k)|352 |84.96|97.22|102.1 |50.2 |101.2|291 | |[ecaresnet269d.ra2_in1k](https://huggingface.co/timm/ecaresnet269d.ra2_in1k)|320 |84.73|97.18|102.1 |41.5 |83.7 |353 | |[resnetrs350.tf_in1k](https://huggingface.co/timm/resnetrs350.tf_in1k)|384 |84.71|96.99|164.0 |77.6 |154.7|183 | |[seresnextaa101d_32x8d.ah_in1k](https://huggingface.co/timm/seresnextaa101d_32x8d.ah_in1k)|288 |84.57|97.08|93.6 |28.5 |56.4 |557 | |[resnetrs200.tf_in1k](https://huggingface.co/timm/resnetrs200.tf_in1k)|320 |84.45|97.08|93.2 |31.5 |67.8 |446 | |[resnetrs270.tf_in1k](https://huggingface.co/timm/resnetrs270.tf_in1k)|352 |84.43|96.97|129.9 |51.1 |105.5|280 | |[seresnext101d_32x8d.ah_in1k](https://huggingface.co/timm/seresnext101d_32x8d.ah_in1k)|288 |84.36|96.92|93.6 |27.6 |53.0 |595 | |[seresnet152d.ra2_in1k](https://huggingface.co/timm/seresnet152d.ra2_in1k)|320 |84.35|97.04|66.8 |24.1 |47.7 |610 | |[resnetrs350.tf_in1k](https://huggingface.co/timm/resnetrs350.tf_in1k)|288 |84.3 |96.94|164.0 |43.7 |87.1 |333 | |[resnext101_32x8d.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x8d.fb_swsl_ig1b_ft_in1k)|224 |84.28|97.17|88.8 |16.5 |31.2 |1100 | |[resnetrs420.tf_in1k](https://huggingface.co/timm/resnetrs420.tf_in1k)|320 |84.24|96.86|191.9 |64.2 |126.6|228 | |[seresnext101_32x8d.ah_in1k](https://huggingface.co/timm/seresnext101_32x8d.ah_in1k)|288 |84.19|96.87|93.6 |27.2 |51.6 |613 | |[resnext101_32x16d.fb_wsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x16d.fb_wsl_ig1b_ft_in1k)|224 |84.18|97.19|194.0 |36.3 |51.2 |581 | |[resnetaa101d.sw_in12k_ft_in1k](https://huggingface.co/timm/resnetaa101d.sw_in12k_ft_in1k)|288 |84.11|97.11|44.6 |15.1 |29.0 |1144 | |[resnet200d.ra2_in1k](https://huggingface.co/timm/resnet200d.ra2_in1k)|320 |83.97|96.82|64.7 |31.2 |67.3 |518 | |[resnetrs200.tf_in1k](https://huggingface.co/timm/resnetrs200.tf_in1k)|256 |83.87|96.75|93.2 |20.2 |43.4 |692 | |[seresnextaa101d_32x8d.ah_in1k](https://huggingface.co/timm/seresnextaa101d_32x8d.ah_in1k)|224 |83.86|96.65|93.6 |17.2 |34.2 |923 | |[resnetrs152.tf_in1k](https://huggingface.co/timm/resnetrs152.tf_in1k)|320 |83.72|96.61|86.6 |24.3 |48.1 |617 | |[seresnet152d.ra2_in1k](https://huggingface.co/timm/seresnet152d.ra2_in1k)|256 |83.69|96.78|66.8 |15.4 |30.6 |943 | |[seresnext101d_32x8d.ah_in1k](https://huggingface.co/timm/seresnext101d_32x8d.ah_in1k)|224 
|83.68|96.61|93.6 |16.7 |32.0 |986 | |[resnet152d.ra2_in1k](https://huggingface.co/timm/resnet152d.ra2_in1k)|320 |83.67|96.74|60.2 |24.1 |47.7 |706 | |[resnetrs270.tf_in1k](https://huggingface.co/timm/resnetrs270.tf_in1k)|256 |83.59|96.61|129.9 |27.1 |55.8 |526 | |[seresnext101_32x8d.ah_in1k](https://huggingface.co/timm/seresnext101_32x8d.ah_in1k)|224 |83.58|96.4 |93.6 |16.5 |31.2 |1013 | |[resnetaa101d.sw_in12k_ft_in1k](https://huggingface.co/timm/resnetaa101d.sw_in12k_ft_in1k)|224 |83.54|96.83|44.6 |9.1 |17.6 |1864 | |[resnet152.a1h_in1k](https://huggingface.co/timm/resnet152.a1h_in1k)|288 |83.46|96.54|60.2 |19.1 |37.3 |904 | |[resnext101_32x16d.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x16d.fb_swsl_ig1b_ft_in1k)|224 |83.35|96.85|194.0 |36.3 |51.2 |582 | |[resnet200d.ra2_in1k](https://huggingface.co/timm/resnet200d.ra2_in1k)|256 |83.23|96.53|64.7 |20.0 |43.1 |809 | |[resnext101_32x4d.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x4d.fb_swsl_ig1b_ft_in1k)|224 |83.22|96.75|44.2 |8.0 |21.2 |1814 | |[resnext101_64x4d.c1_in1k](https://huggingface.co/timm/resnext101_64x4d.c1_in1k)|288 |83.16|96.38|83.5 |25.7 |51.6 |590 | |[resnet152d.ra2_in1k](https://huggingface.co/timm/resnet152d.ra2_in1k)|256 |83.14|96.38|60.2 |15.4 |30.5 |1096 | |[resnet101d.ra2_in1k](https://huggingface.co/timm/resnet101d.ra2_in1k)|320 |83.02|96.45|44.6 |16.5 |34.8 |992 | |[ecaresnet101d.miil_in1k](https://huggingface.co/timm/ecaresnet101d.miil_in1k)|288 |82.98|96.54|44.6 |13.4 |28.2 |1077 | |[resnext101_64x4d.tv_in1k](https://huggingface.co/timm/resnext101_64x4d.tv_in1k)|224 |82.98|96.25|83.5 |15.5 |31.2 |989 | |[resnetrs152.tf_in1k](https://huggingface.co/timm/resnetrs152.tf_in1k)|256 |82.86|96.28|86.6 |15.6 |30.8 |951 | |[resnext101_32x8d.tv2_in1k](https://huggingface.co/timm/resnext101_32x8d.tv2_in1k)|224 |82.83|96.22|88.8 |16.5 |31.2 |1099 | |[resnet152.a1h_in1k](https://huggingface.co/timm/resnet152.a1h_in1k)|224 |82.8 |96.13|60.2 |11.6 |22.6 |1486 | |[resnet101.a1h_in1k](https://huggingface.co/timm/resnet101.a1h_in1k)|288 |82.8 |96.32|44.6 |13.0 |26.8 |1291 | |[resnet152.a1_in1k](https://huggingface.co/timm/resnet152.a1_in1k)|288 |82.74|95.71|60.2 |19.1 |37.3 |905 | |[resnext101_32x8d.fb_wsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext101_32x8d.fb_wsl_ig1b_ft_in1k)|224 |82.69|96.63|88.8 |16.5 |31.2 |1100 | |[resnet152.a2_in1k](https://huggingface.co/timm/resnet152.a2_in1k)|288 |82.62|95.75|60.2 |19.1 |37.3 |904 | |[resnetaa50d.sw_in12k_ft_in1k](https://huggingface.co/timm/resnetaa50d.sw_in12k_ft_in1k)|288 |82.61|96.49|25.6 |8.9 |20.6 |1729 | |[resnet61q.ra2_in1k](https://huggingface.co/timm/resnet61q.ra2_in1k)|288 |82.53|96.13|36.8 |9.9 |21.5 |1773 | |[wide_resnet101_2.tv2_in1k](https://huggingface.co/timm/wide_resnet101_2.tv2_in1k)|224 |82.5 |96.02|126.9 |22.8 |21.2 |1078 | |[resnext101_64x4d.c1_in1k](https://huggingface.co/timm/resnext101_64x4d.c1_in1k)|224 |82.46|95.92|83.5 |15.5 |31.2 |987 | |[resnet51q.ra2_in1k](https://huggingface.co/timm/resnet51q.ra2_in1k)|288 |82.36|96.18|35.7 |8.1 |20.9 |1964 | |[ecaresnet50t.ra2_in1k](https://huggingface.co/timm/ecaresnet50t.ra2_in1k)|320 |82.35|96.14|25.6 |8.8 |24.1 |1386 | |[resnet101.a1_in1k](https://huggingface.co/timm/resnet101.a1_in1k)|288 |82.31|95.63|44.6 |13.0 |26.8 |1291 | |[resnetrs101.tf_in1k](https://huggingface.co/timm/resnetrs101.tf_in1k)|288 |82.29|96.01|63.6 |13.6 |28.5 |1078 | |[resnet152.tv2_in1k](https://huggingface.co/timm/resnet152.tv2_in1k)|224 |82.29|96.0 |60.2 |11.6 |22.6 |1484 | 
|[wide_resnet50_2.racm_in1k](https://huggingface.co/timm/wide_resnet50_2.racm_in1k)|288 |82.27|96.06|68.9 |18.9 |23.8 |1176 | |[resnet101d.ra2_in1k](https://huggingface.co/timm/resnet101d.ra2_in1k)|256 |82.26|96.07|44.6 |10.6 |22.2 |1542 | |[resnet101.a2_in1k](https://huggingface.co/timm/resnet101.a2_in1k)|288 |82.24|95.73|44.6 |13.0 |26.8 |1290 | |[seresnext50_32x4d.racm_in1k](https://huggingface.co/timm/seresnext50_32x4d.racm_in1k)|288 |82.2 |96.14|27.6 |7.0 |23.8 |1547 | |[ecaresnet101d.miil_in1k](https://huggingface.co/timm/ecaresnet101d.miil_in1k)|224 |82.18|96.05|44.6 |8.1 |17.1 |1771 | |[resnext50_32x4d.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnext50_32x4d.fb_swsl_ig1b_ft_in1k)|224 |82.17|96.22|25.0 |4.3 |14.4 |2943 | |[ecaresnet50t.a1_in1k](https://huggingface.co/timm/ecaresnet50t.a1_in1k)|288 |82.12|95.65|25.6 |7.1 |19.6 |1704 | |[resnext50_32x4d.a1h_in1k](https://huggingface.co/timm/resnext50_32x4d.a1h_in1k)|288 |82.03|95.94|25.0 |7.0 |23.8 |1745 | |[ecaresnet101d_pruned.miil_in1k](https://huggingface.co/timm/ecaresnet101d_pruned.miil_in1k)|288 |82.0 |96.15|24.9 |5.8 |12.7 |1787 | |[resnet61q.ra2_in1k](https://huggingface.co/timm/resnet61q.ra2_in1k)|256 |81.99|95.85|36.8 |7.8 |17.0 |2230 | |[resnext101_32x8d.tv2_in1k](https://huggingface.co/timm/resnext101_32x8d.tv2_in1k)|176 |81.98|95.72|88.8 |10.3 |19.4 |1768 | |[resnet152.a1_in1k](https://huggingface.co/timm/resnet152.a1_in1k)|224 |81.97|95.24|60.2 |11.6 |22.6 |1486 | |[resnet101.a1h_in1k](https://huggingface.co/timm/resnet101.a1h_in1k)|224 |81.93|95.75|44.6 |7.8 |16.2 |2122 | |[resnet101.tv2_in1k](https://huggingface.co/timm/resnet101.tv2_in1k)|224 |81.9 |95.77|44.6 |7.8 |16.2 |2118 | |[resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnext101_32x16d.fb_ssl_yfcc100m_ft_in1k)|224 |81.84|96.1 |194.0 |36.3 |51.2 |583 | |[resnet51q.ra2_in1k](https://huggingface.co/timm/resnet51q.ra2_in1k)|256 |81.78|95.94|35.7 |6.4 |16.6 |2471 | |[resnet152.a2_in1k](https://huggingface.co/timm/resnet152.a2_in1k)|224 |81.77|95.22|60.2 |11.6 |22.6 |1485 | |[resnetaa50d.sw_in12k_ft_in1k](https://huggingface.co/timm/resnetaa50d.sw_in12k_ft_in1k)|224 |81.74|96.06|25.6 |5.4 |12.4 |2813 | |[ecaresnet50t.a2_in1k](https://huggingface.co/timm/ecaresnet50t.a2_in1k)|288 |81.65|95.54|25.6 |7.1 |19.6 |1703 | |[ecaresnet50d.miil_in1k](https://huggingface.co/timm/ecaresnet50d.miil_in1k)|288 |81.64|95.88|25.6 |7.2 |19.7 |1694 | |[resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnext101_32x8d.fb_ssl_yfcc100m_ft_in1k)|224 |81.62|96.04|88.8 |16.5 |31.2 |1101 | |[wide_resnet50_2.tv2_in1k](https://huggingface.co/timm/wide_resnet50_2.tv2_in1k)|224 |81.61|95.76|68.9 |11.4 |14.4 |1930 | |[resnetaa50.a1h_in1k](https://huggingface.co/timm/resnetaa50.a1h_in1k)|288 |81.61|95.83|25.6 |8.5 |19.2 |1868 | |[resnet101.a1_in1k](https://huggingface.co/timm/resnet101.a1_in1k)|224 |81.5 |95.16|44.6 |7.8 |16.2 |2125 | |[resnext50_32x4d.a1_in1k](https://huggingface.co/timm/resnext50_32x4d.a1_in1k)|288 |81.48|95.16|25.0 |7.0 |23.8 |1745 | |[gcresnet50t.ra2_in1k](https://huggingface.co/timm/gcresnet50t.ra2_in1k)|288 |81.47|95.71|25.9 |6.9 |18.6 |2071 | |[wide_resnet50_2.racm_in1k](https://huggingface.co/timm/wide_resnet50_2.racm_in1k)|224 |81.45|95.53|68.9 |11.4 |14.4 |1929 | |[resnet50d.a1_in1k](https://huggingface.co/timm/resnet50d.a1_in1k)|288 |81.44|95.22|25.6 |7.2 |19.7 |1908 | |[ecaresnet50t.ra2_in1k](https://huggingface.co/timm/ecaresnet50t.ra2_in1k)|256 |81.44|95.67|25.6 |5.6 |15.4 |2168 | 
|[ecaresnetlight.miil_in1k](https://huggingface.co/timm/ecaresnetlight.miil_in1k)|288 |81.4 |95.82|30.2 |6.8 |13.9 |2132 | |[resnet50d.ra2_in1k](https://huggingface.co/timm/resnet50d.ra2_in1k)|288 |81.37|95.74|25.6 |7.2 |19.7 |1910 | |[resnet101.a2_in1k](https://huggingface.co/timm/resnet101.a2_in1k)|224 |81.32|95.19|44.6 |7.8 |16.2 |2125 | |[seresnet50.ra2_in1k](https://huggingface.co/timm/seresnet50.ra2_in1k)|288 |81.3 |95.65|28.1 |6.8 |18.4 |1803 | |[resnext50_32x4d.a2_in1k](https://huggingface.co/timm/resnext50_32x4d.a2_in1k)|288 |81.3 |95.11|25.0 |7.0 |23.8 |1746 | |[seresnext50_32x4d.racm_in1k](https://huggingface.co/timm/seresnext50_32x4d.racm_in1k)|224 |81.27|95.62|27.6 |4.3 |14.4 |2591 | |[ecaresnet50t.a1_in1k](https://huggingface.co/timm/ecaresnet50t.a1_in1k)|224 |81.26|95.16|25.6 |4.3 |11.8 |2823 | |[gcresnext50ts.ch_in1k](https://huggingface.co/timm/gcresnext50ts.ch_in1k)|288 |81.23|95.54|15.7 |4.8 |19.6 |2117 | |[senet154.gluon_in1k](https://huggingface.co/timm/senet154.gluon_in1k)|224 |81.23|95.35|115.1 |20.8 |38.7 |545 | |[resnet50.a1_in1k](https://huggingface.co/timm/resnet50.a1_in1k)|288 |81.22|95.11|25.6 |6.8 |18.4 |2089 | |[resnet50_gn.a1h_in1k](https://huggingface.co/timm/resnet50_gn.a1h_in1k)|288 |81.22|95.63|25.6 |6.8 |18.4 |676 | |[resnet50d.a2_in1k](https://huggingface.co/timm/resnet50d.a2_in1k)|288 |81.18|95.09|25.6 |7.2 |19.7 |1908 | |[resnet50.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnet50.fb_swsl_ig1b_ft_in1k)|224 |81.18|95.98|25.6 |4.1 |11.1 |3455 | |[resnext50_32x4d.tv2_in1k](https://huggingface.co/timm/resnext50_32x4d.tv2_in1k)|224 |81.17|95.34|25.0 |4.3 |14.4 |2933 | |[resnext50_32x4d.a1h_in1k](https://huggingface.co/timm/resnext50_32x4d.a1h_in1k)|224 |81.1 |95.33|25.0 |4.3 |14.4 |2934 | |[seresnet50.a2_in1k](https://huggingface.co/timm/seresnet50.a2_in1k)|288 |81.1 |95.23|28.1 |6.8 |18.4 |1801 | |[seresnet50.a1_in1k](https://huggingface.co/timm/seresnet50.a1_in1k)|288 |81.1 |95.12|28.1 |6.8 |18.4 |1799 | |[resnet152s.gluon_in1k](https://huggingface.co/timm/resnet152s.gluon_in1k)|224 |81.02|95.41|60.3 |12.9 |25.0 |1347 | |[resnet50.d_in1k](https://huggingface.co/timm/resnet50.d_in1k)|288 |80.97|95.44|25.6 |6.8 |18.4 |2085 | |[gcresnet50t.ra2_in1k](https://huggingface.co/timm/gcresnet50t.ra2_in1k)|256 |80.94|95.45|25.9 |5.4 |14.7 |2571 | |[resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnext101_32x4d.fb_ssl_yfcc100m_ft_in1k)|224 |80.93|95.73|44.2 |8.0 |21.2 |1814 | |[resnet50.c1_in1k](https://huggingface.co/timm/resnet50.c1_in1k)|288 |80.91|95.55|25.6 |6.8 |18.4 |2084 | |[seresnext101_32x4d.gluon_in1k](https://huggingface.co/timm/seresnext101_32x4d.gluon_in1k)|224 |80.9 |95.31|49.0 |8.0 |21.3 |1585 | |[seresnext101_64x4d.gluon_in1k](https://huggingface.co/timm/seresnext101_64x4d.gluon_in1k)|224 |80.9 |95.3 |88.2 |15.5 |31.2 |918 | |[resnet50.c2_in1k](https://huggingface.co/timm/resnet50.c2_in1k)|288 |80.86|95.52|25.6 |6.8 |18.4 |2085 | |[resnet50.tv2_in1k](https://huggingface.co/timm/resnet50.tv2_in1k)|224 |80.85|95.43|25.6 |4.1 |11.1 |3450 | |[ecaresnet50t.a2_in1k](https://huggingface.co/timm/ecaresnet50t.a2_in1k)|224 |80.84|95.02|25.6 |4.3 |11.8 |2821 | |[ecaresnet101d_pruned.miil_in1k](https://huggingface.co/timm/ecaresnet101d_pruned.miil_in1k)|224 |80.79|95.62|24.9 |3.5 |7.7 |2961 | |[seresnet33ts.ra2_in1k](https://huggingface.co/timm/seresnet33ts.ra2_in1k)|288 |80.79|95.36|19.8 |6.0 |14.8 |2506 | |[ecaresnet50d_pruned.miil_in1k](https://huggingface.co/timm/ecaresnet50d_pruned.miil_in1k)|288 |80.79|95.58|19.9 |4.2 
|10.6 |2349 | |[resnet50.a2_in1k](https://huggingface.co/timm/resnet50.a2_in1k)|288 |80.78|94.99|25.6 |6.8 |18.4 |2088 | |[resnet50.b1k_in1k](https://huggingface.co/timm/resnet50.b1k_in1k)|288 |80.71|95.43|25.6 |6.8 |18.4 |2087 | |[resnext50_32x4d.ra_in1k](https://huggingface.co/timm/resnext50_32x4d.ra_in1k)|288 |80.7 |95.39|25.0 |7.0 |23.8 |1749 | |[resnetrs101.tf_in1k](https://huggingface.co/timm/resnetrs101.tf_in1k)|192 |80.69|95.24|63.6 |6.0 |12.7 |2270 | |[resnet50d.a1_in1k](https://huggingface.co/timm/resnet50d.a1_in1k)|224 |80.68|94.71|25.6 |4.4 |11.9 |3162 | |[eca_resnet33ts.ra2_in1k](https://huggingface.co/timm/eca_resnet33ts.ra2_in1k)|288 |80.68|95.36|19.7 |6.0 |14.8 |2637 | |[resnet50.a1h_in1k](https://huggingface.co/timm/resnet50.a1h_in1k)|224 |80.67|95.3 |25.6 |4.1 |11.1 |3452 | |[resnext50d_32x4d.bt_in1k](https://huggingface.co/timm/resnext50d_32x4d.bt_in1k)|288 |80.67|95.42|25.0 |7.4 |25.1 |1626 | |[resnetaa50.a1h_in1k](https://huggingface.co/timm/resnetaa50.a1h_in1k)|224 |80.63|95.21|25.6 |5.2 |11.6 |3034 | |[ecaresnet50d.miil_in1k](https://huggingface.co/timm/ecaresnet50d.miil_in1k)|224 |80.61|95.32|25.6 |4.4 |11.9 |2813 | |[resnext101_64x4d.gluon_in1k](https://huggingface.co/timm/resnext101_64x4d.gluon_in1k)|224 |80.61|94.99|83.5 |15.5 |31.2 |989 | |[gcresnet33ts.ra2_in1k](https://huggingface.co/timm/gcresnet33ts.ra2_in1k)|288 |80.6 |95.31|19.9 |6.0 |14.8 |2578 | |[gcresnext50ts.ch_in1k](https://huggingface.co/timm/gcresnext50ts.ch_in1k)|256 |80.57|95.17|15.7 |3.8 |15.5 |2710 | |[resnet152.a3_in1k](https://huggingface.co/timm/resnet152.a3_in1k)|224 |80.56|95.0 |60.2 |11.6 |22.6 |1483 | |[resnet50d.ra2_in1k](https://huggingface.co/timm/resnet50d.ra2_in1k)|224 |80.53|95.16|25.6 |4.4 |11.9 |3164 | |[resnext50_32x4d.a1_in1k](https://huggingface.co/timm/resnext50_32x4d.a1_in1k)|224 |80.53|94.46|25.0 |4.3 |14.4 |2930 | |[wide_resnet101_2.tv2_in1k](https://huggingface.co/timm/wide_resnet101_2.tv2_in1k)|176 |80.48|94.98|126.9 |14.3 |13.2 |1719 | |[resnet152d.gluon_in1k](https://huggingface.co/timm/resnet152d.gluon_in1k)|224 |80.47|95.2 |60.2 |11.8 |23.4 |1428 | |[resnet50.b2k_in1k](https://huggingface.co/timm/resnet50.b2k_in1k)|288 |80.45|95.32|25.6 |6.8 |18.4 |2086 | |[ecaresnetlight.miil_in1k](https://huggingface.co/timm/ecaresnetlight.miil_in1k)|224 |80.45|95.24|30.2 |4.1 |8.4 |3530 | |[resnext50_32x4d.a2_in1k](https://huggingface.co/timm/resnext50_32x4d.a2_in1k)|224 |80.45|94.63|25.0 |4.3 |14.4 |2936 | |[wide_resnet50_2.tv2_in1k](https://huggingface.co/timm/wide_resnet50_2.tv2_in1k)|176 |80.43|95.09|68.9 |7.3 |9.0 |3015 | |[resnet101d.gluon_in1k](https://huggingface.co/timm/resnet101d.gluon_in1k)|224 |80.42|95.01|44.6 |8.1 |17.0 |2007 | |[resnet50.a1_in1k](https://huggingface.co/timm/resnet50.a1_in1k)|224 |80.38|94.6 |25.6 |4.1 |11.1 |3461 | |[seresnet33ts.ra2_in1k](https://huggingface.co/timm/seresnet33ts.ra2_in1k)|256 |80.36|95.1 |19.8 |4.8 |11.7 |3267 | |[resnext101_32x4d.gluon_in1k](https://huggingface.co/timm/resnext101_32x4d.gluon_in1k)|224 |80.34|94.93|44.2 |8.0 |21.2 |1814 | |[resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnext50_32x4d.fb_ssl_yfcc100m_ft_in1k)|224 |80.32|95.4 |25.0 |4.3 |14.4 |2941 | |[resnet101s.gluon_in1k](https://huggingface.co/timm/resnet101s.gluon_in1k)|224 |80.28|95.16|44.7 |9.2 |18.6 |1851 | |[seresnet50.ra2_in1k](https://huggingface.co/timm/seresnet50.ra2_in1k)|224 |80.26|95.08|28.1 |4.1 |11.1 |2972 | |[resnetblur50.bt_in1k](https://huggingface.co/timm/resnetblur50.bt_in1k)|288 |80.24|95.24|25.6 |8.5 |19.9 |1523 | 
|[resnet50d.a2_in1k](https://huggingface.co/timm/resnet50d.a2_in1k)|224 |80.22|94.63|25.6 |4.4 |11.9 |3162 | |[resnet152.tv2_in1k](https://huggingface.co/timm/resnet152.tv2_in1k)|176 |80.2 |94.64|60.2 |7.2 |14.0 |2346 | |[seresnet50.a2_in1k](https://huggingface.co/timm/seresnet50.a2_in1k)|224 |80.08|94.74|28.1 |4.1 |11.1 |2969 | |[eca_resnet33ts.ra2_in1k](https://huggingface.co/timm/eca_resnet33ts.ra2_in1k)|256 |80.08|94.97|19.7 |4.8 |11.7 |3284 | |[gcresnet33ts.ra2_in1k](https://huggingface.co/timm/gcresnet33ts.ra2_in1k)|256 |80.06|94.99|19.9 |4.8 |11.7 |3216 | |[resnet50_gn.a1h_in1k](https://huggingface.co/timm/resnet50_gn.a1h_in1k)|224 |80.06|94.95|25.6 |4.1 |11.1 |1109 | |[seresnet50.a1_in1k](https://huggingface.co/timm/seresnet50.a1_in1k)|224 |80.02|94.71|28.1 |4.1 |11.1 |2962 | |[resnet50.ram_in1k](https://huggingface.co/timm/resnet50.ram_in1k)|288 |79.97|95.05|25.6 |6.8 |18.4 |2086 | |[resnet152c.gluon_in1k](https://huggingface.co/timm/resnet152c.gluon_in1k)|224 |79.92|94.84|60.2 |11.8 |23.4 |1455 | |[seresnext50_32x4d.gluon_in1k](https://huggingface.co/timm/seresnext50_32x4d.gluon_in1k)|224 |79.91|94.82|27.6 |4.3 |14.4 |2591 | |[resnet50.d_in1k](https://huggingface.co/timm/resnet50.d_in1k)|224 |79.91|94.67|25.6 |4.1 |11.1 |3456 | |[resnet101.tv2_in1k](https://huggingface.co/timm/resnet101.tv2_in1k)|176 |79.9 |94.6 |44.6 |4.9 |10.1 |3341 | |[resnetrs50.tf_in1k](https://huggingface.co/timm/resnetrs50.tf_in1k)|224 |79.89|94.97|35.7 |4.5 |12.1 |2774 | |[resnet50.c2_in1k](https://huggingface.co/timm/resnet50.c2_in1k)|224 |79.88|94.87|25.6 |4.1 |11.1 |3455 | |[ecaresnet26t.ra2_in1k](https://huggingface.co/timm/ecaresnet26t.ra2_in1k)|320 |79.86|95.07|16.0 |5.2 |16.4 |2168 | |[resnet50.a2_in1k](https://huggingface.co/timm/resnet50.a2_in1k)|224 |79.85|94.56|25.6 |4.1 |11.1 |3460 | |[resnet50.ra_in1k](https://huggingface.co/timm/resnet50.ra_in1k)|288 |79.83|94.97|25.6 |6.8 |18.4 |2087 | |[resnet101.a3_in1k](https://huggingface.co/timm/resnet101.a3_in1k)|224 |79.82|94.62|44.6 |7.8 |16.2 |2114 | |[resnext50_32x4d.ra_in1k](https://huggingface.co/timm/resnext50_32x4d.ra_in1k)|224 |79.76|94.6 |25.0 |4.3 |14.4 |2943 | |[resnet50.c1_in1k](https://huggingface.co/timm/resnet50.c1_in1k)|224 |79.74|94.95|25.6 |4.1 |11.1 |3455 | |[ecaresnet50d_pruned.miil_in1k](https://huggingface.co/timm/ecaresnet50d_pruned.miil_in1k)|224 |79.74|94.87|19.9 |2.5 |6.4 |3929 | |[resnet33ts.ra2_in1k](https://huggingface.co/timm/resnet33ts.ra2_in1k)|288 |79.71|94.83|19.7 |6.0 |14.8 |2710 | |[resnet152.gluon_in1k](https://huggingface.co/timm/resnet152.gluon_in1k)|224 |79.68|94.74|60.2 |11.6 |22.6 |1486 | |[resnext50d_32x4d.bt_in1k](https://huggingface.co/timm/resnext50d_32x4d.bt_in1k)|224 |79.67|94.87|25.0 |4.5 |15.2 |2729 | |[resnet50.bt_in1k](https://huggingface.co/timm/resnet50.bt_in1k)|288 |79.63|94.91|25.6 |6.8 |18.4 |2086 | |[ecaresnet50t.a3_in1k](https://huggingface.co/timm/ecaresnet50t.a3_in1k)|224 |79.56|94.72|25.6 |4.3 |11.8 |2805 | |[resnet101c.gluon_in1k](https://huggingface.co/timm/resnet101c.gluon_in1k)|224 |79.53|94.58|44.6 |8.1 |17.0 |2062 | |[resnet50.b1k_in1k](https://huggingface.co/timm/resnet50.b1k_in1k)|224 |79.52|94.61|25.6 |4.1 |11.1 |3459 | |[resnet50.tv2_in1k](https://huggingface.co/timm/resnet50.tv2_in1k)|176 |79.42|94.64|25.6 |2.6 |6.9 |5397 | |[resnet32ts.ra2_in1k](https://huggingface.co/timm/resnet32ts.ra2_in1k)|288 |79.4 |94.66|18.0 |5.9 |14.6 |2752 | |[resnet50.b2k_in1k](https://huggingface.co/timm/resnet50.b2k_in1k)|224 |79.38|94.57|25.6 |4.1 |11.1 |3459 | 
|[resnext50_32x4d.tv2_in1k](https://huggingface.co/timm/resnext50_32x4d.tv2_in1k)|176 |79.37|94.3 |25.0 |2.7 |9.0 |4577 | |[resnext50_32x4d.gluon_in1k](https://huggingface.co/timm/resnext50_32x4d.gluon_in1k)|224 |79.36|94.43|25.0 |4.3 |14.4 |2942 | |[resnext101_32x8d.tv_in1k](https://huggingface.co/timm/resnext101_32x8d.tv_in1k)|224 |79.31|94.52|88.8 |16.5 |31.2 |1100 | |[resnet101.gluon_in1k](https://huggingface.co/timm/resnet101.gluon_in1k)|224 |79.31|94.53|44.6 |7.8 |16.2 |2125 | |[resnetblur50.bt_in1k](https://huggingface.co/timm/resnetblur50.bt_in1k)|224 |79.31|94.63|25.6 |5.2 |12.0 |2524 | |[resnet50.a1h_in1k](https://huggingface.co/timm/resnet50.a1h_in1k)|176 |79.27|94.49|25.6 |2.6 |6.9 |5404 | |[resnext50_32x4d.a3_in1k](https://huggingface.co/timm/resnext50_32x4d.a3_in1k)|224 |79.25|94.31|25.0 |4.3 |14.4 |2931 | |[resnet50.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnet50.fb_ssl_yfcc100m_ft_in1k)|224 |79.22|94.84|25.6 |4.1 |11.1 |3451 | |[resnet33ts.ra2_in1k](https://huggingface.co/timm/resnet33ts.ra2_in1k)|256 |79.21|94.56|19.7 |4.8 |11.7 |3392 | |[resnet50d.gluon_in1k](https://huggingface.co/timm/resnet50d.gluon_in1k)|224 |79.07|94.48|25.6 |4.4 |11.9 |3162 | |[resnet50.ram_in1k](https://huggingface.co/timm/resnet50.ram_in1k)|224 |79.03|94.38|25.6 |4.1 |11.1 |3453 | |[resnet50.am_in1k](https://huggingface.co/timm/resnet50.am_in1k)|224 |79.01|94.39|25.6 |4.1 |11.1 |3461 | |[resnet32ts.ra2_in1k](https://huggingface.co/timm/resnet32ts.ra2_in1k)|256 |79.01|94.37|18.0 |4.6 |11.6 |3440 | |[ecaresnet26t.ra2_in1k](https://huggingface.co/timm/ecaresnet26t.ra2_in1k)|256 |78.9 |94.54|16.0 |3.4 |10.5 |3421 | |[resnet152.a3_in1k](https://huggingface.co/timm/resnet152.a3_in1k)|160 |78.89|94.11|60.2 |5.9 |11.5 |2745 | |[wide_resnet101_2.tv_in1k](https://huggingface.co/timm/wide_resnet101_2.tv_in1k)|224 |78.84|94.28|126.9 |22.8 |21.2 |1079 | |[seresnext26d_32x4d.bt_in1k](https://huggingface.co/timm/seresnext26d_32x4d.bt_in1k)|288 |78.83|94.24|16.8 |4.5 |16.8 |2251 | |[resnet50.ra_in1k](https://huggingface.co/timm/resnet50.ra_in1k)|224 |78.81|94.32|25.6 |4.1 |11.1 |3454 | |[seresnext26t_32x4d.bt_in1k](https://huggingface.co/timm/seresnext26t_32x4d.bt_in1k)|288 |78.74|94.33|16.8 |4.5 |16.7 |2264 | |[resnet50s.gluon_in1k](https://huggingface.co/timm/resnet50s.gluon_in1k)|224 |78.72|94.23|25.7 |5.5 |13.5 |2796 | |[resnet50d.a3_in1k](https://huggingface.co/timm/resnet50d.a3_in1k)|224 |78.71|94.24|25.6 |4.4 |11.9 |3154 | |[wide_resnet50_2.tv_in1k](https://huggingface.co/timm/wide_resnet50_2.tv_in1k)|224 |78.47|94.09|68.9 |11.4 |14.4 |1934 | |[resnet50.bt_in1k](https://huggingface.co/timm/resnet50.bt_in1k)|224 |78.46|94.27|25.6 |4.1 |11.1 |3454 | |[resnet34d.ra2_in1k](https://huggingface.co/timm/resnet34d.ra2_in1k)|288 |78.43|94.35|21.8 |6.5 |7.5 |3291 | |[gcresnext26ts.ch_in1k](https://huggingface.co/timm/gcresnext26ts.ch_in1k)|288 |78.42|94.04|10.5 |3.1 |13.3 |3226 | |[resnet26t.ra2_in1k](https://huggingface.co/timm/resnet26t.ra2_in1k)|320 |78.33|94.13|16.0 |5.2 |16.4 |2391 | |[resnet152.tv_in1k](https://huggingface.co/timm/resnet152.tv_in1k)|224 |78.32|94.04|60.2 |11.6 |22.6 |1487 | |[seresnext26ts.ch_in1k](https://huggingface.co/timm/seresnext26ts.ch_in1k)|288 |78.28|94.1 |10.4 |3.1 |13.3 |3062 | |[bat_resnext26ts.ch_in1k](https://huggingface.co/timm/bat_resnext26ts.ch_in1k)|256 |78.25|94.1 |10.7 |2.5 |12.5 |3393 | |[resnet50.a3_in1k](https://huggingface.co/timm/resnet50.a3_in1k)|224 |78.06|93.78|25.6 |4.1 |11.1 |3450 | 
|[resnet50c.gluon_in1k](https://huggingface.co/timm/resnet50c.gluon_in1k)|224 |78.0 |93.99|25.6 |4.4 |11.9 |3286 | |[eca_resnext26ts.ch_in1k](https://huggingface.co/timm/eca_resnext26ts.ch_in1k)|288 |78.0 |93.91|10.3 |3.1 |13.3 |3297 | |[seresnext26t_32x4d.bt_in1k](https://huggingface.co/timm/seresnext26t_32x4d.bt_in1k)|224 |77.98|93.75|16.8 |2.7 |10.1 |3841 | |[resnet34.a1_in1k](https://huggingface.co/timm/resnet34.a1_in1k)|288 |77.92|93.77|21.8 |6.1 |6.2 |3609 | |[resnet101.a3_in1k](https://huggingface.co/timm/resnet101.a3_in1k)|160 |77.88|93.71|44.6 |4.0 |8.3 |3926 | |[resnet26t.ra2_in1k](https://huggingface.co/timm/resnet26t.ra2_in1k)|256 |77.87|93.84|16.0 |3.4 |10.5 |3772 | |[seresnext26ts.ch_in1k](https://huggingface.co/timm/seresnext26ts.ch_in1k)|256 |77.86|93.79|10.4 |2.4 |10.5 |4263 | |[resnetrs50.tf_in1k](https://huggingface.co/timm/resnetrs50.tf_in1k)|160 |77.82|93.81|35.7 |2.3 |6.2 |5238 | |[gcresnext26ts.ch_in1k](https://huggingface.co/timm/gcresnext26ts.ch_in1k)|256 |77.81|93.82|10.5 |2.4 |10.5 |4183 | |[ecaresnet50t.a3_in1k](https://huggingface.co/timm/ecaresnet50t.a3_in1k)|160 |77.79|93.6 |25.6 |2.2 |6.0 |5329 | |[resnext50_32x4d.a3_in1k](https://huggingface.co/timm/resnext50_32x4d.a3_in1k)|160 |77.73|93.32|25.0 |2.2 |7.4 |5576 | |[resnext50_32x4d.tv_in1k](https://huggingface.co/timm/resnext50_32x4d.tv_in1k)|224 |77.61|93.7 |25.0 |4.3 |14.4 |2944 | |[seresnext26d_32x4d.bt_in1k](https://huggingface.co/timm/seresnext26d_32x4d.bt_in1k)|224 |77.59|93.61|16.8 |2.7 |10.2 |3807 | |[resnet50.gluon_in1k](https://huggingface.co/timm/resnet50.gluon_in1k)|224 |77.58|93.72|25.6 |4.1 |11.1 |3455 | |[eca_resnext26ts.ch_in1k](https://huggingface.co/timm/eca_resnext26ts.ch_in1k)|256 |77.44|93.56|10.3 |2.4 |10.5 |4284 | |[resnet26d.bt_in1k](https://huggingface.co/timm/resnet26d.bt_in1k)|288 |77.41|93.63|16.0 |4.3 |13.5 |2907 | |[resnet101.tv_in1k](https://huggingface.co/timm/resnet101.tv_in1k)|224 |77.38|93.54|44.6 |7.8 |16.2 |2125 | |[resnet50d.a3_in1k](https://huggingface.co/timm/resnet50d.a3_in1k)|160 |77.22|93.27|25.6 |2.2 |6.1 |5982 | |[resnext26ts.ra2_in1k](https://huggingface.co/timm/resnext26ts.ra2_in1k)|288 |77.17|93.47|10.3 |3.1 |13.3 |3392 | |[resnet34.a2_in1k](https://huggingface.co/timm/resnet34.a2_in1k)|288 |77.15|93.27|21.8 |6.1 |6.2 |3615 | |[resnet34d.ra2_in1k](https://huggingface.co/timm/resnet34d.ra2_in1k)|224 |77.1 |93.37|21.8 |3.9 |4.5 |5436 | |[seresnet50.a3_in1k](https://huggingface.co/timm/seresnet50.a3_in1k)|224 |77.02|93.07|28.1 |4.1 |11.1 |2952 | |[resnext26ts.ra2_in1k](https://huggingface.co/timm/resnext26ts.ra2_in1k)|256 |76.78|93.13|10.3 |2.4 |10.5 |4410 | |[resnet26d.bt_in1k](https://huggingface.co/timm/resnet26d.bt_in1k)|224 |76.7 |93.17|16.0 |2.6 |8.2 |4859 | |[resnet34.bt_in1k](https://huggingface.co/timm/resnet34.bt_in1k)|288 |76.5 |93.35|21.8 |6.1 |6.2 |3617 | |[resnet34.a1_in1k](https://huggingface.co/timm/resnet34.a1_in1k)|224 |76.42|92.87|21.8 |3.7 |3.7 |5984 | |[resnet26.bt_in1k](https://huggingface.co/timm/resnet26.bt_in1k)|288 |76.35|93.18|16.0 |3.9 |12.2 |3331 | |[resnet50.tv_in1k](https://huggingface.co/timm/resnet50.tv_in1k)|224 |76.13|92.86|25.6 |4.1 |11.1 |3457 | |[resnet50.a3_in1k](https://huggingface.co/timm/resnet50.a3_in1k)|160 |75.96|92.5 |25.6 |2.1 |5.7 |6490 | |[resnet34.a2_in1k](https://huggingface.co/timm/resnet34.a2_in1k)|224 |75.52|92.44|21.8 |3.7 |3.7 |5991 | |[resnet26.bt_in1k](https://huggingface.co/timm/resnet26.bt_in1k)|224 |75.3 |92.58|16.0 |2.4 |7.4 |5583 | |[resnet34.bt_in1k](https://huggingface.co/timm/resnet34.bt_in1k)|224 
|75.16|92.18|21.8 |3.7 |3.7 |5994 | |[seresnet50.a3_in1k](https://huggingface.co/timm/seresnet50.a3_in1k)|160 |75.1 |92.08|28.1 |2.1 |5.7 |5513 | |[resnet34.gluon_in1k](https://huggingface.co/timm/resnet34.gluon_in1k)|224 |74.57|91.98|21.8 |3.7 |3.7 |5984 | |[resnet18d.ra2_in1k](https://huggingface.co/timm/resnet18d.ra2_in1k)|288 |73.81|91.83|11.7 |3.4 |5.4 |5196 | |[resnet34.tv_in1k](https://huggingface.co/timm/resnet34.tv_in1k)|224 |73.32|91.42|21.8 |3.7 |3.7 |5979 | |[resnet18.fb_swsl_ig1b_ft_in1k](https://huggingface.co/timm/resnet18.fb_swsl_ig1b_ft_in1k)|224 |73.28|91.73|11.7 |1.8 |2.5 |10213 | |[resnet18.a1_in1k](https://huggingface.co/timm/resnet18.a1_in1k)|288 |73.16|91.03|11.7 |3.0 |4.1 |6050 | |[resnet34.a3_in1k](https://huggingface.co/timm/resnet34.a3_in1k)|224 |72.98|91.11|21.8 |3.7 |3.7 |5967 | |[resnet18.fb_ssl_yfcc100m_ft_in1k](https://huggingface.co/timm/resnet18.fb_ssl_yfcc100m_ft_in1k)|224 |72.6 |91.42|11.7 |1.8 |2.5 |10213 | |[resnet18.a2_in1k](https://huggingface.co/timm/resnet18.a2_in1k)|288 |72.37|90.59|11.7 |3.0 |4.1 |6051 | |[resnet14t.c3_in1k](https://huggingface.co/timm/resnet14t.c3_in1k)|224 |72.26|90.31|10.1 |1.7 |5.8 |7026 | |[resnet18d.ra2_in1k](https://huggingface.co/timm/resnet18d.ra2_in1k)|224 |72.26|90.68|11.7 |2.1 |3.3 |8707 | |[resnet18.a1_in1k](https://huggingface.co/timm/resnet18.a1_in1k)|224 |71.49|90.07|11.7 |1.8 |2.5 |10187 | |[resnet14t.c3_in1k](https://huggingface.co/timm/resnet14t.c3_in1k)|176 |71.31|89.69|10.1 |1.1 |3.6 |10970 | |[resnet18.gluon_in1k](https://huggingface.co/timm/resnet18.gluon_in1k)|224 |70.84|89.76|11.7 |1.8 |2.5 |10210 | |[resnet18.a2_in1k](https://huggingface.co/timm/resnet18.a2_in1k)|224 |70.64|89.47|11.7 |1.8 |2.5 |10194 | |[resnet34.a3_in1k](https://huggingface.co/timm/resnet34.a3_in1k)|160 |70.56|89.52|21.8 |1.9 |1.9 |10737 | |[resnet18.tv_in1k](https://huggingface.co/timm/resnet18.tv_in1k)|224 |69.76|89.07|11.7 |1.8 |2.5 |10205 | |[resnet10t.c3_in1k](https://huggingface.co/timm/resnet10t.c3_in1k)|224 |68.34|88.03|5.4 |1.1 |2.4 |13079 | |[resnet18.a3_in1k](https://huggingface.co/timm/resnet18.a3_in1k)|224 |68.25|88.17|11.7 |1.8 |2.5 |10167 | |[resnet10t.c3_in1k](https://huggingface.co/timm/resnet10t.c3_in1k)|176 |66.71|86.96|5.4 |0.7 |1.5 |20327 | |[resnet18.a3_in1k](https://huggingface.co/timm/resnet18.a3_in1k)|160 |65.66|86.26|11.7 |0.9 |1.3 |18229 | ## Citation ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @article{He2015, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Deep Residual Learning for Image Recognition}, journal = {arXiv preprint arXiv:1512.03385}, year = {2015} } ```
timm/skresnet18.ra_in1k
timm
2023-04-24T00:19:04Z
604
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:1903.06586", "license:apache-2.0", "region:us" ]
image-classification
2023-04-24T00:18:55Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for skresnet18.ra_in1k SKNet (Selective-Kernel ResNet) image classification model. Trained on ImageNet-1k in `timm` by Ross Wightman using `RA` recipe (ResNet strikes back `B` variant). ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 12.0 - GMACs: 1.8 - Activations (M): 3.2 - Image size: 224 x 224 - **Papers:** - Selective Kernel Networks: https://arxiv.org/abs/1903.06586 - **Dataset:** ImageNet-1k - **Original:** - https://github.com/huggingface/pytorch-image-models - https://github.com/clovaai/assembled-cnn ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('skresnet18.ra_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'skresnet18.ra_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 64, 56, 56]) # torch.Size([1, 128, 28, 28]) # torch.Size([1, 256, 14, 14]) # torch.Size([1, 512, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'skresnet18.ra_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 512, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
## Citation ```bibtex @inproceedings{li2019selective, title={Selective Kernel Networks}, author={Li, Xiang and Wang, Wenhai and Hu, Xiaolin and Yang, Jian}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, year={2019} } ```
kul-speech-lab/wav2vec2-xls-r-1b-lay41
kul-speech-lab
2023-08-30T20:31:02Z
604
0
transformers
[ "transformers", "pytorch", "wav2vec2", "pretraining", "endpoints_compatible", "region:us" ]
null
2023-05-01T19:45:34Z
This repository contains the [wav2vec2 XLS-R 1B](https://huggingface.co/facebook/wav2vec2-xls-r-1b) model truncated to 41 layers. See also: https://github.com/lcn-kul/xls-r-analysis-sqa
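For quick experimentation, the truncated encoder can be loaded with the standard `transformers` classes. The following is a minimal sketch, not taken from the original repo: it assumes the stored config reflects the 41-layer depth and borrows the feature extractor from the upstream `facebook/wav2vec2-xls-r-1b` checkpoint.

```python
import numpy as np
import torch
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2Model

# Load the truncated (41-layer) XLS-R 1B encoder
model = Wav2Vec2Model.from_pretrained("kul-speech-lab/wav2vec2-xls-r-1b-lay41").eval()

# Feature extractor borrowed from the full upstream checkpoint (an assumption)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-xls-r-1b")

# One second of dummy 16 kHz audio
waveform = np.zeros(16000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state  # (1, frames, 1280)
print(hidden_states.shape)
```

See the linked xls-r-analysis-sqa repository for the intended usage in speech-quality assessment.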
TheBloke/wizard-mega-13B-GGUF
TheBloke
2023-09-27T12:53:04Z
604
4
transformers
[ "transformers", "gguf", "llama", "text-generation", "en", "dataset:anon8231489123/ShareGPT_Vicuna_unfiltered", "dataset:ehartford/wizard_vicuna_70k_unfiltered", "dataset:ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered", "base_model:openaccess-ai-collective/wizard-mega-13b", "license:other", "text-generation-inference", "region:us" ]
text-generation
2023-09-20T01:11:20Z
--- language: - en license: other library_name: transformers datasets: - anon8231489123/ShareGPT_Vicuna_unfiltered - ehartford/wizard_vicuna_70k_unfiltered - ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered model_name: Wizard Mega 13B base_model: openaccess-ai-collective/wizard-mega-13b inference: false model_creator: Open Access AI Collective model_type: llama pipeline_tag: text-generation prompt_template: 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user''s questions. USER: {prompt} ASSISTANT: ' quantized_by: TheBloke --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Wizard Mega 13B - GGUF - Model creator: [Open Access AI Collective](https://huggingface.co/openaccess-ai-collective) - Original model: [Wizard Mega 13B](https://huggingface.co/openaccess-ai-collective/wizard-mega-13b) <!-- description start --> ## Description This repo contains GGUF format model files for [Open Access AI Collective's Wizard Mega 13B](https://huggingface.co/openaccess-ai-collective/wizard-mega-13b). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplate list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. 
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. <!-- README_GGUF.md-about-gguf end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/wizard-mega-13B-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/wizard-mega-13B-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF) * [Open Access AI Collective's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/openaccess-ai-collective/wizard-mega-13b) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Vicuna ``` A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT: ``` <!-- prompt-template end --> <!-- compatibility_gguf start --> ## Compatibility These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221). They are also compatible with many third party UIs and libraries - please see the list at the top of this README. ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. 
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [wizard-mega-13B.Q2_K.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q2_K.gguf) | Q2_K | 2 | 5.43 GB| 7.93 GB | smallest, significant quality loss - not recommended for most purposes | | [wizard-mega-13B.Q3_K_S.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q3_K_S.gguf) | Q3_K_S | 3 | 5.66 GB| 8.16 GB | very small, high quality loss | | [wizard-mega-13B.Q3_K_M.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q3_K_M.gguf) | Q3_K_M | 3 | 6.34 GB| 8.84 GB | very small, high quality loss | | [wizard-mega-13B.Q3_K_L.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q3_K_L.gguf) | Q3_K_L | 3 | 6.93 GB| 9.43 GB | small, substantial quality loss | | [wizard-mega-13B.Q4_0.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q4_0.gguf) | Q4_0 | 4 | 7.37 GB| 9.87 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [wizard-mega-13B.Q4_K_S.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q4_K_S.gguf) | Q4_K_S | 4 | 7.41 GB| 9.91 GB | small, greater quality loss | | [wizard-mega-13B.Q4_K_M.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q4_K_M.gguf) | Q4_K_M | 4 | 7.87 GB| 10.37 GB | medium, balanced quality - recommended | | [wizard-mega-13B.Q5_0.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q5_0.gguf) | Q5_0 | 5 | 8.97 GB| 11.47 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [wizard-mega-13B.Q5_K_S.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q5_K_S.gguf) | Q5_K_S | 5 | 8.97 GB| 11.47 GB | large, low quality loss - recommended | | [wizard-mega-13B.Q5_K_M.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q5_K_M.gguf) | Q5_K_M | 5 | 9.23 GB| 11.73 GB | large, very low quality loss - recommended | | [wizard-mega-13B.Q6_K.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q6_K.gguf) | Q6_K | 6 | 10.68 GB| 13.18 GB | very large, extremely low quality loss | | [wizard-mega-13B.Q8_0.gguf](https://huggingface.co/TheBloke/wizard-mega-13B-GGUF/blob/main/wizard-mega-13B.Q8_0.gguf) | Q8_0 | 8 | 13.83 GB| 16.33 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. The following clients/libraries will automatically download models for you, providing a list of available models to choose from: - LM Studio - LoLLMS Web UI - Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/wizard-mega-13B-GGUF and below it, a specific filename to download, such as: wizard-mega-13B.Q4_K_M.gguf. Then click Download. 
### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/wizard-mega-13B-GGUF wizard-mega-13B.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/wizard-mega-13B-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/wizard-mega-13B-GGUF wizard-mega-13B.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 32 -m wizard-mega-13B.Q4_K_M.gguf --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 2048` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. 
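Before the ctransformers route below, here is a minimal llama-cpp-python sketch. It is not part of the original card: the file name, context size and GPU layer count are illustrative, and it assumes a llama-cpp-python build recent enough to read GGUF files.

```python
from llama_cpp import Llama

# Load a downloaded GGUF file; set n_gpu_layers=0 if you have no GPU acceleration.
llm = Llama(
    model_path="./wizard-mega-13B.Q4_K_M.gguf",
    n_ctx=2048,       # Llama-1 era models use a 2048-token context
    n_gpu_layers=32,  # number of layers to offload to GPU
)

# Vicuna-style prompt template from the section above
prompt = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions. "
    "USER: Write a haiku about llamas. ASSISTANT:"
)

output = llm(prompt, max_tokens=256, stop=["USER:"], echo=False)
print(output["choices"][0]["text"])
```

The equivalent ctransformers route is shown next.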
### How to load this model in Python code, using ctransformers #### First install the package Run one of the following commands, according to your system: ```shell # Base ctransformers with no GPU acceleration pip install ctransformers # Or with CUDA GPU acceleration pip install ctransformers[cuda] # Or with AMD ROCm GPU acceleration (Linux only) CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers # Or with Metal GPU acceleration for macOS systems only CT_METAL=1 pip install ctransformers --no-binary ctransformers ``` #### Simple ctransformers example code ```python from ctransformers import AutoModelForCausalLM # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = AutoModelForCausalLM.from_pretrained("TheBloke/wizard-mega-13B-GGUF", model_file="wizard-mega-13B.Q4_K_M.gguf", model_type="llama", gpu_layers=50) print(llm("AI is going to")) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](llm-utils)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. **Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. 
Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: Open Access AI Collective's Wizard Mega 13B # Wizard Mega 13B has been updated and is now Manticore 13B [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) **[💵 Donate to OpenAccess AI Collective](https://github.com/sponsors/OpenAccess-AI-Collective) to help us keep building great tools and models!** Manticore is available at https://huggingface.co/openaccess-ai-collective/manticore-13b and fixes many issues with Wizard Mega and adds new datasets to the training. Wizard Mega is a Llama 13B model fine-tuned on the [ShareGPT](https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered), [WizardLM](https://huggingface.co/datasets/ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered), and [Wizard-Vicuna](https://huggingface.co/datasets/ehartford/wizard_vicuna_70k_unfiltered) datasets. These particular datasets have all been filtered to remove responses where the model responds with "As an AI language model...", etc or when the model refuses to respond. # Demo Try out the model in HF Spaces. The demo uses a quantized GGML version of the model to quickly return predictions on smaller GPUs (and even CPUs). Quantized GGML may have some minimal loss of model quality. - https://huggingface.co/spaces/openaccess-ai-collective/wizard-mega-ggml ## Release (Epoch Two) The Wizard Mega 13B SFT model is being released after two epochs as the eval loss increased during the 3rd (final planned epoch). Because of this, we have preliminarily decided to use the epoch 2 checkpoint as the final release candidate. https://wandb.ai/wing-lian/vicuna-13b/runs/5uebgm49 ## Build Wizard Mega was built with [Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl) on 8xA100 80GB for 15 hours. The configuration to duplicate this build is provided in this repo's [/config folder](https://huggingface.co/openaccess-ai-collective/wizard-mega-13b/tree/main/configs). ## Bias, Risks, and Limitations Wizard Mega has not been aligned to human preferences with techniques like RLHF or deployed with in-the-loop filtering of responses like ChatGPT, so the model can produce problematic outputs (especially when prompted to do so). Wizard Mega was fine-tuned from the base model LlaMa 13B, please refer to its model card's Limitations Section for relevant information. ## Examples ```` ### Instruction: write Python code that returns the first n numbers of the Fibonacci sequence using memoization. 
### Assistant: ```python def fib(n): if n < 2: return n elif n in cache: return cache[n] else: a, b = 0, 1 for i in range(2, n+1): cache[i] = a a, b = b, a + b return cache[n] def first_n(n): fib_list = [fib(i) for i in range(n+1)] return fib_list[:n] ``` This function uses memoization to store the values of previous Fibonacci numbers in a cache. This way, if the same number is requested again, it can be returned immediately without recalculating it. The `first_n` function takes an integer `n` as input, and calculates the first n numbers of the Fibonacci sequence using memoization. It returns a list of those numbers. ```` ``` ### Instruction: Finish the joke, a mechanic and a car salesman walk into a bar... ### Assistant: ...and the bartender says "Hey guys, what can I get for you?" The mechanic replies, "I'll have a beer, but make it a quick one. I have to fix this guy's car before he finds out I fiddled with his brakes." The salesman quips, "And I'll have a martini, shaken not stirred. After all, I have to sell this guy a car that doesn't break down on him within the first year of ownership." ``` <!-- original-model-card end -->
showlab/show-1-base
showlab
2023-10-12T03:56:59Z
604
12
diffusers
[ "diffusers", "text-to-video", "arxiv:2309.15818", "license:cc-by-nc-4.0", "diffusers:TextToVideoIFPipeline", "region:us" ]
text-to-video
2023-10-10T16:56:09Z
--- license: cc-by-nc-4.0 tags: - text-to-video --- # show-1-base Pixel-based VDMs can generate motion accurately aligned with the textual prompt but typically demand expensive computational costs in terms of time and GPU memory, especially when generating high-resolution videos. Latent-based VDMs are more resource-efficient because they work in a reduced-dimension latent space. But it is challenging for such small latent space (e.g., 64×40 for 256×160 videos) to cover rich yet necessary visual semantic details as described by the textual prompt. To marry the strength and alleviate the weakness of pixel-based and latent-based VDMs, we introduce **Show-1**, an efficient text-to-video model that generates videos of not only decent video-text alignment but also high visual quality. ![](https://showlab.github.io/Show-1/assets/images/method.png) ## Model Details This is the base model of Show-1 that generates videos with 8 keyframes at a resolution of 64x40. The model is finetuned from [DeepFloyd/IF-I-L-v1.0](https://huggingface.co/DeepFloyd/IF-I-L-v1.0) on the [WebVid-10M](https://maxbain.com/webvid-dataset/) and [InternVid](https://huggingface.co/datasets/OpenGVLab/InternVid) dataset. - **Developed by:** [Show Lab, National University of Singapore](https://sites.google.com/view/showlab/home?authuser=0) - **Model type:** pixel- and latent-based cascaded text-to-video diffusion model - **Cascade stage:** base (keyframe generation) - **Finetuned from model:** [DeepFloyd/IF-I-L-v1.0](https://huggingface.co/DeepFloyd/IF-I-L-v1.0) - **License:** Creative Commons Attribution Non Commercial 4.0 - **Resources for more information:** [GitHub](https://github.com/showlab/Show-1), [Website](https://showlab.github.io/Show-1/), [arXiv](https://arxiv.org/abs/2309.15818) ## Usage Clone the GitHub repository and install the requirements: ```bash git clone https://github.com/showlab/Show-1.git pip install -r requirements.txt ``` Run the following command to generate a video from a text prompt. By default, this will automatically download all the model weights from huggingface. ```bash python run_inference.py ``` You can also download the weights manually and change the `pretrained_model_path` in `run_inference.py` to run the inference. ```bash git lfs install # base git clone https://huggingface.co/showlab/show-1-base # interp git clone https://huggingface.co/showlab/show-1-interpolation # sr1 git clone https://huggingface.co/showlab/show-1-sr1 # sr2 git clone https://huggingface.co/showlab/show-1-sr2 ``` ## Citation If you make use of our work, please cite our paper. ```bibtex @misc{zhang2023show1, title={Show-1: Marrying Pixel and Latent Diffusion Models for Text-to-Video Generation}, author={David Junhao Zhang and Jay Zhangjie Wu and Jia-Wei Liu and Rui Zhao and Lingmin Ran and Yuchao Gu and Difei Gao and Mike Zheng Shou}, year={2023}, eprint={2309.15818}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` ## Model Card Contact This model card is maintained by [David Junhao Zhang](https://junhaozhang98.github.io/) and [Jay Zhangjie Wu](https://jayzjwu.github.io/). For any questions, please feel free to contact us or open an issue in the repository.
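One addendum to the Usage section above: if you prefer to fetch the weights with `huggingface_hub` rather than `git lfs`, a sketch like the following works (not from the original card; the downloaded paths still need to be set as `pretrained_model_path` in `run_inference.py`).

```python
from huggingface_hub import snapshot_download

# Download all four Show-1 stages into the local Hugging Face cache
stages = [
    "showlab/show-1-base",
    "showlab/show-1-interpolation",
    "showlab/show-1-sr1",
    "showlab/show-1-sr2",
]
local_paths = {repo: snapshot_download(repo) for repo in stages}
for repo, path in local_paths.items():
    print(repo, "->", path)
```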
TheBloke/yi-34B-v3-GGUF
TheBloke
2023-12-15T18:34:00Z
604
7
transformers
[ "transformers", "gguf", "yi", "base_model:mncai/yi-34B-v3", "license:other", "region:us" ]
null
2023-12-15T18:16:39Z
--- base_model: mncai/yi-34B-v3 inference: false license: other license_link: LICENSE license_name: yi-license model_creator: MindsAndCompany model_name: Yi 34B v3 model_type: yi prompt_template: '<|user|> {prompt} <|assistant|> ' quantized_by: TheBloke --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Yi 34B v3 - GGUF - Model creator: [MindsAndCompany](https://huggingface.co/mncai) - Original model: [Yi 34B v3](https://huggingface.co/mncai/yi-34B-v3) <!-- description start --> ## Description This repo contains GGUF format model files for [MindsAndCompany's Yi 34B v3](https://huggingface.co/mncai/yi-34B-v3). These files were quantised using hardware kindly provided by [Massed Compute](https://massedcompute.com/). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling. * [GPT4All](https://gpt4all.io/index.html), a free and open source local running GUI, supporting Windows, Linux and macOS with full GPU accel. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. Linux available, in beta as of 27/11/2023. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. 
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. Note: as of the time of writing (November 27th 2023), ctransformers has not been updated in a long time and does not support many recent models. <!-- README_GGUF.md-about-gguf end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/yi-34B-v3-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/yi-34B-v3-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/yi-34B-v3-GGUF) * [MindsAndCompany's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/mncai/yi-34B-v3) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: ToRA ``` <|user|> {prompt} <|assistant|> ``` <!-- prompt-template end --> <!-- compatibility_gguf start --> ## Compatibility These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221). They are also compatible with many third party UIs and libraries - please see the list at the top of this README. ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. 
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [yi-34b-v3.Q2_K.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q2_K.gguf) | Q2_K | 2 | 14.56 GB| 17.06 GB | smallest, significant quality loss - not recommended for most purposes | | [yi-34b-v3.Q3_K_S.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q3_K_S.gguf) | Q3_K_S | 3 | 14.96 GB| 17.46 GB | very small, high quality loss | | [yi-34b-v3.Q3_K_M.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q3_K_M.gguf) | Q3_K_M | 3 | 16.64 GB| 19.14 GB | very small, high quality loss | | [yi-34b-v3.Q3_K_L.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q3_K_L.gguf) | Q3_K_L | 3 | 18.14 GB| 20.64 GB | small, substantial quality loss | | [yi-34b-v3.Q4_0.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q4_0.gguf) | Q4_0 | 4 | 19.47 GB| 21.97 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [yi-34b-v3.Q4_K_S.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q4_K_S.gguf) | Q4_K_S | 4 | 19.55 GB| 22.05 GB | small, greater quality loss | | [yi-34b-v3.Q4_K_M.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q4_K_M.gguf) | Q4_K_M | 4 | 20.66 GB| 23.16 GB | medium, balanced quality - recommended | | [yi-34b-v3.Q5_0.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q5_0.gguf) | Q5_0 | 5 | 23.71 GB| 26.21 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [yi-34b-v3.Q5_K_S.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q5_K_S.gguf) | Q5_K_S | 5 | 23.71 GB| 26.21 GB | large, low quality loss - recommended | | [yi-34b-v3.Q5_K_M.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q5_K_M.gguf) | Q5_K_M | 5 | 24.32 GB| 26.82 GB | large, very low quality loss - recommended | | [yi-34b-v3.Q6_K.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q6_K.gguf) | Q6_K | 6 | 28.22 GB| 30.72 GB | very large, extremely low quality loss | | [yi-34b-v3.Q8_0.gguf](https://huggingface.co/TheBloke/yi-34B-v3-GGUF/blob/main/yi-34b-v3.Q8_0.gguf) | Q8_0 | 8 | 36.54 GB| 39.04 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. The following clients/libraries will automatically download models for you, providing a list of available models to choose from: * LM Studio * LoLLMS Web UI * Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/yi-34B-v3-GGUF and below it, a specific filename to download, such as: yi-34b-v3.Q4_K_M.gguf. Then click Download. 
### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/yi-34B-v3-GGUF yi-34b-v3.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage (click to read)</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/yi-34B-v3-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/yi-34B-v3-GGUF yi-34b-v3.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 35 -m yi-34b-v3.Q4_K_M.gguf --color -c 8192 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "<|user|>\n{prompt}\n<|assistant|>" ``` Change `-ngl 35` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 8192` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`. For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md). ## How to run in `text-generation-webui` Further instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20%E2%80%90%20Model%20Tab.md#llamacpp). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. Therefore I recommend you use llama-cpp-python. ### How to load this model in Python code, using llama-cpp-python For full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/). 
#### First install the package Run one of the following commands, according to your system: ```shell # Base llama-cpp-python with no GPU acceleration pip install llama-cpp-python # With NVidia CUDA acceleration CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python # Or with OpenBLAS acceleration CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python # Or with CLBLast acceleration CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python # Or with AMD ROCm GPU acceleration (Linux only) CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python # Or with Metal GPU acceleration for macOS systems only CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python # On Windows, to set the CMAKE_ARGS variable in PowerShell, follow this format; eg for NVidia CUDA: $env:CMAKE_ARGS = "-DLLAMA_CUBLAS=on" pip install llama-cpp-python ``` #### Simple llama-cpp-python example code ```python from llama_cpp import Llama # Set n_gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = Llama( model_path="./yi-34b-v3.Q4_K_M.gguf", # Download the model file first n_ctx=8192, # The max sequence length to use - note that longer sequence lengths require much more resources n_threads=8, # The number of CPU threads to use, tailor to your system and the resulting performance n_gpu_layers=35 # The number of layers to offload to GPU, if you have GPU acceleration available ) # Simple inference example output = llm( "<|user|>\n{prompt}\n<|assistant|>", # Prompt max_tokens=512, # Generate up to 512 tokens stop=["</s>"], # Example stop token - not necessarily correct for this specific model! Please check before using. echo=True # Whether to echo the prompt ) # Chat Completion API llm = Llama(model_path="./yi-34b-v3.Q4_K_M.gguf", chat_format="llama-2") # Set chat_format according to the model you are using llm.create_chat_completion( messages = [ {"role": "system", "content": "You are a story writing assistant."}, { "role": "user", "content": "Write a story about llamas." } ] ) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. 
**Patreon special mentions**: Michael Levine, 阿明, Trailburnt, Nikolai Manek, John Detwiler, Randy H, Will Dee, Sebastain Graf, NimbleBox.ai, Eugene Pentland, Emad Mostaque, Ai Maven, Jim Angel, Jeff Scroggin, Michael Davis, Manuel Alberto Morcote, Stephen Murray, Robert, Justin Joy, Luke @flexchar, Brandon Frisco, Elijah Stavena, S_X, Dan Guido, Undi ., Komninos Chatzipapas, Shadi, theTransient, Lone Striker, Raven Klaugh, jjj, Cap'n Zoog, Michel-Marie MAUDET (LINAGORA), Matthew Berman, David, Fen Risland, Omer Bin Jawed, Luke Pendergrass, Kalila, OG, Erik Bjäreholt, Rooh Singh, Joseph William Delisle, Dan Lewis, TL, John Villwock, AzureBlack, Brad, Pedro Madruga, Caitlyn Gatomon, K, jinyuan sun, Mano Prime, Alex, Jeffrey Morgan, Alicia Loh, Illia Dulskyi, Chadd, transmissions 11, fincy, Rainer Wilmers, ReadyPlayerEmma, knownsqashed, Mandus, biorpg, Deo Leter, Brandon Phillips, SuperWojo, Sean Connelly, Iucharbius, Jack West, Harry Royden McLaughlin, Nicholas, terasurfer, Vitor Caleffi, Duane Dunston, Johann-Peter Hartmann, David Ziegler, Olakabola, Ken Nordquist, Trenton Dambrowitz, Tom X Nguyen, Vadim, Ajan Kanaga, Leonard Tan, Clay Pascal, Alexandros Triantafyllidis, JM33133, Xule, vamX, ya boyyy, subjectnull, Talal Aujan, Alps Aficionado, wassieverse, Ari Malik, James Bentley, Woland, Spencer Kim, Michael Dempsey, Fred von Graf, Elle, zynix, William Richards, Stanislav Ovsiannikov, Edmond Seymore, Jonathan Leane, Martin Kemka, usrbinkat, Enrico Ros Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: MindsAndCompany's Yi 34B v3 # Model Card for yi-34b-inst-v3 ### Introduction of MindsAndCompany https://mnc.ai/ We create various AI models and develop solutions that can be applied to businesses. And as for generative AI, we are developing products like Code Assistant, TOD Chatbot, LLMOps, and are in the process of developing Enterprise AGI (Artificial General Intelligence). ### Model Summary based yi-34b, instruction tuned and dpo. ### How to Use Here give some examples of how to use our model. ```python from transformers import AutoConfig, AutoModel, AutoTokenizer import transformers import torch hf_model = 'mncai/yi-34B-v3' message = "<|user|>\n두 개의 구가 있는데 각각 지름이 1, 2일때 구의 부피는 몇배 차이가 나지? 설명도 같이 해줘.\n<|assistant|>\n" sequences = pipeline( message, do_sample=True, top_k=10, num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, max_length=2048, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` ### Contact If you have any questions, please raise an issue or contact us at [email protected] <!-- original-model-card end -->
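The usage snippet in the original model card above imports `AutoConfig`, `AutoModel` and `AutoTokenizer` but then calls a `pipeline` and a `tokenizer` that are never constructed. A minimal self-contained sketch — assuming the standard `transformers` text-generation pipeline rather than the authors' exact setup, with an English rendering of the card's prompt, and with dtype/device settings you should adjust for your hardware — would be:

```python
import torch
from transformers import AutoTokenizer, pipeline

hf_model = 'mncai/yi-34B-v3'
tokenizer = AutoTokenizer.from_pretrained(hf_model)

# Build a text-generation pipeline (bfloat16 + device_map="auto" are assumptions, not from the original card)
pipe = pipeline(
    "text-generation",
    model=hf_model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

message = "<|user|>\nTwo spheres have diameters 1 and 2. How many times larger is the bigger sphere's volume? Explain.\n<|assistant|>\n"

sequences = pipe(
    message,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=2048,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```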
CompendiumLabs/bge-large-en-v1.5-gguf
CompendiumLabs
2024-02-17T21:55:06Z
604
4
null
[ "gguf", "license:mit", "region:us" ]
null
2024-02-17T21:46:52Z
--- license: mit --- <img src="https://raw.githubusercontent.com/CompendiumLabs/compendiumlabs.ai/main/images/logo_text_crop.png" alt="Compendium Labs" style="width: 500px;"> # bge-large-en-v1.5-gguf Source model: https://huggingface.co/BAAI/bge-large-en-v1.5 Quantized and unquantized embedding models in GGUF format for use with `llama.cpp`. A large benefit over `transformers` is almost guaranteed and the benefit over ONNX will vary based on the application, but this seems to provide a large speedup on CPU and a modest speedup on GPU for larger models. Due to the relatively small size of these models, quantization will not provide huge benefits, but it does generate up to a 30% speedup on CPU with minimal loss in accuracy. <br/> # Files Available <div style="width: 500px; margin: 0;"> | Filename | Quantization | Size | |:-------- | ------------ | ---- | | [bge-large-en-v1.5-f32.gguf](https://huggingface.co/CompendiumLabs/bge-large-en-v1.5-gguf/blob/main/bge-large-en-v1.5-f32.gguf) | F32 | 1.3 GB | | [bge-large-en-v1.5-f16.gguf](https://huggingface.co/CompendiumLabs/bge-large-en-v1.5-gguf/blob/main/bge-large-en-v1.5-f16.gguf) | F16 | 639 MB | | [bge-large-en-v1.5-q8_0.gguf](https://huggingface.co/CompendiumLabs/bge-large-en-v1.5-gguf/blob/main/bge-large-en-v1.5-q8_0.gguf) | Q8_0 | 342 MB | | [bge-large-en-v1.5-q4_k_m.gguf](https://huggingface.co/CompendiumLabs/bge-large-en-v1.5-gguf/blob/main/bge-large-en-v1.5-q4_k_m.gguf) | Q4_K_M | 199 MB | </div> <br/> # Usage These model files can be used with pure `llama.cpp` or with the `llama-cpp-python` Python bindings ```python from llama_cpp import Llama model = Llama(gguf_path, embedding=True) embed = model.embed(texts) ``` Here `texts` can either be a string or a list of strings, and the return value is a list of embedding vectors. The inputs are grouped into batches automatically for efficient execution. There is also LangChain integration through `langchain_community.embeddings.LlamaCppEmbeddings`.
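The card points to the LangChain integration but does not show it; a minimal sketch of that path — assuming `langchain-community` is installed and using the Q8_0 file from the table above as the local model path — might look like:

```python
from langchain_community.embeddings import LlamaCppEmbeddings

# Point model_path at whichever quant from the table you downloaded
embedder = LlamaCppEmbeddings(model_path="./bge-large-en-v1.5-q8_0.gguf")

doc_vectors = embedder.embed_documents(["GGUF embedding models run well on CPU."])
query_vector = embedder.embed_query("How fast are GGUF embeddings on CPU?")

print(len(doc_vectors[0]), len(query_vector))  # bge-large produces 1024-dimensional vectors
```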
mradermacher/RainyMotip-2x7B-GGUF
mradermacher
2024-05-06T06:00:52Z
604
0
transformers
[ "transformers", "gguf", "moe", "merge", "en", "base_model:Alsebay/RainyMotip-2x7B", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-03-24T10:20:04Z
--- base_model: Alsebay/RainyMotip-2x7B language: - en library_name: transformers license: apache-2.0 quantized_by: mradermacher tags: - moe - merge --- ## About static quants of https://huggingface.co/Alsebay/RainyMotip-2x7B <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q2_K.gguf) | Q2_K | 5.0 | | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.IQ3_XS.gguf) | IQ3_XS | 5.6 | | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q3_K_S.gguf) | Q3_K_S | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.IQ3_S.gguf) | IQ3_S | 5.9 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.IQ3_M.gguf) | IQ3_M | 6.0 | | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q3_K_M.gguf) | Q3_K_M | 6.5 | lower quality | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q3_K_L.gguf) | Q3_K_L | 7.0 | | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.IQ4_XS.gguf) | IQ4_XS | 7.2 | | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q4_0.gguf) | Q4_0 | 7.5 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q4_K_S.gguf) | Q4_K_S | 7.6 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.IQ4_NL.gguf) | IQ4_NL | 7.6 | prefer IQ4_XS | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q4_K_M.gguf) | Q4_K_M | 8.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q5_K_S.gguf) | Q5_K_S | 9.1 | | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q5_K_M.gguf) | Q5_K_M | 9.4 | | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q6_K.gguf) | Q6_K | 10.8 | very good quality | | [GGUF](https://huggingface.co/mradermacher/RainyMotip-2x7B-GGUF/resolve/main/RainyMotip-2x7B.Q8_0.gguf) | Q8_0 | 13.9 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. 
## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
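For a concrete starting point with these files, a minimal download-and-run sequence with plain `llama.cpp` might look like the sketch below — the context size, token count and prompt are illustrative assumptions, not recommendations from the quantizer:

```shell
# Fetch the "fast, recommended" Q4_K_M quant from the table above
huggingface-cli download mradermacher/RainyMotip-2x7B-GGUF RainyMotip-2x7B.Q4_K_M.gguf --local-dir .

# Run it with llama.cpp (the binary is named llama-cli in newer builds)
./main -m RainyMotip-2x7B.Q4_K_M.gguf -c 4096 -n 256 -p "Write a short scene between two travellers."
```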
Woziii/llama-3-8b-chat-me
Woziii
2024-07-01T15:33:39Z
604
0
transformers
[ "transformers", "safetensors", "gguf", "llama", "text-generation", "conversational", "fr", "dataset:Woziii/me", "arxiv:1910.09700", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-06-17T14:52:31Z
--- library_name: transformers license: cc-by-nc-4.0 datasets: - Woziii/me language: - fr --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF
mradermacher
2024-06-18T06:13:44Z
604
2
transformers
[ "transformers", "gguf", "en", "base_model:deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", "license:other", "endpoints_compatible", "region:us" ]
null
2024-06-17T23:37:06Z
--- base_model: deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct language: - en library_name: transformers license: other license_link: LICENSE license_name: deepseek-license quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q2_K.gguf) | Q2_K | 6.5 | | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.IQ3_XS.gguf) | IQ3_XS | 7.2 | | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.IQ3_S.gguf) | IQ3_S | 7.6 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q3_K_S.gguf) | Q3_K_S | 7.6 | | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.IQ3_M.gguf) | IQ3_M | 7.7 | | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q3_K_M.gguf) | Q3_K_M | 8.2 | lower quality | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q3_K_L.gguf) | Q3_K_L | 8.6 | | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.IQ4_XS.gguf) | IQ4_XS | 8.7 | | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q4_K_S.gguf) | Q4_K_S | 9.6 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q4_K_M.gguf) | Q4_K_M | 10.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q5_K_S.gguf) | Q5_K_S | 11.2 | | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q5_K_M.gguf) | Q5_K_M | 12.0 | | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q6_K.gguf) | Q6_K | 14.2 | very good quality | | [GGUF](https://huggingface.co/mradermacher/DeepSeek-Coder-V2-Lite-Instruct-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Instruct.Q8_0.gguf) | Q8_0 | 16.8 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: 
https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
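As a worked example of the usage pointer above, here is one way to load a single-file quant from this repo with `llama-cpp-python` — assuming your llama.cpp build is recent enough to support DeepSeek-V2 architectures, and with illustrative context/offload settings:

```python
from llama_cpp import Llama

# Assumes the Q4_K_M file from the table above has been downloaded into the working directory
llm = Llama(
    model_path="./DeepSeek-Coder-V2-Lite-Instruct.Q4_K_M.gguf",
    n_ctx=4096,       # context window; larger values cost more memory
    n_gpu_layers=-1,  # offload all layers that fit; set to 0 for CPU-only
)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Write a Python function that checks whether a string is a palindrome."}]
)
print(out["choices"][0]["message"]["content"])
```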
NikolayKozloff/RoGemma-7b-Instruct-Q5_K_L-GGUF
NikolayKozloff
2024-06-30T20:57:30Z
604
1
null
[ "gguf", "text-generation-inference", "ro", "region:us" ]
null
2024-06-30T20:36:04Z
---
language:
- ro
tags:
- text-generation-inference
---

Best-quality quant, created by following the instructions in this discussion: https://huggingface.co/bartowski/Phi-3-medium-128k-instruct-GGUF/discussions/3#6679c0ce761779cf45d2321b
ku-nlp/deberta-v2-base-japanese-char-wwm
ku-nlp
2023-03-26T03:32:27Z
603
0
transformers
[ "transformers", "pytorch", "safetensors", "deberta-v2", "fill-mask", "deberta", "character", "wwm", "ja", "dataset:wikipedia", "dataset:cc100", "dataset:oscar", "license:cc-by-sa-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
2023-01-18T13:55:30Z
--- language: ja license: cc-by-sa-4.0 library_name: transformers tags: - deberta - deberta-v2 - fill-mask - character - wwm datasets: - wikipedia - cc100 - oscar metrics: - accuracy mask_token: "[MASK]" widget: - text: "京都大学で自然言語処理を[MASK][MASK]する。" --- # Model Card for Japanese character-level DeBERTa V2 base ## Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR. This model is trained with character-level tokenization and whole word masking. ## How to use You can use this model for masked language modeling as follows: ```python from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained('ku-nlp/deberta-v2-base-japanese-char-wwm') model = AutoModelForMaskedLM.from_pretrained('ku-nlp/deberta-v2-base-japanese-char-wwm') sentence = '京都大学で自然言語処理を[MASK][MASK]する。' encoding = tokenizer(sentence, return_tensors='pt') ... ``` You can also fine-tune this model on downstream tasks. ## Tokenization There is no need to tokenize texts in advance, and you can give raw texts to the tokenizer. The texts are tokenized into character-level tokens by [sentencepiece](https://github.com/google/sentencepiece). ## Training data We used the following corpora for pre-training: - Japanese Wikipedia (as of 20221020, 3.2GB, 27M sentences, 1.3M documents) - Japanese portion of CC-100 (85GB, 619M sentences, 66M documents) - Japanese portion of OSCAR (54GB, 326M sentences, 25M documents) Note that we filtered out documents annotated with "header", "footer", or "noisy" tags in OSCAR. Also note that Japanese Wikipedia was duplicated 10 times to make the total size of the corpus comparable to that of CC-100 and OSCAR. As a result, the total size of the training data is 171GB. ## Training procedure We first segmented texts in the corpora into words using [Juman++ 2.0.0-rc3](https://github.com/ku-nlp/jumanpp/releases/tag/v2.0.0-rc3) for whole word masking. Then, we built a sentencepiece model with 22,012 tokens including all characters that appear in the training corpus. We tokenized raw corpora into character-level subwords using the sentencepiece model and trained the Japanese DeBERTa model using [transformers](https://github.com/huggingface/transformers) library. The training took 20 days using 8 NVIDIA A100-SXM4-40GB GPUs. The following hyperparameters were used during pre-training: - learning_rate: 2e-4 - per_device_train_batch_size: 46 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 6 - total_train_batch_size: 2,208 - max_seq_length: 512 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-06 - lr_scheduler_type: linear schedule with warmup (lr = 0 at 500k steps) - training_steps: 320,000 - warmup_steps: 10,000 ## Acknowledgments This work was supported by Joint Usage/Research Center for Interdisciplinary Large-scale Information Infrastructures (JHPCN) through General Collaboration Project no. jh221004, "Developing a Platform for Constructing and Sharing of Large-Scale Japanese Language Models". For training models, we used the mdx: a platform for the data-driven future.
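The snippet in the "How to use" section above stops right after encoding the input. A short continuation — a generic `transformers` masked-LM decoding sketch, not something prescribed by the model authors — shows how to read out the top prediction for each `[MASK]` position:

```python
import torch

with torch.no_grad():
    logits = model(**encoding).logits

# Locate the [MASK] positions and decode the highest-scoring token for each
mask_positions = (encoding.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
for pos in mask_positions:
    top_id = int(logits[0, pos].argmax(dim=-1))
    print(tokenizer.decode([top_id]))
```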
timm/focalnet_base_srf.ms_in1k
timm
2024-02-10T23:31:51Z
603
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2203.11926", "license:mit", "region:us" ]
image-classification
2023-03-18T04:22:12Z
--- license: mit library_name: timm tags: - image-classification - timm datasets: - imagenet-1k --- # Model card for focalnet_base_srf.ms_in1k A FocalNet image classification model. Pretrained on ImageNet-1k by paper authors. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 88.1 - GMACs: 15.3 - Activations (M): 35.0 - Image size: 224 x 224 - **Papers:** - Focal Modulation Networks: https://arxiv.org/abs/2203.11926 - **Original:** https://github.com/microsoft/FocalNet - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('focalnet_base_srf.ms_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'focalnet_base_srf.ms_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g. for focalnet_base_srf: # torch.Size([1, 128, 56, 56]) # torch.Size([1, 256, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 1024, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'focalnet_base_srf.ms_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled (ie.e a (batch_size, num_features, H, W) tensor) output = model.forward_head(output, pre_logits=True) # output is (batch_size, num_features) tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
## Citation ```bibtex @misc{yang2022focal, title={Focal Modulation Networks}, author={Jianwei Yang and Chunyuan Li and Xiyang Dai and Jianfeng Gao}, journal={Advances in Neural Information Processing Systems (NeurIPS)}, year={2022} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
timm/hrnet_w40.ms_in1k
timm
2023-04-24T21:29:51Z
603
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:1908.07919", "license:mit", "region:us" ]
image-classification
2023-04-24T21:28:58Z
--- tags: - image-classification - timm library_name: timm license: mit datasets: - imagenet-1k --- # Model card for hrnet_w40.ms_in1k A HRNet image classification model. Trained on ImageNet-1k by paper authors. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 57.6 - GMACs: 12.7 - Activations (M): 25.3 - Image size: 224 x 224 - **Papers:** - Deep High-Resolution Representation Learning for Visual Recognition: https://arxiv.org/abs/1908.07919 - **Original:** https://github.com/HRNet/HRNet-Image-Classification - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('hrnet_w40.ms_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'hrnet_w40.ms_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 64, 112, 112]) # torch.Size([1, 128, 56, 56]) # torch.Size([1, 256, 28, 28]) # torch.Size([1, 512, 14, 14]) # torch.Size([1, 1024, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'hrnet_w40.ms_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 2048, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
## Citation
```bibtex
@article{WangSCJDZLMTWLX19,
  title={Deep High-Resolution Representation Learning for Visual Recognition},
  author={Jingdong Wang and Ke Sun and Tianheng Cheng and Borui Jiang and Chaorui Deng and Yang Zhao and Dong Liu and Yadong Mu and Mingkui Tan and Xinggang Wang and Wenyu Liu and Bin Xiao},
  journal={TPAMI},
  year={2019}
}
```
RajuKandasamy/tamillama_tiny_30m
RajuKandasamy
2023-08-28T09:03:28Z
603
15
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "ta", "en", "dataset:roneneldan/TinyStoriesInstruct", "arxiv:2305.07759", "license:gpl", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-08-11T14:34:58Z
--- license: gpl datasets: - roneneldan/TinyStoriesInstruct language: - ta - en library_name: transformers inference: parameters: max_new_tokens: 120 repetition_penalty: 1.4 temperature: 0.01 widget: - text: | சொற்கள்: வீழ்ச்சி, சீட்டு, பிடிவாதம் சுருக்கம்: example_title: Tamil Story with words 1 - text: | சொற்கள்: ஓட்டம், பயணம், குழப்பம் சுருக்கம்: example_title: Tamil Story with words 2 - text: | சொற்கள்: உதவி, பதிவு, சங்கடம் சுருக்கம்: example_title: Tamil Story with words 3 - text: | சொற்கள்: வாக்குறுதி, எலி, பெரியது சுருக்கம்: example_title: Tamil Story with words 4 - text: | Words: prevent, car, broken Features: Dialogue, Twist example_title: Story in English - text: | சொற்கள்: திரும்பு, வாசனை திரவியம், துணிச்சல் சுருக்கம்: example_title: Tamil Story with words 5 --- ## Tamillama_Tiny: A 30M tiny llama model trained to tell stories in Tamil ### TL;DR: This is an experimental model inspired by the paper https://arxiv.org/abs/2305.07759 - How Small Can Language Models Be and Still Speak Coherent English?. Extended the same concept for Tamil. A 30M parameter LLaMA architecture model that outputs coherent Tamil is preseted here. Additional experimentation which is included in the model: 1. This is a multilanguage model as it can output both English and Tamil stories. 2. The model also does translation of stories from Engish to tamil and vice versa. To see the translation feature, set the max_new_tokens > 512. 3. Translation of original stories from the tinystories dataset was done using [IndicTrans](https://ai4bharat.iitm.ac.in/indic-trans) For now, this is a toy model for researchers, students and LLM enthusiasts to play with the linquistic capability of the model. ## Weights Release, License and Usage We release the weights in two formats: Hugging Face transformers format and GGML format to use with CTransformers or LLaMA.cpp. This is not fit for any practical purpose other than for research/experimentation use cases. Usage: ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("RajuKandasamy/tamillama_tiny_30m") model = AutoModelForCausalLM.from_pretrained("RajuKandasamy/tamillama_tiny_30m") prompt = f"""சொற்கள்: வாக்குறுதி, எலி, பெரியது சுருக்கம்:""" input_ids = tokenizer(prompt, return_tensors="pt").input_ids generation_output = model.generate( input_ids=input_ids, max_new_tokens=256 ) print(tokenizer.decode(generation_output[0])) ```
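The card mentions a GGML release for use with CTransformers or llama.cpp but only shows the `transformers` path. A rough `ctransformers` sketch follows; the GGML file name is a hypothetical placeholder (check the repository for the actual file), while the prompt and generation settings mirror the card's own widget example and inference parameters:

```python
from ctransformers import AutoModelForCausalLM

# "tamillama_tiny_30m.ggml.bin" is a placeholder name - substitute the real GGML/GGUF file from the repo
llm = AutoModelForCausalLM.from_pretrained(
    "RajuKandasamy/tamillama_tiny_30m",
    model_file="tamillama_tiny_30m.ggml.bin",
    model_type="llama",
)

prompt = "Words: prevent, car, broken\nFeatures: Dialogue, Twist\n"
print(llm(prompt, max_new_tokens=120, temperature=0.01))
```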
zarakiquemparte/tulpar-limarp-l2-7b-GGUF
zarakiquemparte
2023-08-27T16:44:09Z
603
1
null
[ "gguf", "llama2", "license:other", "region:us" ]
null
2023-08-27T04:08:17Z
--- license: other tags: - llama2 --- Quantized GGUF of [Tulpar LimaRP L2 7b](https://huggingface.co/zarakiquemparte/tulpar-limarp-l2-7b)
Heralax/MythoMakise-13b-quants
Heralax
2023-09-30T03:19:03Z
603
0
null
[ "gguf", "license:llama2", "region:us" ]
null
2023-09-01T21:22:29Z
--- license: llama2 --- See https://huggingface.co/Heralax/MythoMakise-13b
TheBloke/Llama2-22B-GPLATTY-GGUF
TheBloke
2023-09-27T13:02:36Z
603
3
transformers
[ "transformers", "gguf", "llama", "llama-2", "base_model:grimpep/llama2-22B-GPLATTY", "license:llama2", "text-generation-inference", "region:us" ]
null
2023-09-05T18:42:06Z
--- license: llama2 tags: - llama - llama-2 model_name: Llama2 22B GPLATTY inference: false model_creator: grimpep model_link: https://huggingface.co/grimpep/llama2-22B-GPLATTY model_type: llama quantized_by: TheBloke base_model: grimpep/llama2-22B-GPLATTY --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Llama2 22B GPLATTY - GGUF - Model creator: [grimpep](https://huggingface.co/grimpep) - Original model: [Llama2 22B GPLATTY](https://huggingface.co/grimpep/llama2-22B-GPLATTY) ## Description This repo contains GGUF format model files for [grimpep's Llama2 22B GPLATTY](https://huggingface.co/grimpep/llama2-22B-GPLATTY). <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. The key benefit of GGUF is that it is a extensible, future-proof format which stores more information about the model as metadata. It also includes significantly improved tokenization code, including for the first time full support for special tokens. This should improve performance, especially with models that use new special tokens and implement custom prompt templates. Here are a list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with full GPU accel across multiple platforms and GPU architectures. Especially good for story telling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI with GPU acceleration on both Windows (NVidia and AMD), and macOS. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. 
<!-- README_GGUF.md-about-gguf end --> <!-- repositories-available start --> ## Repositories available * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF) * [2, 3, 4, 5, 6 and 8-bit GGML models for CPU+GPU inference (deprecated)](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGML) * [grimpep's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/grimpep/llama2-22B-GPLATTY) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Alpaca ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {prompt} ### Response: ``` <!-- prompt-template end --> <!-- compatibility_gguf start --> ## Compatibility These quantised GGUF files are compatible with llama.cpp from August 21st 2023 onwards, as of commit [6381d4e110bd0ec02843a60bbeb8b6fc37a9ace9](https://github.com/ggerganov/llama.cpp/commit/6381d4e110bd0ec02843a60bbeb8b6fc37a9ace9) They are now also compatible with many third party UIs and libraries - please see the list at the top of the README. ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This end up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. 
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [llama2-22b-gplatty.Q2_K.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q2_K.gguf) | Q2_K | 2 | 9.08 GB| 11.58 GB | smallest, significant quality loss - not recommended for most purposes | | [llama2-22b-gplatty.Q3_K_S.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q3_K_S.gguf) | Q3_K_S | 3 | 9.47 GB| 11.97 GB | very small, high quality loss | | [llama2-22b-gplatty.Q3_K_M.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q3_K_M.gguf) | Q3_K_M | 3 | 10.61 GB| 13.11 GB | very small, high quality loss | | [llama2-22b-gplatty.Q3_K_L.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q3_K_L.gguf) | Q3_K_L | 3 | 11.61 GB| 14.11 GB | small, substantial quality loss | | [llama2-22b-gplatty.Q4_0.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q4_0.gguf) | Q4_0 | 4 | 12.34 GB| 14.84 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [llama2-22b-gplatty.Q4_K_S.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q4_K_S.gguf) | Q4_K_S | 4 | 12.42 GB| 14.92 GB | small, greater quality loss | | [llama2-22b-gplatty.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q4_K_M.gguf) | Q4_K_M | 4 | 13.18 GB| 15.68 GB | medium, balanced quality - recommended | | [llama2-22b-gplatty.Q5_0.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q5_0.gguf) | Q5_0 | 5 | 15.04 GB| 17.54 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [llama2-22b-gplatty.Q5_K_S.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q5_K_S.gguf) | Q5_K_S | 5 | 15.04 GB| 17.54 GB | large, low quality loss - recommended | | [llama2-22b-gplatty.Q5_K_M.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q5_K_M.gguf) | Q5_K_M | 5 | 15.47 GB| 17.97 GB | large, very low quality loss - recommended | | [llama2-22b-gplatty.Q6_K.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q6_K.gguf) | Q6_K | 6 | 17.91 GB| 20.41 GB | very large, extremely low quality loss | | [llama2-22b-gplatty.Q8_0.gguf](https://huggingface.co/TheBloke/Llama2-22B-GPLATTY-GGUF/blob/main/llama2-22b-gplatty.Q8_0.gguf) | Q8_0 | 8 | 23.19 GB| 25.69 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [6381d4e110bd0ec02843a60bbeb8b6fc37a9ace9](https://github.com/ggerganov/llama.cpp/commit/6381d4e110bd0ec02843a60bbeb8b6fc37a9ace9) or later. For compatibility with older versions of llama.cpp, or for any third-party libraries or clients that haven't yet updated for GGUF, please use GGML files instead. ``` ./main -t 10 -ngl 32 -m llama2-22b-gplatty.q4_K_M.gguf --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "Below is an instruction that describes a task. 
Write a response that appropriately completes the request.\n\n### Instruction:\n{prompt}\n\n### Response:" ``` Change `-t 10` to the number of physical CPU cores you have. For example if your system has 8 cores/16 threads, use `-t 8`. If offloading all layers to GPU, set `-t 1`. Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 4096` to the desired sequence length for this model. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. ### How to load this model from Python using ctransformers #### First install the package ```bash # Base ctransformers with no GPU acceleration pip install ctransformers>=0.2.24 # Or with CUDA GPU acceleration pip install ctransformers[cuda]>=0.2.24 # Or with ROCm GPU acceleration CT_HIPBLAS=1 pip install ctransformers>=0.2.24 --no-binary ctransformers # Or with Metal GPU acceleration for macOS systems CT_METAL=1 pip install ctransformers>=0.2.24 --no-binary ctransformers ``` #### Simple example code to load one of these GGUF models ```python from ctransformers import AutoModelForCausalLM # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = AutoModelForCausalLM.from_pretrained("TheBloke/Llama2-22B-GPLATTY-GGUF", model_file="llama2-22b-gplatty.q4_K_M.gguf", model_type="llama", gpu_layers=50) print(llm("AI is going to")) ``` ## How to use with LangChain Here's guides on using llama-cpp-python or ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute. Thanks to the [chirper.ai](https://chirper.ai) team! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. 
**Patreon special mentions**: Russ Johnson, J, alfie_i, Alex, NimbleBox.ai, Chadd, Mandus, Nikolai Manek, Ken Nordquist, ya boyyy, Illia Dulskyi, Viktor Bowallius, vamX, Iucharbius, zynix, Magnesian, Clay Pascal, Pierre Kircher, Enrico Ros, Tony Hughes, Elle, Andrey, knownsqashed, Deep Realms, Jerry Meng, Lone Striker, Derek Yates, Pyrater, Mesiah Bishop, James Bentley, Femi Adebogun, Brandon Frisco, SuperWojo, Alps Aficionado, Michael Dempsey, Vitor Caleffi, Will Dee, Edmond Seymore, usrbinkat, LangChain4j, Kacper Wikieł, Luke Pendergrass, John Detwiler, theTransient, Nathan LeClaire, Tiffany J. Kim, biorpg, Eugene Pentland, Stanislav Ovsiannikov, Fred von Graf, terasurfer, Kalila, Dan Guido, Nitin Borwankar, 阿明, Ai Maven, John Villwock, Gabriel Puliatti, Stephen Murray, Asp the Wyvern, danny, Chris Smitley, ReadyPlayerEmma, S_X, Daniel P. Andersen, Olakabola, Jeffrey Morgan, Imad Khwaja, Caitlyn Gatomon, webtim, Alicia Loh, Trenton Dambrowitz, Swaroop Kallakuri, Erik Bjäreholt, Leonard Tan, Spiking Neurons AB, Luke @flexchar, Ajan Kanaga, Thomas Belote, Deo Leter, RoA, Willem Michiel, transmissions 11, subjectnull, Matthew Berman, Joseph William Delisle, David Ziegler, Michael Davis, Johann-Peter Hartmann, Talal Aujan, senxiiz, Artur Olbinski, Rainer Wilmers, Spencer Kim, Fen Risland, Cap'n Zoog, Rishabh Srivastava, Michael Levine, Geoffrey Montalvo, Sean Connelly, Alexandros Triantafyllidis, Pieter, Gabriel Tamborski, Sam, Subspace Studios, Junyu Yang, Pedro Madruga, Vadim, Cory Kujawski, K, Raven Klaugh, Randy H, Mano Prime, Sebastain Graf, Space Cruiser Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: grimpep's Llama2 22B GPLATTY This model is an experiment using the frankenstein script from https://huggingface.co/chargoddard/llama2-22b I merged an already merged 13B Llama v2 model GUACHAT + KIMIKO = https://huggingface.co/grimpep/13Bv2-llama-modelmerge Then used https://huggingface.co/TheBloke/GPlatty-30B-SuperHOT-8K-fp16 as donor model. It used 160GB of system ram to merge these models, they merge fast without swap. No strange behaviour noticed when generating with this model (maybe more hallucinations than usual), but note that this is still all experimental. This model might be more (didn't finetune it) "ethically" aligned than https://huggingface.co/grimpep/llama2-22b-wizard_vicuna <!-- original-model-card end -->
Felladrin/gguf-Llama-160M-Chat-v1
Felladrin
2024-06-24T15:17:08Z
603
2
null
[ "gguf", "en", "base_model:Felladrin/Llama-160M-Chat-v1", "license:apache-2.0", "region:us" ]
null
2023-12-21T19:28:11Z
--- license: apache-2.0 language: - en base_model: Felladrin/Llama-160M-Chat-v1 --- Quantized GGUF version of [Felladrin/Llama-160M-Chat-v1](https://huggingface.co/Felladrin/Llama-160M-Chat-v1).
legionarius/Llama-3-8B-UltraMedical-Q6_K-GGUF
legionarius
2024-06-20T02:06:47Z
603
0
null
[ "gguf", "llama-cpp", "gguf-my-repo", "dataset:TsinghuaC3I/UltraMedical", "base_model:TsinghuaC3I/Llama-3-8B-UltraMedical", "license:llama3", "region:us" ]
null
2024-06-20T02:06:18Z
--- base_model: TsinghuaC3I/Llama-3-8B-UltraMedical datasets: - TsinghuaC3I/UltraMedical license: llama3 tags: - llama-cpp - gguf-my-repo --- # legionarius/Llama-3-8B-UltraMedical-Q6_K-GGUF This model was converted to GGUF format from [`TsinghuaC3I/Llama-3-8B-UltraMedical`](https://huggingface.co/TsinghuaC3I/Llama-3-8B-UltraMedical) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/TsinghuaC3I/Llama-3-8B-UltraMedical) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo legionarius/Llama-3-8B-UltraMedical-Q6_K-GGUF --hf-file llama-3-8b-ultramedical-q6_k.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo legionarius/Llama-3-8B-UltraMedical-Q6_K-GGUF --hf-file llama-3-8b-ultramedical-q6_k.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo legionarius/Llama-3-8B-UltraMedical-Q6_K-GGUF --hf-file llama-3-8b-ultramedical-q6_k.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo legionarius/Llama-3-8B-UltraMedical-Q6_K-GGUF --hf-file llama-3-8b-ultramedical-q6_k.gguf -c 2048 ```
nDimensional/Waifus-at-War-Warhammer-40K-SDXL
nDimensional
2024-06-23T20:34:51Z
603
0
diffusers
[ "diffusers", "safetensors", "text-to-image", "stable-diffusion", "StableDiffusion", "image-generation", "SDXL", "SDXLv1.0", "Warhammer 40,000", "Warhammer", "Warhammer 40k", "en", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2024-06-23T19:20:05Z
--- license: creativeml-openrail-m tags: - diffusers - safetensors - text-to-image - stable-diffusion - StableDiffusion - image-generation - SDXL - SDXLv1.0 - Warhammer 40,000 - Warhammer - Warhammer 40k language: - en library_name: diffusers --- <html> <head> <link href="https://fonts.googleapis.com/css2?family=Skranji&display=swap" rel="stylesheet"> </head> <style> .main-container { display: flex; flex-direction: column; align-items: center; } .title-container { text-align: center; } .title { font-size: 2.5em; color: #1d1d1d!important; font-family: 'Skranji'; text-transform: uppercase; letter-spacing: 0.1em; padding: 0.5em 0; background: transparent; margin-bottom: 0; } .title span { background: -webkit-linear-gradient(45deg, #EEB21F, #ff6900); -webkit-background-clip: text; -webkit-text-fill-color: transparent; } .icons { display: flex; justify-content: center; margin-top: 5px; margin-bottom: 20px; } .icons img { width: 50px; height: auto; margin: 0 5px; } img { max-width: 100%; height: auto; } .center { display: block; margin-left: auto; margin-right: auto; width: 832px; } </style> <div clss='main-container'> <div class="title-container"> <h1 class="title"> <span>Waifus at War</span> </h1> </div> <div class="icons"> <img src="assets/aquila.svg" alt="Aquila"> <img src="assets/xenos.svg" alt="Xenos"> <img src="assets/chaos.svg" alt="Chaos"> </div> </div> <figure> <img src="assets/sample_image.png" alt="Sample Image" class="center"/> <figcaption>Generated with WeebHammer.fp16.safetensors</figcaption> </figure> <!-- START SUPPORT --> <a href='https://ko-fi.com/M4M3ZBOIT' target='_blank'> <img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi3.png?v=3' border='0' alt='Buy Me a Coffee at ko-fi.com' /> </a> <!-- END SUPPORT --> <b>More Details on Civitiai: <a href="https://civitai.com/models/533636?modelVersionId=583374">NSFW WARNING</a></b> <br> <b><a href="https://huggingface.co/nDimensional/War-and-Diffusion-Warhammer-40K-SDXL">Non-Anime Version</a></b> <!-- START MODEL INFO --> <div class="model-info-container" style='font-family:Monospace'> <table id="model-info-table"> <tr> <th></th> <th></th> </tr> <tr> <td>Base Model</td> <td>SDXL v1.0</td> </tr> <tr> <td>VAE</td> <td><a href="https://huggingface.co/madebyollin/sdxl-vae-fp16-fix">SDXL-VAE-FP16-FIX</a></td> </tr> <tr> <td>Type</td> <td>Full Parameter Fine-Tune</td> </tr> </table> </div> <!-- END MODEL INFO -->
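The card lists `diffusers` as the library but gives no loading code. A minimal text-to-image sketch — assuming the repository exposes a standard diffusers-format SDXL pipeline, and pairing it with the fp16-fixed VAE named in the model table — could look like:

```python
import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

# VAE from the model table above; fp16 + CUDA are assumptions, adjust for your hardware
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
pipe = StableDiffusionXLPipeline.from_pretrained(
    "nDimensional/Waifus-at-War-Warhammer-40K-SDXL",
    vae=vae,
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="an Adeptus Astartes space marine on a battlefield, anime style",
    num_inference_steps=30,
).images[0]
image.save("waifus_at_war_sample.png")
```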
timm/lcnet_075.ra2_in1k
timm
2023-04-27T22:48:59Z
602
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2110.00476", "arxiv:2109.15099", "license:apache-2.0", "region:us" ]
image-classification
2022-12-16T05:37:34Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for lcnet_075.ra2_in1k A LCNet image classification model. Trained on ImageNet-1k in `timm` using recipe template described below. Recipe details: * RandAugment `RA2` recipe. Inspired by and evolved from EfficientNet RandAugment recipes. Published as `B` recipe in [ResNet Strikes Back](https://arxiv.org/abs/2110.00476). * RMSProp (TF 1.0 behaviour) optimizer, EMA weight averaging * Step (exponential decay w/ staircase) LR schedule with warmup ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 2.4 - GMACs: 0.1 - Activations (M): 2.0 - Image size: 224 x 224 - **Papers:** - PP-LCNet: A Lightweight CPU Convolutional Neural Network: https://arxiv.org/abs/2109.15099 - ResNet strikes back: An improved training procedure in timm: https://arxiv.org/abs/2110.00476 - **Dataset:** ImageNet-1k - **Original:** https://github.com/huggingface/pytorch-image-models ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('lcnet_075.ra2_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'lcnet_075.ra2_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 24, 112, 112]) # torch.Size([1, 48, 56, 56]) # torch.Size([1, 96, 28, 28]) # torch.Size([1, 192, 14, 14]) # torch.Size([1, 384, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'lcnet_075.ra2_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 384, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore 
the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{cui2021pp, title={PP-LCNet: A lightweight CPU convolutional neural network}, author={Cui, Cheng and Gao, Tingquan and Wei, Shengyu and Du, Yuning and Guo, Ruoyu and Dong, Shuilong and Lu, Bin and Zhou, Ying and Lv, Xueying and Liu, Qiwen and others}, journal={arXiv preprint arXiv:2109.15099}, year={2021} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ``` ```bibtex @inproceedings{wightman2021resnet, title={ResNet strikes back: An improved training procedure in timm}, author={Wightman, Ross and Touvron, Hugo and Jegou, Herve}, booktitle={NeurIPS 2021 Workshop on ImageNet: Past, Present, and Future} } ```
keremberke/yolov5n-garbage
keremberke
2023-01-05T12:02:21Z
602
2
yolov5
[ "yolov5", "tensorboard", "yolo", "vision", "object-detection", "pytorch", "dataset:keremberke/garbage-object-detection", "model-index", "region:us" ]
object-detection
2023-01-05T12:01:57Z
---
tags:
- yolov5
- yolo
- vision
- object-detection
- pytorch
library_name: yolov5
library_version: 7.0.7
inference: false
datasets:
- keremberke/garbage-object-detection
model-index:
- name: keremberke/yolov5n-garbage
  results:
  - task:
      type: object-detection
    dataset:
      type: keremberke/garbage-object-detection
      name: keremberke/garbage-object-detection
      split: validation
    metrics:
      - type: precision  # since [email protected] is not available on hf.co/metrics
        value: 0.31988455750353156  # min: 0.0 - max: 1.0
        name: [email protected]
---

<div align="center">
  <img width="640" alt="keremberke/yolov5n-garbage" src="https://huggingface.co/keremberke/yolov5n-garbage/resolve/main/sample_visuals.jpg">
</div>

### How to use

- Install [yolov5](https://github.com/fcakyon/yolov5-pip):

```bash
pip install -U yolov5
```

- Load model and perform prediction:

```python
import yolov5

# load model
model = yolov5.load('keremberke/yolov5n-garbage')

# set model parameters
model.conf = 0.25  # NMS confidence threshold
model.iou = 0.45  # NMS IoU threshold
model.agnostic = False  # NMS class-agnostic
model.multi_label = False  # NMS multiple labels per box
model.max_det = 1000  # maximum number of detections per image

# set image
img = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg'

# perform inference
results = model(img, size=640)

# inference with test time augmentation
results = model(img, augment=True)

# parse results
predictions = results.pred[0]
boxes = predictions[:, :4]  # x1, y1, x2, y2
scores = predictions[:, 4]
categories = predictions[:, 5]

# show detection bounding boxes on image
results.show()

# save results into "results/" folder
results.save(save_dir='results/')
```

- Finetune the model on your custom dataset:

```bash
yolov5 train --data data.yaml --img 640 --batch 16 --weights keremberke/yolov5n-garbage --epochs 10
```

**More models available at: [awesome-yolov5-models](https://github.com/keremberke/awesome-yolov5-models)**
zarakiquemparte/hermes-rp-l2-7b-GGUF
zarakiquemparte
2023-08-27T18:51:58Z
602
1
null
[ "gguf", "llama2", "license:other", "region:us" ]
null
2023-08-27T16:00:08Z
--- license: other tags: - llama2 --- Quantized GGUF of [Hermes RP L2 7b](https://huggingface.co/zarakiquemparte/hermes-rp-l2-7b)
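
For a quick local smoke test of this quantization, a minimal loading sketch with [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) is shown below. The GGUF file name and the prompt template are placeholders/assumptions: substitute the quantization level you actually downloaded from this repository and the prompt format documented in the original Hermes RP L2 7b card.

```python
from llama_cpp import Llama

# Placeholder file name: point this at whichever GGUF quantization you
# downloaded from this repository (e.g. a Q4_K_M or Q5_K_M file).
llm = Llama(model_path="./hermes-rp-l2-7b.q4_k_m.gguf", n_ctx=2048)

# The instruction-style prompt below is an assumption; check the original
# model card for the exact template the model was trained with.
output = llm(
    "### Instruction:\nIntroduce yourself in one sentence.\n\n### Response:\n",
    max_tokens=128,
    temperature=0.7,
)
print(output["choices"][0]["text"])
```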
Qwen/Qwen-14B-Chat-Int4
Qwen
2023-12-13T15:44:35Z
602
103
transformers
[ "transformers", "safetensors", "qwen", "text-generation", "custom_code", "zh", "en", "arxiv:2309.16609", "arxiv:2305.08322", "arxiv:2009.03300", "arxiv:2305.05280", "arxiv:2210.03629", "autotrain_compatible", "4-bit", "gptq", "region:us" ]
text-generation
2023-09-24T03:27:30Z
--- language: - zh - en tags: - qwen pipeline_tag: text-generation inference: false --- # Qwen-14B-Chat-Int4 <p align="center"> <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg" width="400"/> <p> <br> <p align="center"> 🤗 <a href="https://huggingface.co/Qwen">Hugging Face</a>&nbsp&nbsp | &nbsp&nbsp🤖 <a href="https://modelscope.cn/organization/qwen">ModelScope</a>&nbsp&nbsp | &nbsp&nbsp 📑 <a href="https://arxiv.org/abs/2309.16609">Paper</a> &nbsp&nbsp | &nbsp&nbsp🖥️ <a href="https://modelscope.cn/studios/qwen/Qwen-14B-Chat-Demo/summary">Demo</a> <br> <a href="https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png">WeChat (微信)</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://discord.gg/z3GAxXZ9Ce">Discord</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://dashscope.aliyun.com">API</a> </p> <br> ## 介绍(Introduction) **通义千问-14B(Qwen-14B)**是阿里云研发的通义千问大模型系列的140亿参数规模的模型。Qwen-14B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-14B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-14B-Chat。本仓库为Qwen-14B-Chat的Int4量化模型的仓库。 如果您想了解更多关于通义千问-14B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。 **Qwen-14B** is the 14B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Alibaba Cloud. Qwen-14B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-14B, we release Qwen-14B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. This repository is the one for the Int4 quantized model of Qwen-14B-Chat. For more details about the open-source model of Qwen-14B, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository. <br> ## 要求(Requirements) * python 3.8及以上版本 * pytorch 2.0及以上版本 * 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项) * python 3.8 and above * pytorch 2.0 and above, 2.0 and above are recommended * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.) <br> ## 依赖项(Dependency) 运行Qwen-14B-Chat-Int4,请确保满足上述要求,再执行以下pip命令安装依赖库。如安装`auto-gptq`遇到问题,我们建议您到官方[repo](https://github.com/PanQiWei/AutoGPTQ)搜索合适的预编译wheel。 To run Qwen-14B-Chat-Int4, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries. If you meet problems installing `auto-gptq`, we advise you to check out the official [repo](https://github.com/PanQiWei/AutoGPTQ) to find a pre-build wheel. ```bash pip install transformers==4.32.0 accelerate tiktoken einops scipy transformers_stream_generator==0.0.4 peft deepspeed pip install auto-gptq optimum ``` 另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。 In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage. ```bash git clone https://github.com/Dao-AILab/flash-attention cd flash-attention && pip install . # 下方安装可选,安装可能比较缓慢。 # pip install csrc/layer_norm # pip install csrc/rotary ``` <br> ## 快速使用(Quickstart) 下面我们展示了一个使用Qwen-14B-Chat-Int4模型的样例: We show an example of how to use Qwen-14B-Chat-Int4 in the following code: ```python from transformers import AutoTokenizer, AutoModelForCausalLM # Note: The default behavior now has injection attack prevention off. 
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-14B-Chat-Int4", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained( "Qwen/Qwen-14B-Chat-Int4", device_map="auto", trust_remote_code=True ).eval() response, history = model.chat(tokenizer, "你好", history=None) print(response) # 你好!很高兴为你提供帮助。 ``` 关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。 For more information, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen) for more information. <br> ## 量化 (Quantization) ### 效果评测 我们对BF16,Int8和Int4模型在基准评测上做了测试(使用zero-shot设置),发现量化模型效果损失较小,结果如下所示: We illustrate the zero-shot performance of both BF16, Int8 and Int4 models on the benchmark, and we find that the quantized model does not suffer from significant performance degradation. Results are shown below: | Quantization | MMLU | CEval (val) | GSM8K | Humaneval | |--------------|:----:|:-----------:|:-----:|:---------:| | BF16 | 64.6 | 69.8 | 60.1 | 43.9 | | Int8 | 63.6 | 68.6 | 60.0 | 48.2 | | Int4 | 63.3 | 69.0 | 59.8 | 45.7 | ### 推理速度 (Inference Speed) 我们测算了不同精度模型以及不同FlashAttn库版本下模型生成2048和8192个token的平均推理速度。如图所示: We measured the average inference speed of generating 2048 and 8192 tokens with different quantization levels and versions of flash-attention, respectively. | Quantization | FlashAttn | Speed (2048 tokens) | Speed (8192 tokens) | | ------------- | :-------: | :------------------:| :------------------:| | BF16 | v2 | 32.88 | 24.87 | | Int8 | v2 | 29.28 | 24.22 | | Int4 | v2 | 38.72 | 27.33 | | BF16 | v1 | 32.76 | 28.89 | | Int8 | v1 | 28.31 | 23.87 | | Int4 | v1 | 37.81 | 26.46 | | BF16 | Disabled | 29.32 | 22.91 | | Int8 | Disabled | 31.12 | 24.60 | | Int4 | Disabled | 37.65 | 26.00 | 具体而言,我们记录在长度为1的上下文的条件下生成8192个token的性能。评测运行于单张A100-SXM4-80G GPU,使用PyTorch 2.0.1和CUDA 11.8。推理速度是生成8192个token的速度均值。 In detail, the setting of profiling is generating 8192 new tokens with 1 context token. The profiling runs on a single A100-SXM4-80G GPU with PyTorch 2.0.1 and CUDA 11.8. The inference speed is averaged over the generated 8192 tokens. 注意:以上Int4/Int8模型生成速度使用autogptq库给出,当前``AutoModelForCausalLM.from_pretrained``载入的模型生成速度会慢大约20%。我们已经将该问题汇报给HuggingFace团队,若有解决方案将即时更新。 Note: The generation speed of the Int4/Int8 models mentioned above is provided by the autogptq library. The current speed of the model loaded using "AutoModelForCausalLM.from_pretrained" will be approximately 20% slower. We have reported this issue to the HuggingFace team and will update it promptly if a solution is available. ### 显存使用 (GPU Memory Usage) 我们还测算了不同模型精度编码2048个token及生成8192个token的峰值显存占用情况。(显存消耗在是否使用FlashAttn的情况下均类似。)结果如下所示: We also profile the peak GPU memory usage for encoding 2048 tokens as context (and generating single token) and generating 8192 tokens (with single token as context) under different quantization levels, respectively. (The GPU memory usage is similar when using flash-attention or not.)The results are shown below. | Quantization Level | Peak Usage for Encoding 2048 Tokens | Peak Usage for Generating 8192 Tokens | | ------------------ | :---------------------------------: | :-----------------------------------: | | BF16 | 30.15GB | 38.94GB | | Int8 | 18.81GB | 27.54GB | | Int4 | 13.01GB | 21.79GB | 上述性能测算使用[此脚本](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py)完成。 The above speed and memory profiling are conducted using [this script](https://qianwen-res.oss-cn-beijing.aliyuncs.com/profile.py). 
<br> ## Tokenizer > 注:作为术语的“tokenization”在中文中尚无共识的概念对应,本文档采用英文表达以利说明。 基于tiktoken的分词器有别于其他分词器,比如sentencepiece分词器。尤其在微调阶段,需要特别注意特殊token的使用。关于tokenizer的更多信息,以及微调时涉及的相关使用,请参阅[文档](https://github.com/QwenLM/Qwen/blob/main/tokenization_note_zh.md)。 Our tokenizer based on tiktoken is different from other tokenizers, e.g., sentencepiece tokenizer. You need to pay attention to special tokens, especially in finetuning. For more detailed information on the tokenizer and related use in fine-tuning, please refer to the [documentation](https://github.com/QwenLM/Qwen/blob/main/tokenization_note.md). <br> ## 模型细节(Model) 与Qwen-14B预训练模型相同,Qwen-14B-Chat模型规模基本情况如下所示 The details of the model architecture of Qwen-14B-Chat are listed as follows | Hyperparameter | Value | |:----------------|:------:| | n_layers | 40 | | n_heads | 40 | | d_model | 5120 | | vocab size | 151851 | | sequence length | 2048 | 在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法, 即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。 在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-14B-Chat使用了约15万token大小的词表。 该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强。 词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。 For position encoding, FFN activation function, and normalization calculation methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration). For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-14B-Chat uses a vocabulary of over 150K tokens. It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary. It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization. <br> ## 评测效果(Evaluation) 对于Qwen-14B-Chat模型,我们同样评测了常规的中文理解(C-Eval)、英文理解(MMLU)、代码(HumanEval)和数学(GSM8K)等权威任务,同时包含了长序列任务的评测结果。由于Qwen-14B-Chat模型经过对齐后,激发了较强的外部系统调用能力,我们还进行了工具使用能力方面的评测。 提示:由于硬件和框架造成的舍入误差,复现结果如有波动属于正常现象。 For Qwen-14B-Chat, we also evaluate the model on C-Eval, MMLU, HumanEval, GSM8K, etc., as well as the benchmark evaluation for long-context understanding, and tool usage. Note: Due to rounding errors caused by hardware and framework, differences in reproduced results are possible. ### 中文评测(Chinese Evaluation) #### C-Eval 在[C-Eval](https://arxiv.org/abs/2305.08322)验证集上,我们评价了Qwen-14B-Chat模型的0-shot & 5-shot准确率 We demonstrate the 0-shot & 5-shot accuracy of Qwen-14B-Chat on C-Eval validation set | Model | Avg. Acc. | |:--------------------------------:|:---------:| | LLaMA2-7B-Chat | 31.9 | | LLaMA2-13B-Chat | 36.2 | | LLaMA2-70B-Chat | 44.3 | | ChatGLM2-6B-Chat | 52.6 | | InternLM-7B-Chat | 53.6 | | Baichuan2-7B-Chat | 55.6 | | Baichuan2-13B-Chat | 56.7 | | Qwen-7B-Chat (original) (0-shot) | 54.2 | | **Qwen-7B-Chat (0-shot)** | 59.7 | | **Qwen-7B-Chat (5-shot)** | 59.3 | | **Qwen-14B-Chat (0-shot)** | 69.8 | | **Qwen-14B-Chat (5-shot)** | **71.7** | C-Eval测试集上,Qwen-14B-Chat模型的zero-shot准确率结果如下: The zero-shot accuracy of Qwen-14B-Chat on C-Eval testing set is provided below: | Model | Avg. 
| STEM | Social Sciences | Humanities | Others | | :---------------------- | :------: | :--: | :-------------: | :--------: | :----: | | Chinese-Alpaca-Plus-13B | 41.5 | 36.6 | 49.7 | 43.1 | 41.2 | | Chinese-Alpaca-2-7B | 40.3 | - | - | - | - | | ChatGLM2-6B-Chat | 50.1 | 46.4 | 60.4 | 50.6 | 46.9 | | Baichuan-13B-Chat | 51.5 | 43.7 | 64.6 | 56.2 | 49.2 | | Qwen-7B-Chat (original) | 54.6 | 47.8 | 67.6 | 59.3 | 50.6 | | **Qwen-7B-Chat** | 58.6 | 53.3 | 72.1 | 62.8 | 52.0 | | **Qwen-14B-Chat** | **69.1** | 65.1 | 80.9 | 71.2 | 63.4 | 在14B规模模型上,经过人类指令对齐的Qwen-14B-Chat模型,准确率在同类相近规模模型中仍然处于前列。 Compared with other pretrained models with comparable model size, the human-aligned Qwen-14B-Chat performs well in C-Eval accuracy. ### 英文评测(English Evaluation) #### MMLU [MMLU](https://arxiv.org/abs/2009.03300)评测集上,Qwen-14B-Chat模型的 0-shot & 5-shot 准确率如下,效果同样在同类对齐模型中同样表现较优。 The 0-shot & 5-shot accuracy of Qwen-14B-Chat on MMLU is provided below. The performance of Qwen-14B-Chat still on the top between other human-aligned models with comparable size. | Model | Avg. Acc. | |:--------------------------------:|:---------:| | ChatGLM2-6B-Chat | 46.0 | | LLaMA2-7B-Chat | 46.2 | | InternLM-7B-Chat | 51.1 | | Baichuan2-7B-Chat | 52.9 | | LLaMA2-13B-Chat | 54.6 | | Baichuan2-13B-Chat | 57.3 | | LLaMA2-70B-Chat | 63.8 | | Qwen-7B-Chat (original) (0-shot) | 53.9 | | **Qwen-7B-Chat (0-shot)** | 55.8 | | **Qwen-7B-Chat (5-shot)** | 57.0 | | **Qwen-14B-Chat (0-shot)** | 64.6 | | **Qwen-14B-Chat (5-shot)** | **66.5** | ### 代码评测(Coding Evaluation) Qwen-14B-Chat在[HumanEval](https://github.com/openai/human-eval)的zero-shot Pass@1效果如下 The zero-shot Pass@1 of Qwen-14B-Chat on [HumanEval](https://github.com/openai/human-eval) is demonstrated below | Model | Pass@1 | |:-----------------------:|:--------:| | ChatGLM2-6B-Chat | 11.0 | | LLaMA2-7B-Chat | 12.2 | | InternLM-7B-Chat | 14.6 | | Baichuan2-7B-Chat | 13.4 | | LLaMA2-13B-Chat | 18.9 | | Baichuan2-13B-Chat | 17.7 | | LLaMA2-70B-Chat | 32.3 | | Qwen-7B-Chat (original) | 24.4 | | **Qwen-7B-Chat** | 37.2 | | **Qwen-14B-Chat** | **43.9** | ### 数学评测(Mathematics Evaluation) 在评测数学能力的[GSM8K](https://github.com/openai/grade-school-math)上,Qwen-14B-Chat的准确率结果如下 The accuracy of Qwen-14B-Chat on GSM8K is shown below | Model | Acc. | |:--------------------------------:|:--------:| | LLaMA2-7B-Chat | 26.3 | | ChatGLM2-6B-Chat | 28.8 | | Baichuan2-7B-Chat | 32.8 | | InternLM-7B-Chat | 33.0 | | LLaMA2-13B-Chat | 37.1 | | Baichuan2-13B-Chat | 55.3 | | LLaMA2-70B-Chat | 59.3 | | Qwen-7B-Chat (original) (0-shot) | 41.1 | | **Qwen-7B-Chat (0-shot)** | 50.3 | | **Qwen-7B-Chat (8-shot)** | 54.1 | | **Qwen-14B-Chat (0-shot)** | **60.1** | | **Qwen-14B-Chat (8-shot)** | 59.3 | ### 长序列评测(Long-Context Understanding) 通过NTK插值,LogN注意力缩放可以扩展Qwen-14B-Chat的上下文长度。在长文本摘要数据集[VCSUM](https://arxiv.org/abs/2305.05280)上(文本平均长度在15K左右),Qwen-14B-Chat的Rouge-L结果如下: **(若要启用这些技巧,请将config.json里的`use_dynamic_ntk`和`use_logn_attn`设置为true)** We introduce NTK-aware interpolation, LogN attention scaling to extend the context length of Qwen-14B-Chat. 
The Rouge-L results of Qwen-14B-Chat on long-text summarization dataset [VCSUM](https://arxiv.org/abs/2305.05280) (The average length of this dataset is around 15K) are shown below: **(To use these tricks, please set `use_dynamic_ntk` and `use_long_attn` to true in config.json.)** | Model | VCSUM (zh) | |:------------------|:----------:| | GPT-3.5-Turbo-16k | 16.0 | | LLama2-7B-Chat | 0.2 | | InternLM-7B-Chat | 13.0 | | ChatGLM2-6B-Chat | 16.3 | | **Qwen-14B-Chat** | **17.3** | ### 工具使用能力的评测(Tool Usage) #### ReAct Prompting 千问支持通过 [ReAct Prompting](https://arxiv.org/abs/2210.03629) 调用插件/工具/API。ReAct 也是 [LangChain](https://python.langchain.com/) 框架采用的主要方式之一。在我们开源的、用于评估工具使用能力的评测基准上,千问的表现如下: Qwen-Chat supports calling plugins/tools/APIs through [ReAct Prompting](https://arxiv.org/abs/2210.03629). ReAct is also one of the main approaches used by the [LangChain](https://python.langchain.com/) framework. In our evaluation benchmark for assessing tool usage capabilities, Qwen-Chat's performance is as follows: <table> <tr> <th colspan="4" align="center">Chinese Tool-Use Benchmark</th> </tr> <tr> <th align="center">Model</th><th align="center">Tool Selection (Acc.↑)</th><th align="center">Tool Input (Rouge-L↑)</th><th align="center">False Positive Error↓</th> </tr> <tr> <td>GPT-4</td><td align="center">95%</td><td align="center">0.90</td><td align="center">15.0%</td> </tr> <tr> <td>GPT-3.5</td><td align="center">85%</td><td align="center">0.88</td><td align="center">75.0%</td> </tr> <tr> <td>Qwen-7B-Chat</td><td align="center">98%</td><td align="center">0.91</td><td align="center">7.3%</td> </tr> <tr> <td>Qwen-14B-Chat</td><td align="center">98%</td><td align="center">0.93</td><td align="center">2.4%</td> </tr> </table> > 评测基准中出现的插件均没有出现在千问的训练集中。该基准评估了模型在多个候选插件中选择正确插件的准确率、传入插件的参数的合理性、以及假阳率。假阳率(False Positive)定义:在处理不该调用插件的请求时,错误地调用了插件。 > The plugins that appear in the evaluation set do not appear in the training set of Qwen. This benchmark evaluates the accuracy of the model in selecting the correct plugin from multiple candidate plugins, the rationality of the parameters passed into the plugin, and the false positive rate. False Positive: Incorrectly invoking a plugin when it should not have been called when responding to a query. ![](assets/react_showcase_001.png) ![](assets/react_showcase_002.png) #### Code Interpreter 为了考察Qwen使用Python Code Interpreter完成数学解题、数据可视化、及文件处理与爬虫等任务的能力,我们专门建设并开源了一个评测这方面能力的[评测基准](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark)。 我们发现Qwen在生成代码的可执行率、结果正确性上均表现较好: To assess Qwen's ability to use the Python Code Interpreter for tasks such as mathematical problem solving, data visualization, and other general-purpose tasks such as file handling and web scraping, we have created and open-sourced a benchmark specifically designed for evaluating these capabilities. You can find the benchmark at this [link](https://github.com/QwenLM/Qwen-Agent/tree/main/benchmark). 
We have observed that Qwen performs well in terms of code executability and result accuracy when generating code: <table> <tr> <th colspan="4" align="center">Executable Rate of Generated Code (%)</th> </tr> <tr> <th align="center">Model</th><th align="center">Math↑</th><th align="center">Visualization↑</th><th align="center">General↑</th> </tr> <tr> <td>GPT-4</td><td align="center">91.9</td><td align="center">85.9</td><td align="center">82.8</td> </tr> <tr> <td>GPT-3.5</td><td align="center">89.2</td><td align="center">65.0</td><td align="center">74.1</td> </tr> <tr> <td>LLaMA2-7B-Chat</td> <td align="center">41.9</td> <td align="center">33.1</td> <td align="center">24.1 </td> </tr> <tr> <td>LLaMA2-13B-Chat</td> <td align="center">50.0</td> <td align="center">40.5</td> <td align="center">48.3 </td> </tr> <tr> <td>CodeLLaMA-7B-Instruct</td> <td align="center">85.1</td> <td align="center">54.0</td> <td align="center">70.7 </td> </tr> <tr> <td>CodeLLaMA-13B-Instruct</td> <td align="center">93.2</td> <td align="center">55.8</td> <td align="center">74.1 </td> </tr> <tr> <td>InternLM-7B-Chat-v1.1</td> <td align="center">78.4</td> <td align="center">44.2</td> <td align="center">62.1 </td> </tr> <tr> <td>InternLM-20B-Chat</td> <td align="center">70.3</td> <td align="center">44.2</td> <td align="center">65.5 </td> </tr> <tr> <td>Qwen-7B-Chat</td> <td align="center">82.4</td> <td align="center">64.4</td> <td align="center">67.2 </td> </tr> <tr> <td>Qwen-14B-Chat</td> <td align="center">89.2</td> <td align="center">84.1</td> <td align="center">65.5</td> </tr> </table> <table> <tr> <th colspan="4" align="center">Accuracy of Code Execution Results (%)</th> </tr> <tr> <th align="center">Model</th><th align="center">Math↑</th><th align="center">Visualization-Hard↑</th><th align="center">Visualization-Easy↑</th> </tr> <tr> <td>GPT-4</td><td align="center">82.8</td><td align="center">66.7</td><td align="center">60.8</td> </tr> <tr> <td>GPT-3.5</td><td align="center">47.3</td><td align="center">33.3</td><td align="center">55.7</td> </tr> <tr> <td>LLaMA2-7B-Chat</td> <td align="center">3.9</td> <td align="center">14.3</td> <td align="center">39.2 </td> </tr> <tr> <td>LLaMA2-13B-Chat</td> <td align="center">8.3</td> <td align="center">8.3</td> <td align="center">40.5 </td> </tr> <tr> <td>CodeLLaMA-7B-Instruct</td> <td align="center">14.3</td> <td align="center">26.2</td> <td align="center">60.8 </td> </tr> <tr> <td>CodeLLaMA-13B-Instruct</td> <td align="center">28.2</td> <td align="center">27.4</td> <td align="center">62.0 </td> </tr> <tr> <td>InternLM-7B-Chat-v1.1</td> <td align="center">28.5</td> <td align="center">4.8</td> <td align="center">40.5 </td> </tr> <tr> <td>InternLM-20B-Chat</td> <td align="center">34.6</td> <td align="center">21.4</td> <td align="center">45.6 </td> </tr> <tr> <td>Qwen-7B-Chat</td> <td align="center">41.9</td> <td align="center">40.5</td> <td align="center">54.4 </td> </tr> <tr> <td>Qwen-14B-Chat</td> <td align="center">58.4</td> <td align="center">53.6</td> <td align="center">59.5</td> </tr> </table> <p align="center"> <br> <img src="assets/code_interpreter_showcase_001.jpg" /> <br> <p> #### Huggingface Agent 千问还具备作为 [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents) 的能力。它在 Huggingface 提供的run模式评测基准上的表现如下: Qwen-Chat also has the capability to be used as a [HuggingFace Agent](https://huggingface.co/docs/transformers/transformers_agents). 
Its performance on the run-mode benchmark provided by HuggingFace is as follows: <table> <tr> <th colspan="4" align="center">HuggingFace Agent Benchmark- Run Mode</th> </tr> <tr> <th align="center">Model</th><th align="center">Tool Selection↑</th><th align="center">Tool Used↑</th><th align="center">Code↑</th> </tr> <tr> <td>GPT-4</td><td align="center">100</td><td align="center">100</td><td align="center">97.4</td> </tr> <tr> <td>GPT-3.5</td><td align="center">95.4</td><td align="center">96.3</td><td align="center">87.0</td> </tr> <tr> <td>StarCoder-Base-15B</td><td align="center">86.1</td><td align="center">87.0</td><td align="center">68.9</td> </tr> <tr> <td>StarCoder-15B</td><td align="center">87.0</td><td align="center">88.0</td><td align="center">68.9</td> </tr> <tr> <td>Qwen-7B-Chat</td><td align="center">87.0</td><td align="center">87.0</td><td align="center">71.5</td> </tr> <tr> <td>Qwen-14B-Chat</td><td align="center">93.5</td><td align="center">94.4</td><td align="center">87.0</td> </tr> </table> <table> <tr> <th colspan="4" align="center">HuggingFace Agent Benchmark - Chat Mode</th> </tr> <tr> <th align="center">Model</th><th align="center">Tool Selection↑</th><th align="center">Tool Used↑</th><th align="center">Code↑</th> </tr> <tr> <td>GPT-4</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">98.5</td> </tr> <tr> <td>GPT-3.5</td><td align="center">97.3</td><td align="center">96.8</td><td align="center">89.6</td> </tr> <tr> <td>StarCoder-Base-15B</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">91.1</td> </tr> <tr> <td>StarCoder-15B</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">89.6</td> </tr> <tr> <td>Qwen-7B-Chat</td><td align="center">94.7</td><td align="center">94.7</td><td align="center">85.1</td> </tr> <tr> <td>Qwen-14B-Chat</td><td align="center">97.9</td><td align="center">97.9</td><td align="center">95.5</td> </tr> </table> <br> ## FAQ 如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。 If you meet problems, please refer to [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and the issues first to search a solution before you launch a new issue. <br> ## 引用 (Citation) 如果你觉得我们的工作对你有帮助,欢迎引用! If you find our work helpful, feel free to give us a cite. ``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ``` <br> ## 使用协议(License Agreement) 我们的代码和模型权重对学术研究完全开放,并支持商用。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT)了解具体的开源协议细节。如需商用,请填写[问卷](https://dashscope.console.aliyun.com/openModelApply/Qwen-14B-Chat)申请。 Our code and checkpoints are open to research purpose, and they are allowed for commercial purposes. 
Check [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) for more details about the license. If you have requirements for commercial use, please fill out the [form](https://dashscope.console.aliyun.com/openModelApply/Qwen-14B-Chat) to apply.
<br>

## 联系我们(Contact Us)

如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件([email protected])联系我们。

If you are interested in leaving a message to either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to [email protected].
allenai/scibert_scivocab_uncased_field_of_study
allenai
2023-12-19T19:48:20Z
602
2
transformers
[ "transformers", "pytorch", "bert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-12-13T21:04:25Z
---
pipeline_tag: text-classification
extra_gated_prompt: "**AI2 ImpACT License – Low Risk Artifacts (LR Agreement)** [https://allenai.org/impact-license](https://allenai.org/impact-license)"
extra_gated_fields:
  Name: text
  Organization/Entity: text
  Email: text
  State/Country: text
  "Intended Use": text
  "I AGREE to the terms and conditions of the LR Agreement above": checkbox
  "I AGREE to AI2’s use of my information for legal notices and administrative matters": checkbox
  "I CERTIFY that the information I have provided is true and accurate": checkbox
---

This field-of-study PyTorch model is a finetuned version of allenai/scibert_scivocab_uncased. Data for finetuning was harvested utilizing OpenAI models with the following prompt:

```
def prompt_with_journal(title, abstract, journal_name):
    message = [
        {"role": "system",
         "content": "You are a highly intelligent and accurate information extraction system. You take title, abstract, journal name of a \
scientific article as input and your task is to classify the scientific field of study of the passage."},
        {"role": "user",
         "content": "You need to classify it with key: 'field_of_study' assign as many 'field_of_study' as you find it fit: \
'Agricultural and Food sciences', \
'Art', \
'Biology',\
'Business',\
'Chemistry',\
'Computer science',\
'Economics',\
'Education',\
'Engineering',\
'Environmental science',\
'Geography',\
'Geology',\
'History',\
'Law',\
'Linguistics',\
'Materials science',\
'Mathematics',\
'Medicine',\
'Philosophy',\
'Physics',\
'Political science',\
'Psychology',\
'Sociology'\
 Only select from the above list, or 'Other'."},
        {"role": "assistant",
         "content": ("```python \n"
                     f"title = { title } \n"
                     f"abstract = { abstract }\n"
                     f"journal_name = { journal_name }\n"
                     "{'field_of_study': [")},
    ]
    return message
```
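
Once access to the gated weights has been granted, an inference sketch with the `transformers` text-classification pipeline might look like the following. The input formatting expected by the classifier and the label names it returns are assumptions here; check the repository's `config.json` (`id2label`) for the actual classes.

```python
from transformers import pipeline

# Hypothetical usage sketch: whether title, abstract and journal name were
# concatenated during finetuning is not documented above, so a plain
# abstract-style sentence is used for illustration.
classifier = pipeline(
    "text-classification",
    model="allenai/scibert_scivocab_uncased_field_of_study",
)

text = "We measure the thermal stability of perovskite solar cells under continuous illumination."
print(classifier(text, top_k=3))  # top-scoring field-of-study labels
```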
text-generation-inference/Mistral-7B-Instruct-v0.2-medusa
text-generation-inference
2024-04-07T06:56:48Z
602
1
transformers
[ "transformers", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-02-23T15:49:09Z
--- license: apache-2.0 ---
mlx-community/whisper-large-mlx
mlx-community
2024-03-09T04:13:33Z
602
1
mlx
[ "mlx", "whisper", "region:us" ]
null
2024-03-09T03:09:26Z
---
library_name: mlx
---

# whisper-large-mlx
This model was converted to MLX format from [`large`]().

## Use with mlx

```bash
git clone https://github.com/ml-explore/mlx-examples.git
cd mlx-examples/whisper/
pip install -r requirements.txt
```

```python
>>> import whisper
>>> whisper.transcribe("FILE_NAME")
```
MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1-GGUF
MaziyarPanahi
2024-04-26T15:26:06Z
602
2
null
[ "gguf", "quantized", "2-bit", "3-bit", "4-bit", "5-bit", "6-bit", "8-bit", "GGUF", "text-generation", "llama", "llama-3", "base_model:MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1", "region:us" ]
text-generation
2024-04-25T09:31:02Z
--- base: MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1 tags: - quantized - 2-bit - 3-bit - 4-bit - 5-bit - 6-bit - 8-bit - GGUF - text-generation - llama - llama-3 - text-generation model_name: Llama-3-70B-Instruct-32k-v0.1-GGUF base_model: MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1 inference: false model_creator: MaziyarPanahi pipeline_tag: text-generation quantized_by: MaziyarPanahi --- # [MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1-GGUF](https://huggingface.co/MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1-GGUF) - Model creator: [MaziyarPanahi](https://huggingface.co/MaziyarPanahi) - Original model: [MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1](https://huggingface.co/MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1) ## Description [MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1-GGUF](https://huggingface.co/MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1-GGUF) contains GGUF format model files for [MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1](https://huggingface.co/MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1). ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. Linux available, in beta as of 27/11/2023. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling. * [GPT4All](https://gpt4all.io/index.html), a free and open source local running GUI, supporting Windows, Linux and macOS with full GPU accel. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. Note, as of time of writing (November 27th 2023), ctransformers has not been updated in a long time and does not support many recent models. ## Special thanks 🙏 Special thanks to [Georgi Gerganov](https://github.com/ggerganov) and the whole team working on [llama.cpp](https://github.com/ggerganov/llama.cpp/) for making all of this possible.
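
Beyond the clients listed above, a small Python sketch using `huggingface_hub` together with llama-cpp-python is given below. The GGUF file name is an assumption (pick one of the quantization files actually published in this repository), and a 70B 4-bit quantization needs on the order of 40+ GB of RAM/VRAM.

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Placeholder file name: replace it with one of the GGUF files listed in this
# repository (e.g. a Q4_K_M quantization).
gguf_path = hf_hub_download(
    repo_id="MaziyarPanahi/Llama-3-70B-Instruct-32k-v0.1-GGUF",
    filename="Llama-3-70B-Instruct-32k-v0.1.Q4_K_M.gguf",
)

llm = Llama(model_path=gguf_path, n_ctx=8192)

response = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Summarize what GGUF is in one sentence."}]
)
print(response["choices"][0]["message"]["content"])
```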
2nzi/videomae-surf-analytics
2nzi
2024-06-09T09:22:38Z
602
0
transformers
[ "transformers", "tensorboard", "safetensors", "videomae", "video-classification", "generated_from_trainer", "base_model:MCG-NJU/videomae-base", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
video-classification
2024-05-12T21:23:50Z
--- license: cc-by-nc-4.0 base_model: MCG-NJU/videomae-base tags: - generated_from_trainer metrics: - accuracy model-index: - name: videomae-base-finetuned-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # videomae-base-finetuned-2 This model is a fine-tuned version of [MCG-NJU/videomae-base](https://huggingface.co/MCG-NJU/videomae-base) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.4272 - Accuracy: 0.9182 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - training_steps: 925 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:------:|:----:|:---------------:|:--------:| | 0.9244 | 0.2011 | 186 | 0.9936 | 0.5818 | | 0.3114 | 1.2011 | 372 | 1.0746 | 0.6818 | | 0.3265 | 2.2011 | 558 | 0.7547 | 0.8364 | | 0.1401 | 3.2011 | 744 | 0.5196 | 0.9 | | 0.0014 | 4.1957 | 925 | 0.4272 | 0.9182 | ### Framework versions - Transformers 4.40.2 - Pytorch 2.1.0+cpu - Datasets 2.19.1 - Tokenizers 0.19.1
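
Since the usage sections above are still marked "More information needed", here is a hedged inference sketch using the `transformers` video-classification pipeline. The label set and the expected clip length come from the undocumented fine-tuning setup, so treat the call below as an assumption and inspect the model's `config.json` for the real classes.

```python
from transformers import pipeline

# Hypothetical example: video decoding needs an extra backend such as
# `av` or `decord` installed alongside transformers.
classifier = pipeline("video-classification", model="2nzi/videomae-surf-analytics")

# Any short local clip works; the file name here is just a placeholder.
predictions = classifier("example_surf_clip.mp4")
print(predictions)  # list of {"label": ..., "score": ...} dictionaries
```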
Ramikan-BR/TiamaPY-v35
Ramikan-BR
2024-06-25T11:01:07Z
602
0
transformers
[ "transformers", "pytorch", "safetensors", "gguf", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "conversational", "en", "base_model:unsloth/tinyllama-chat-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2024-06-24T22:28:32Z
--- base_model: unsloth/tinyllama-chat-bnb-4bit language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl - sft --- # Uploaded model - **Developed by:** Ramikan-BR - **License:** apache-2.0 - **Finetuned from model :** unsloth/tinyllama-chat-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
Jianping746/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF
Jianping746
2024-06-25T13:27:14Z
602
0
null
[ "gguf", "llama-cpp", "gguf-my-repo", "base_model:deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", "license:other", "region:us" ]
null
2024-06-25T13:26:27Z
--- base_model: deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct license: other license_name: deepseek-license license_link: LICENSE tags: - llama-cpp - gguf-my-repo --- # Jianping746/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF This model was converted to GGUF format from [`deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct`](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo Jianping746/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF --hf-file deepseek-coder-v2-lite-instruct-q4_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Jianping746/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF --hf-file deepseek-coder-v2-lite-instruct-q4_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo Jianping746/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF --hf-file deepseek-coder-v2-lite-instruct-q4_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo Jianping746/DeepSeek-Coder-V2-Lite-Instruct-Q4_K_M-GGUF --hf-file deepseek-coder-v2-lite-instruct-q4_k_m.gguf -c 2048 ```
Tanvir1337/BanglaLLama-3-8b-BnWiki-Instruct-IQ3_XXS-GGUF
Tanvir1337
2024-06-25T21:43:28Z
602
0
null
[ "gguf", "bangla", "large language model", "llama-cpp", "gguf-my-repo", "bn", "en", "dataset:BanglaLLM/bangla-alpaca-orca", "base_model:BanglaLLM/BanglaLLama-3-8b-BnWiki-Instruct", "license:llama3", "region:us" ]
null
2024-06-25T21:43:11Z
--- base_model: BanglaLLM/BanglaLLama-3-8b-BnWiki-Instruct datasets: - BanglaLLM/bangla-alpaca-orca language: - bn - en license: llama3 tags: - bangla - large language model - llama-cpp - gguf-my-repo --- # Tanvir1337/BanglaLLama-3-8b-BnWiki-Instruct-IQ3_XXS-GGUF This model was converted to GGUF format from [`BanglaLLM/BanglaLLama-3-8b-BnWiki-Instruct`](https://huggingface.co/BanglaLLM/BanglaLLama-3-8b-BnWiki-Instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/BanglaLLM/BanglaLLama-3-8b-BnWiki-Instruct) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo Tanvir1337/BanglaLLama-3-8b-BnWiki-Instruct-IQ3_XXS-GGUF --hf-file banglallama-3-8b-bnwiki-instruct-iq3_xxs-imat.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Tanvir1337/BanglaLLama-3-8b-BnWiki-Instruct-IQ3_XXS-GGUF --hf-file banglallama-3-8b-bnwiki-instruct-iq3_xxs-imat.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo Tanvir1337/BanglaLLama-3-8b-BnWiki-Instruct-IQ3_XXS-GGUF --hf-file banglallama-3-8b-bnwiki-instruct-iq3_xxs-imat.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo Tanvir1337/BanglaLLama-3-8b-BnWiki-Instruct-IQ3_XXS-GGUF --hf-file banglallama-3-8b-bnwiki-instruct-iq3_xxs-imat.gguf -c 2048 ```
hezirel/Mistral-7B-Instruct-v0.3-Q3_K_S-GGUF
hezirel
2024-06-27T20:26:26Z
602
0
null
[ "gguf", "llama-cpp", "gguf-my-repo", "base_model:mistralai/Mistral-7B-Instruct-v0.3", "license:apache-2.0", "region:us" ]
null
2024-06-27T20:26:13Z
--- base_model: mistralai/Mistral-7B-Instruct-v0.3 license: apache-2.0 tags: - llama-cpp - gguf-my-repo --- # hezirel/Mistral-7B-Instruct-v0.3-Q3_K_S-GGUF This model was converted to GGUF format from [`mistralai/Mistral-7B-Instruct-v0.3`](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo hezirel/Mistral-7B-Instruct-v0.3-Q3_K_S-GGUF --hf-file mistral-7b-instruct-v0.3-q3_k_s.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo hezirel/Mistral-7B-Instruct-v0.3-Q3_K_S-GGUF --hf-file mistral-7b-instruct-v0.3-q3_k_s.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo hezirel/Mistral-7B-Instruct-v0.3-Q3_K_S-GGUF --hf-file mistral-7b-instruct-v0.3-q3_k_s.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo hezirel/Mistral-7B-Instruct-v0.3-Q3_K_S-GGUF --hf-file mistral-7b-instruct-v0.3-q3_k_s.gguf -c 2048 ```
HeyLucasLeao/gpt-neo-small-portuguese
HeyLucasLeao
2021-06-19T20:51:57Z
601
7
transformers
[ "transformers", "pytorch", "gpt_neo", "text-generation", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
2022-03-02T23:29:04Z
## GPT-Neo Small Portuguese

#### Model Description

This is a finetuned version of GPT-Neo 125M by EleutherAI for the Portuguese language.

#### Training data

It was trained on 227,382 selected texts from a PTWiki dump. You can find all the data here: https://archive.org/details/ptwiki-dump-20210520

#### Training Procedure

Every text was passed through a GPT2-Tokenizer with bos and eos tokens to separate them, using the maximum sequence length that GPT-Neo supports. It was finetuned with the default settings of the Trainer class, available in the Hugging Face library.

##### Learning Rate: **2e-4**
##### Epochs: **1**

#### Goals

My true intention was purely educational: to make a Portuguese version of this model available.

#### How to use

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("HeyLucasLeao/gpt-neo-small-portuguese")
model = AutoModelForCausalLM.from_pretrained("HeyLucasLeao/gpt-neo-small-portuguese")

# the input ids below are created on the GPU, so move the model there as well
model.cuda()

text = 'eu amo o brasil.'

generated = tokenizer(f'<|startoftext|> {text}', return_tensors='pt').input_ids.cuda()

# Generating texts
sample_outputs = model.generate(generated,
                                # Use sampling instead of greedy decoding
                                do_sample=True,
                                # Keep only the top 3 tokens with the highest probability
                                top_k=3,
                                # Maximum sequence length
                                max_length=200,
                                # Keep only the most probable tokens with cumulative probability of 95%
                                top_p=0.95,
                                # Changes randomness of generated sequences
                                temperature=1.9,
                                # Number of sequences to generate
                                num_return_sequences=3)

# Decoding and printing sequences
for i, sample_output in enumerate(sample_outputs):
    print(">> Generated text {}\n\n{}".format(i + 1, tokenizer.decode(sample_output.tolist())))

# >> Generated text
#Setting `pad_token_id` to `eos_token_id`:50256 for open-end generation.
#>> Generated text 1
#<|startoftext|> eu amo o brasil. O termo foi usado por alguns autores como uma forma de designar a formação do poder político do Brasil. A partir da década de 1960, o termo passou a ser usado para designar a formação política do Brasil. A partir de meados da década de 1970 e até o inicio dos anos 2000, o termo foi aplicado à formação político-administrativo do país, sendo utilizado por alguns autores como uma expressão de "política de direita". História Antecedentes O termo "político-administrário" foi usado pela primeira vez em 1891 por um gru
#>> Generated text 2
#<|startoftext|> eu amo o brasil. É uma das muitas pessoas do mundo, ao contrário da maioria das pessoas, que são chamados de "pessoas do Brasil", que são chamados de "brincos do país" e que têm uma carreira de mais de um século. O termo "brincal de ouro" é usado em referências às pessoas que vivem no Brasil, e que são chamados "brincos do país", que são "cidade" e que vivem na cidade de Nova York e que vive em um país onde a maior parte das pessoas são chamados de "cidades". Hist
#>> Generated text 3
#<|startoftext|> eu amo o brasil. É uma expressão que se refere ao uso de um instrumento musical em particular para se referir à qualidade musical, o que é uma expressão da qualidade da qualidade musical de uma pessoa. A expressão "amor" (em inglês, amo), é a expressão que pode ser usada com o intuito empregado em qualquer situação em que a vontade de uma pessoa de se sentir amado ou amoroso é mais do que um desejo de uma vontade. Em geral, a expressão "amoro" (do inglês, amo) pode também se referir tanto a uma pessoa como um instrumento de cordas ou de uma
```
cedpsam/chatbot_fr
cedpsam
2023-08-28T10:13:03Z
601
1
transformers
[ "transformers", "pytorch", "jax", "safetensors", "gpt2", "text-generation", "conversational", "fr", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2022-03-02T23:29:05Z
---
language: fr
tags:
- conversational
widget:
- text: "bonjour."
- text: "mais encore"
- text: "est ce que l'argent achete le bonheur?"
---

## A DialoGPT model trained on French OpenSubtitles with a custom tokenizer

Trained with this notebook:
https://colab.research.google.com/drive/1pfCV3bngAmISNZVfDvBMyEhQKuYw37Rl#scrollTo=AyImj9qZYLRi&uniqifier=3

Config from microsoft/DialoGPT-medium.

Dataset generated from the 2018 OpenSubtitles corpus downloaded from OPUS, following these guidelines:
https://github.com/PolyAI-LDN/conversational-datasets/tree/master/opensubtitles
with this notebook:
https://colab.research.google.com/drive/1uyh3vJ9nEjqOHI68VD73qxt4olJzODxi#scrollTo=deaacv4XfLMk

### How to use

Now we are ready to try out how the model works as a chatting partner!

```python
import torch
from transformers import AutoTokenizer, AutoModelWithLMHead

tokenizer = AutoTokenizer.from_pretrained("cedpsam/chatbot_fr")
model = AutoModelWithLMHead.from_pretrained("cedpsam/chatbot_fr")

for step in range(6):
    # encode the new user input, add the eos_token and return a tensor in PyTorch
    new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')
    # print(new_user_input_ids)

    # append the new user input tokens to the chat history
    bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids

    # generate a response while limiting the total chat history to 1000 tokens
    chat_history_ids = model.generate(
        bot_input_ids, max_length=1000,
        pad_token_id=tokenizer.eos_token_id,
        top_p=0.92, top_k=50
    )

    # pretty print last output tokens from bot
    print("DialoGPT: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
```
jonatasgrosman/wav2vec2-xls-r-1b-english
jonatasgrosman
2022-12-14T02:01:29Z
601
9
transformers
[ "transformers", "pytorch", "wav2vec2", "automatic-speech-recognition", "en", "hf-asr-leaderboard", "mozilla-foundation/common_voice_8_0", "robust-speech-event", "dataset:mozilla-foundation/common_voice_8_0", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
automatic-speech-recognition
2022-03-02T23:29:05Z
--- language: - en license: apache-2.0 tags: - automatic-speech-recognition - en - hf-asr-leaderboard - mozilla-foundation/common_voice_8_0 - robust-speech-event datasets: - mozilla-foundation/common_voice_8_0 model-index: - name: XLS-R Wav2Vec2 English by Jonatas Grosman results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice 8 type: mozilla-foundation/common_voice_8_0 config: en split: test args: language: en metrics: - name: Test WER type: wer value: 21.05 - name: Test CER type: cer value: 8.44 - name: Test WER (+LM) type: wer value: 17.31 - name: Test CER (+LM) type: cer value: 7.77 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Dev Data type: speech-recognition-community-v2/dev_data args: en metrics: - name: Dev WER type: wer value: 20.53 - name: Dev CER type: cer value: 9.31 - name: Dev WER (+LM) type: wer value: 17.7 - name: Dev CER (+LM) type: cer value: 8.93 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Robust Speech Event - Test Data type: speech-recognition-community-v2/eval_data args: en metrics: - name: Test WER type: wer value: 17.88 --- # Fine-tuned XLS-R 1B model for speech recognition in English Fine-tuned [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on English using the train and validation splits of [Common Voice 8.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_8_0), [Multilingual LibriSpeech](https://www.openslr.org/94/), [TED-LIUMv3](https://www.openslr.org/51/), and [Voxpopuli](https://github.com/facebookresearch/voxpopuli). When using this model, make sure that your speech input is sampled at 16kHz. This model has been fine-tuned by the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) tool, and thanks to the GPU credits generously given by the [OVHcloud](https://www.ovhcloud.com/en/public-cloud/ai-training/) :) ## Usage Using the [HuggingSound](https://github.com/jonatasgrosman/huggingsound) library: ```python from huggingsound import SpeechRecognitionModel model = SpeechRecognitionModel("jonatasgrosman/wav2vec2-xls-r-1b-english") audio_paths = ["/path/to/file.mp3", "/path/to/another_file.wav"] transcriptions = model.transcribe(audio_paths) ``` Writing your own inference script: ```python import torch import librosa from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor LANG_ID = "en" MODEL_ID = "jonatasgrosman/wav2vec2-xls-r-1b-english" SAMPLES = 10 test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]") processor = Wav2Vec2Processor.from_pretrained(MODEL_ID) model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID) # Preprocessing the datasets. # We need to read the audio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = librosa.load(batch["path"], sr=16_000) batch["speech"] = speech_array batch["sentence"] = batch["sentence"].upper() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_sentences = processor.batch_decode(predicted_ids) ``` ## Evaluation Commands 1. 
To evaluate on `mozilla-foundation/common_voice_8_0` with split `test` ```bash python eval.py --model_id jonatasgrosman/wav2vec2-xls-r-1b-english --dataset mozilla-foundation/common_voice_8_0 --config en --split test ``` 2. To evaluate on `speech-recognition-community-v2/dev_data` ```bash python eval.py --model_id jonatasgrosman/wav2vec2-xls-r-1b-english --dataset speech-recognition-community-v2/dev_data --config en --split validation --chunk_length_s 5.0 --stride_length_s 1.0 ``` ## Citation If you want to cite this model you can use this: ```bibtex @misc{grosman2021xlsr-1b-english, title={Fine-tuned {XLS-R} 1{B} model for speech recognition in {E}nglish}, author={Grosman, Jonatas}, howpublished={\url{https://huggingface.co/jonatasgrosman/wav2vec2-xls-r-1b-english}}, year={2022} } ```
amphora/FinABSA-Longer
amphora
2023-06-20T01:01:01Z
601
3
transformers
[ "transformers", "pytorch", "safetensors", "t5", "text2text-generation", "finbert", "financial-sentiment-analysis", "sentiment-analysis", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text2text-generation
2022-11-04T05:33:50Z
---
language: en
widget:
- text: " Chinese stocks’ plunge on Monday over fears about China’s new leadership team “may be misguided,” consulting firm Teneo said. Chinese stocks in Hong Kong and New York, especially internet tech giants such as [TGT], dropped on the first trading day after Chinese President Xi Jinping cemented his firm grip on power with a new core leadership team filled with his loyalists. Over the last several years, Xi has shown a preference for greater state involvement in the economy. “Close relationships with Xi notwithstanding, Li Qiang, Li Xi, and Cai Qi all enter the [Politburo standing committee] after heading up rich provinces where economic growth is still the top priority,” Teneo Managing Director Gabriel Wildau and a team said in a note."
tags:
- t5
- finbert
- financial-sentiment-analysis
- sentiment-analysis
license: apache-2.0
---

## Model Description

FinABSA-Longer is a T5-Large model trained for Aspect-Based Sentiment Analysis (ABSA) tasks using [SEntFiN 1.0](https://asistdl.onlinelibrary.wiley.com/doi/10.1002/asi.24634?af=R) and additional augmentation techniques. It shows robust behavior on even longer sequences compared to the previous [FinABSA model](https://huggingface.co/amphora/FinABSA). By replacing the target aspect with a [TGT] token, the model predicts the sentiment while concentrating on that aspect. [GitHub Repo](https://github.com/guijinSON/FinABSA)

## How to use

You can use this model directly with the AutoModelForSeq2SeqLM class.

```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("amphora/FinABSA-Longer")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("amphora/FinABSA-Longer")

>>> input_str = "[TGT] stocks dropped 42% while Samsung rallied."
>>> input = tokenizer(input_str, return_tensors='pt')
>>> output = model.generate(**input, max_length=20)
>>> print(tokenizer.decode(output[0]))
The sentiment for [TGT] in the given sentence is NEGATIVE.

>>> input_str = "Tesla stocks dropped 42% while [TGT] rallied."
>>> input = tokenizer(input_str, return_tensors='pt')
>>> output = model.generate(**input, max_length=20)
>>> print(tokenizer.decode(output[0]))
The sentiment for [TGT] in the given sentence is POSITIVE.
```

## Evaluation Results

Using a test split arbitrarily extracted from [SEntFiN 1.0](https://asistdl.onlinelibrary.wiley.com/doi/10.1002/asi.24634?af=R), the model scores an average accuracy of 87%.
timm/efficientformerv2_s1.snap_dist_in1k
timm
2024-02-10T23:30:32Z
601
1
timm
[ "timm", "pytorch", "image-classification", "dataset:imagenet-1k", "arxiv:2212.08059", "license:apache-2.0", "region:us" ]
image-classification
2023-02-03T21:11:15Z
---
license: apache-2.0
library_name: timm
tags:
- image-classification
- timm
datasets:
- imagenet-1k
---
# Model card for efficientformerv2_s1.snap_dist_in1k

An EfficientFormer-V2 image classification model. Pretrained with distillation on ImageNet-1k.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 6.2
  - GMACs: 0.7
  - Activations (M): 7.7
  - Image size: 224 x 224
- **Original:** https://github.com/snap-research/EfficientFormer
- **Papers:**
  - Rethinking Vision Transformers for MobileNet Size and Speed: https://arxiv.org/abs/2212.08059
- **Dataset:** ImageNet-1k

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import timm
import torch

img = Image.open(
    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))

model = timm.create_model('efficientformerv2_s1.snap_dist_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(
    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))

model = timm.create_model(
    'efficientformerv2_s1.snap_dist_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled (i.e. a (batch_size, num_features, H, W) tensor)

output = model.forward_head(output, pre_logits=True)
# output is (batch_size, num_features) tensor
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(
    urlopen('https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'))

model = timm.create_model(
    'efficientformerv2_s1.snap_dist_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g. for efficientformerv2_l:
    #  torch.Size([2, 40, 56, 56])
    #  torch.Size([2, 80, 28, 28])
    #  torch.Size([2, 192, 14, 14])
    #  torch.Size([2, 384, 7, 7])
    print(o.shape)
```

## Model Comparison
|model                               |top1  |top5  |param_count|img_size|
|------------------------------------|------|------|-----------|--------|
|efficientformerv2_l.snap_dist_in1k  |83.628|96.54 |26.32      |224     |
|efficientformer_l7.snap_dist_in1k   |83.368|96.534|82.23      |224     |
|efficientformer_l3.snap_dist_in1k   |82.572|96.24 |31.41      |224     |
|efficientformerv2_s2.snap_dist_in1k |82.128|95.902|12.71      |224     |
|efficientformer_l1.snap_dist_in1k   |80.496|94.984|12.29      |224     |
|efficientformerv2_s1.snap_dist_in1k |79.698|94.698|6.19       |224     |
|efficientformerv2_s0.snap_dist_in1k |76.026|92.77 |3.6        |224     |

## Citation
```bibtex
@article{li2022rethinking,
  title={Rethinking Vision Transformers for MobileNet Size and Speed},
  author={Li, Yanyu and Hu, Ju and Wen, Yang and Evangelidis, Georgios and Salahi, Kamyar and Wang, Yanzhi and Tulyakov, Sergey and Ren, Jian},
  journal={arXiv preprint arXiv:2212.08059},
  year={2022}
}
```
```bibtex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
}
```
lorahub/flan_t5_large-quail_context_question_description_answer_id
lorahub
2023-07-24T10:02:37Z
601
0
peft
[ "peft", "region:us" ]
null
2023-07-24T10:02:28Z
---
library_name: peft
---
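## How to use

Below is a minimal usage sketch for loading this LoRA module with `peft`. It assumes the adapter was trained on top of `google/flan-t5-large` (as the repository name suggests); the example prompt is illustrative only.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModel

# Assumption: the LoRA module targets google/flan-t5-large as its base model.
base_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-large")
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")

# Attach the LoRA weights from this repository to the base model.
model = PeftModel.from_pretrained(
    base_model,
    "lorahub/flan_t5_large-quail_context_question_description_answer_id",
)

# Illustrative prompt; the exact QuAIL prompt template used for tuning is not documented here.
prompt = "Answer the question based on the context.\nContext: ...\nQuestion: ...\nAnswer:"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=16)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```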
KhantKyaw/Chat_GPT-2
KhantKyaw
2024-06-18T09:20:22Z
601
1
transformers
[ "transformers", "safetensors", "gpt2", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-25T16:16:48Z
```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel


def generate_response(input_text):
    inputs = tokenizer(input_text, return_tensors="pt")
    output_sequences = model.generate(
        input_ids=inputs['input_ids'],
        attention_mask=inputs['attention_mask'],
        max_length=100,  # Adjusted max_length
        temperature=0.3,
        top_k=40,
        top_p=0.85,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        early_stopping=True,
        do_sample=True,
        use_cache=True,
    )
    full_generated_text = tokenizer.decode(output_sequences[0], skip_special_tokens=True)
    # The generated text is expected to contain a '[Bot]' marker;
    # everything after that marker is returned as the chatbot's reply.
    bot_response_start = full_generated_text.find('[Bot]') + len('[Bot]')
    bot_response = full_generated_text[bot_response_start:]
    return bot_response


model_name = 'KhantKyaw/Chat_GPT-2'
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
model = GPT2LMHeadModel.from_pretrained(model_name)

user_input = "Hello, how are you?"  # example prompt; replace with your own input
response = generate_response(user_input)
print("Chatbot:", response)
```
mradermacher/PALO-13B-GGUF
mradermacher
2024-05-06T05:57:44Z
601
0
transformers
[ "transformers", "gguf", "en", "base_model:MBZUAI/PALO-13B", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2024-03-26T00:16:24Z
--- base_model: MBZUAI/PALO-13B language: - en library_name: transformers license: apache-2.0 quantized_by: mradermacher --- ## About static quants of https://huggingface.co/MBZUAI/PALO-13B <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q2_K.gguf) | Q2_K | 5.1 | | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.IQ3_XS.gguf) | IQ3_XS | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.IQ3_S.gguf) | IQ3_S | 6.0 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q3_K_S.gguf) | Q3_K_S | 6.0 | | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.IQ3_M.gguf) | IQ3_M | 6.3 | | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q3_K_M.gguf) | Q3_K_M | 6.6 | lower quality | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q3_K_L.gguf) | Q3_K_L | 7.2 | | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.IQ4_XS.gguf) | IQ4_XS | 7.3 | | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q4_0.gguf) | Q4_0 | 7.7 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.IQ4_NL.gguf) | IQ4_NL | 7.7 | prefer IQ4_XS | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q4_K_S.gguf) | Q4_K_S | 7.7 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q4_K_M.gguf) | Q4_K_M | 8.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q5_K_S.gguf) | Q5_K_S | 9.3 | | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q5_K_M.gguf) | Q5_K_M | 9.5 | | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q6_K.gguf) | Q6_K | 11.0 | very good quality | | [GGUF](https://huggingface.co/mradermacher/PALO-13B-GGUF/resolve/main/PALO-13B.Q8_0.gguf) | Q8_0 | 14.1 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
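## Example (llama-cpp-python)

For a quick start in Python, the sketch below downloads one of the quants listed above and runs it with `llama-cpp-python`; the choice of the Q4_K_M file and all generation settings are illustrative, not recommendations from the quantizer.

```python
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download one of the quantized files from the table above (Q4_K_M is a balanced default).
model_path = hf_hub_download(
    repo_id="mradermacher/PALO-13B-GGUF",
    filename="PALO-13B.Q4_K_M.gguf",
)

# n_ctx and n_gpu_layers are illustrative; tune them for your hardware.
llm = Llama(model_path=model_path, n_ctx=2048, n_gpu_layers=0)

output = llm("Describe what a multilingual assistant can help with.", max_tokens=128)
print(output["choices"][0]["text"])
```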
lmstudio-community/AlchemistCoder-DS-6.7B-GGUF
lmstudio-community
2024-05-31T00:29:12Z
601
1
null
[ "gguf", "code generation", "text-generation", "arxiv:2405.19265", "base_model:internlm/AlchemistCoder-DS-6.7B", "license:apache-2.0", "region:us" ]
text-generation
2024-05-30T17:12:45Z
--- license: apache-2.0 tags: - code generation quantized_by: bartowski pipeline_tag: text-generation lm_studio: param_count: 6.7b use_case: coding release_date: 29-05-2024 model_creator: InternLM prompt_template: Alpaca system_prompt: none base_model: DeepSeek-Coder-Base original_repo: internlm/AlchemistCoder-DS-6.7B base_model: internlm/AlchemistCoder-DS-6.7B --- ## 💫 Community Model> AlchemistCoder DS 6.7B by InternLM *👾 [LM Studio](https://lmstudio.ai) Community models highlights program. Highlighting new & noteworthy models by the community. Join the conversation on [Discord](https://discord.gg/aPQfnNkxGC)*. **Model creator:** [InternLM](https://huggingface.co/internlm)<br> **Original model**: [AlchemistCoder-DS-6.7B](https://huggingface.co/internlm/AlchemistCoder-DS-6.7B)<br> **GGUF quantization:** provided by [bartowski](https://huggingface.co/bartowski) based on `llama.cpp` release [b3024](https://github.com/ggerganov/llama.cpp/releases/tag/b3024)<br> ## Model Summary: AlchemistCoder is a series of coding models by InternLM.<br> This model is tuned from the DeepSeek coder model, and should excel at all coding related tasks. ## Prompt template: Choose the `Alpaca` preset in your LM Studio. Under the hood, the model will see a prompt that's formatted like so: ``` ### Instruction: {prompt} ### Response: ``` ## Technical Details Training details: - **AlchemistPrompts**: Designed as data-specific prompts for harmonizing inherent conflicts in multi-source data and mitigating the instruction/response misalignment at a fined-grained level. - **Code Comprehenstion Tasks**: Sourced from the process of data construction, consisting of instruction evolution, data filtering, and code review. - **Harmonized Multi-source Data**: Instruction tuned on 200M tokens, including 6 types of high-quality data. - **Superior Model Performance**: Surpassing all the open-source models of the same size (6.7/7B), and rivaling or even beating larger models (15B/33B/70B/ChatGPT) on 6 code benchmarks. - **Advanced generic capabilities**: Demonstrated by the significant improvements on MMLU, BBH, and GSM8K. For more information, check out their paper here: https://arxiv.org/abs/2405.19265 ## Special thanks 🙏 Special thanks to [Georgi Gerganov](https://github.com/ggerganov) and the whole team working on [llama.cpp](https://github.com/ggerganov/llama.cpp/) 🙏 Special thanks to [Kalomaze](https://github.com/kalomaze) and [Dampf](https://github.com/Dampfinchen) for their work on the dataset (linked [here](https://gist.github.com/bartowski1182/eb213dccb3571f863da82e99418f81e8)) that was used for calculating the imatrix for all sizes. ## Disclaimers LM Studio is not the creator, originator, or owner of any Model featured in the Community Model Program. Each Community Model is created and provided by third parties. LM Studio does not endorse, support, represent or guarantee the completeness, truthfulness, accuracy, or reliability of any Community Model. You understand that Community Models can produce content that might be offensive, harmful, inaccurate or otherwise inappropriate, or deceptive. Each Community Model is the sole responsibility of the person or entity who originated such Model. LM Studio may not monitor or control the Community Models and cannot, and does not, take responsibility for any such Model. LM Studio disclaims all warranties or guarantees about the accuracy, reliability or benefits of the Community Models. 
LM Studio further disclaims any warranty that the Community Model will meet your requirements, be secure, uninterrupted or available at any time or location, or error-free, viruses-free, or that any errors will be corrected, or otherwise. You will be solely responsible for any damage resulting from your use of or access to the Community Models, your downloading of any Community Model, or use of any other Community Model provided by or through LM Studio.
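For use outside LM Studio, the Alpaca template shown above can be reproduced with a small helper; this is only a sketch of the prompt construction, and the example instruction is illustrative.

```python
def build_alpaca_prompt(instruction: str) -> str:
    # Matches the Alpaca template shown above; per the card metadata, no system prompt is used.
    return f"### Instruction:\n{instruction}\n\n### Response:\n"


print(build_alpaca_prompt("Write a Python function that checks whether a string is a palindrome."))
```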
NikolayKozloff/latxa-7b-v1.2-Q8_0-GGUF
NikolayKozloff
2024-06-24T15:25:41Z
601
1
null
[ "gguf", "llama-cpp", "gguf-my-repo", "text-generation", "eu", "en", "dataset:HiTZ/latxa-corpus-v1.1", "base_model:HiTZ/latxa-7b-v1.2", "license:llama2", "model-index", "region:us" ]
text-generation
2024-06-24T15:25:12Z
--- base_model: HiTZ/latxa-7b-v1.2 datasets: - HiTZ/latxa-corpus-v1.1 language: - eu - en license: llama2 metrics: - accuracy - f1 - perplexity pipeline_tag: text-generation tags: - llama-cpp - gguf-my-repo model-index: - name: Latxa-7b-v1.2 results: - task: type: multiple-choice dataset: name: xstory_cloze type: XStory metrics: - type: Accuracy (0-shot) value: 65.72 name: Accuracy (0-shot) source: url: https://arxiv.org/abs/2403.20266 name: Paper - task: type: multiple-choice dataset: name: belebele type: Belebele metrics: - type: Accuracy (5-shot) value: 36.89 name: Accuracy (5-shot) source: url: https://arxiv.org/abs/2403.20266 name: Paper - task: type: mix dataset: name: basque_glue type: BasqueGLUE metrics: - type: Average scores (5-shot) value: 51.78 name: Average scores (5-shot) source: url: https://arxiv.org/abs/2403.20266 name: Paper - task: type: multiple_choice dataset: name: eus_proficiency type: EusProficiency metrics: - type: Accuracy (5-shot) value: 32.44 name: Accuracy (5-shot) source: url: https://arxiv.org/abs/2403.20266 name: Paper - task: type: multiple_choice dataset: name: eus_reading type: EusReading metrics: - type: Accuracy (5-shot) value: 30.4 name: Accuracy (5-shot) source: url: https://arxiv.org/abs/2403.20266 name: Paper - task: type: multiple_choice dataset: name: eus_trivia type: EusTrivia metrics: - type: Accuracy (5-shot) value: 44.37 name: Accuracy (5-shot) source: url: https://arxiv.org/abs/2403.20266 name: Paper - task: type: multiple_choice dataset: name: eus_exams type: EusExams metrics: - type: Accuracy (5-shot) value: 34.2 name: Accuracy (5-shot) source: url: https://arxiv.org/abs/2403.20266 name: Paper --- # NikolayKozloff/latxa-7b-v1.2-Q8_0-GGUF This model was converted to GGUF format from [`HiTZ/latxa-7b-v1.2`](https://huggingface.co/HiTZ/latxa-7b-v1.2) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/HiTZ/latxa-7b-v1.2) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo NikolayKozloff/latxa-7b-v1.2-Q8_0-GGUF --hf-file latxa-7b-v1.2-q8_0.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo NikolayKozloff/latxa-7b-v1.2-Q8_0-GGUF --hf-file latxa-7b-v1.2-q8_0.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo NikolayKozloff/latxa-7b-v1.2-Q8_0-GGUF --hf-file latxa-7b-v1.2-q8_0.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo NikolayKozloff/latxa-7b-v1.2-Q8_0-GGUF --hf-file latxa-7b-v1.2-q8_0.gguf -c 2048 ```
T-Systems-onsite/german-roberta-sentence-transformer-v2
T-Systems-onsite
2023-04-27T19:28:49Z
600
9
transformers
[ "transformers", "pytorch", "tf", "safetensors", "xlm-roberta", "feature-extraction", "sentence_embedding", "search", "roberta", "xlm-r-distilroberta-base-paraphrase-v1", "paraphrase", "de", "dataset:STSbenchmark", "license:mit", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
feature-extraction
2022-03-02T23:29:05Z
---
language: de
license: mit
tags:
- sentence_embedding
- search
- pytorch
- xlm-roberta
- roberta
- xlm-r-distilroberta-base-paraphrase-v1
- paraphrase
datasets:
- STSbenchmark
metrics:
- Spearman’s rank correlation
- cosine similarity
---

# German RoBERTa for Sentence Embeddings V2

**The new [T-Systems-onsite/cross-en-de-roberta-sentence-transformer](https://huggingface.co/T-Systems-onsite/cross-en-de-roberta-sentence-transformer) model is slightly better for German. It is also the current best model for English and works cross-lingually. Please consider using that model.**
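If you still want to use this model directly, a minimal sketch with `transformers` is shown below; mean pooling over token embeddings is an assumption here (it matches common practice for paraphrase-tuned XLM-R models) rather than something stated in this card.

```python
import torch
from transformers import AutoTokenizer, AutoModel

model_id = "T-Systems-onsite/german-roberta-sentence-transformer-v2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)

sentences = ["Das ist ein Beispielsatz.", "Dies ist ein weiterer Beispielsatz."]
encoded = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    token_embeddings = model(**encoded).last_hidden_state  # (batch, seq_len, hidden)

# Mean pooling over tokens, ignoring padding positions (assumed pooling strategy).
mask = encoded["attention_mask"].unsqueeze(-1).float()
sentence_embeddings = (token_embeddings * mask).sum(dim=1) / mask.sum(dim=1)

# Cosine similarity between the two example sentences.
similarity = torch.nn.functional.cosine_similarity(
    sentence_embeddings[0], sentence_embeddings[1], dim=0
)
print(similarity.item())
```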
sachaarbonel/bert-italian-cased-finetuned-pos
sachaarbonel
2023-03-27T04:45:46Z
600
4
transformers
[ "transformers", "pytorch", "jax", "safetensors", "bert", "token-classification", "it", "dataset:xtreme", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2022-03-02T23:29:05Z
---
language: it
datasets:
- xtreme
---

# Italian-Bert (Italian Bert) + POS 🎃🏷

This model is a version of [Bert Base Italian](https://huggingface.co/dbmdz/bert-base-italian-cased) fine-tuned on [xtreme udpos Italian](https://huggingface.co/nlp/viewer/?dataset=xtreme&config=udpos.Italian) for the **POS** downstream task.

## Details of the downstream task (POS) - Dataset

- [Dataset: xtreme udpos Italian](https://huggingface.co/nlp/viewer/?dataset=xtreme&config=udpos.Italian) 📚

| Dataset | # Examples |
| ------- | ---------- |
| Train   | 716 K      |
| Dev     | 85 K       |

- [Fine-tune on NER script provided by @stefan-it](https://raw.githubusercontent.com/stefan-it/fine-tuned-berts-seq/master/scripts/preprocess.py)
- Labels covered:

```
ADJ ADP ADV AUX CCONJ DET INTJ NOUN NUM PART PRON PROPN PUNCT SCONJ SYM VERB X
```

## Metrics on evaluation set 🧾

| Metric    | Score     |
| :-------: | :-------: |
| F1        | **97.25** |
| Precision | **97.15** |
| Recall    | **97.36** |

## Model in action 🔨

Example of usage

```python
from transformers import pipeline

nlp_pos = pipeline(
    "ner",
    model="sachaarbonel/bert-italian-cased-finetuned-pos",
    tokenizer=(
        'sachaarbonel/bert-italian-cased-finetuned-pos',
        {"use_fast": False}
    ))

text = "Roma è la Capitale d'Italia."

nlp_pos(text)

'''
Output:
--------
[{'entity': 'PROPN', 'index': 1, 'score': 0.9995346665382385, 'word': 'roma'},
 {'entity': 'AUX', 'index': 2, 'score': 0.9966597557067871, 'word': 'e'},
 {'entity': 'DET', 'index': 3, 'score': 0.9994786977767944, 'word': 'la'},
 {'entity': 'NOUN', 'index': 4, 'score': 0.9995198249816895, 'word': 'capitale'},
 {'entity': 'ADP', 'index': 5, 'score': 0.9990894198417664, 'word': 'd'},
 {'entity': 'PART', 'index': 6, 'score': 0.57159024477005, 'word': "'"},
 {'entity': 'PROPN', 'index': 7, 'score': 0.9994804263114929, 'word': 'italia'},
 {'entity': 'PUNCT', 'index': 8, 'score': 0.9772886633872986, 'word': '.'}]
'''
```

Yeah! Not too bad 🎉

> Created by [Sacha Arbonel/@sachaarbonel](https://twitter.com/sachaarbonel) | [LinkedIn](https://www.linkedin.com/in/sacha-arbonel)

> Made with <span style="color: #e25555;">&hearts;</span> in Paris
Ammar-alhaj-ali/arabic-MARBERT-poetry-classification
Ammar-alhaj-ali
2022-08-16T08:57:47Z
600
1
transformers
[ "transformers", "pytorch", "bert", "text-classification", "text classification", "arabic", "poetry", "ar", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2022-08-15T16:20:26Z
--- language: - ar widget: - text: "قِفَا نَبْكِ مِنْ ذِكْرَى حَبِيبٍ ومَنْزِلِ [sep]بِسِقْطِ اللِّوَى بَيْنَ الدَّخُولِ فَحَوْمَلِ" - text: "ليس الغريب غريب الشام واليمنُ[sep]إن الغريب غريب اللحد والكفن" - text: "عَلى قَدْرِ أهْلِ العَزْم تأتي العَزائِمُ[sep] وَتأتي علَى قَدْرِ الكِرامِ المَكارمُ" - text: "قم للمعلم وفّه التبجيلا[sep] كاد المعلم أن يكون رسولا" tags: - text classification - arabic - poetry --- ## Arabic MARBERT Poetry Classification Model #### Model description **arabic-MARBERT-poetry-classification Model** is a poetry classification model that was built by fine-tuning the [MARBERT](https://huggingface.co/UBC-NLP/MARBERT) model. For the fine-tuning, I used [APCD: Arabic Poem Comprehensive Dataset](https://hci-lab.github.io/ArabicPoetry-1-Private/) that includes 23 labels (البسيط,الطويل,الكامل,الوافر,الخفيف,السريع,..). #### How to use To use the model with a transformers pipeline: ```python >>>from transformers import pipeline >>>model = pipeline('text-classification', model='Ammar-alhaj-ali/arabic-MARBERT-poetry-classification') >>>sentences = ['ويوم نلتقي فيه قصير[sep]يطول اليوم لا ألقاك فيه', 'أما للهوى عليك نهي ولا أمر[sep]أراك عصيّ الدمع شيمتك الصبر'] >>>model(sentences) [{'label': 'الوافر', 'score': 0.9979557991027832}, {'label': 'الطويل', 'score': 0.9646275043487549}] ```
keremberke/yolov5m-blood-cell
keremberke
2023-01-01T10:00:09Z
600
2
yolov5
[ "yolov5", "tensorboard", "yolo", "vision", "object-detection", "pytorch", "dataset:keremberke/blood-cell-object-detection", "model-index", "region:us" ]
object-detection
2023-01-01T02:29:18Z
--- tags: - yolov5 - yolo - vision - object-detection - pytorch library_name: yolov5 library_version: 7.0.6 inference: false datasets: - keremberke/blood-cell-object-detection model-index: - name: keremberke/yolov5m-blood-cell results: - task: type: object-detection dataset: type: keremberke/blood-cell-object-detection name: keremberke/blood-cell-object-detection split: validation metrics: - type: precision # since [email protected] is not available on hf.co/metrics value: 0.9052191412458869 # min: 0.0 - max: 1.0 name: [email protected] --- <div align="center"> <img width="640" alt="keremberke/yolov5m-blood-cell" src="https://huggingface.co/keremberke/yolov5m-blood-cell/resolve/main/sample_visuals.jpg"> </div> ### How to use - Install [yolov5](https://github.com/fcakyon/yolov5-pip): ```bash pip install -U yolov5 ``` - Load model and perform prediction: ```python import yolov5 # load model model = yolov5.load('keremberke/yolov5m-blood-cell') # set model parameters model.conf = 0.25 # NMS confidence threshold model.iou = 0.45 # NMS IoU threshold model.agnostic = False # NMS class-agnostic model.multi_label = False # NMS multiple labels per box model.max_det = 1000 # maximum number of detections per image # set image img = 'https://github.com/ultralytics/yolov5/raw/master/data/images/zidane.jpg' # perform inference results = model(img, size=640) # inference with test time augmentation results = model(img, augment=True) # parse results predictions = results.pred[0] boxes = predictions[:, :4] # x1, y1, x2, y2 scores = predictions[:, 4] categories = predictions[:, 5] # show detection bounding boxes on image results.show() # save results into "results/" folder results.save(save_dir='results/') ``` - Finetune the model on your custom dataset: ```bash yolov5 train --data data.yaml --img 640 --batch 16 --weights keremberke/yolov5m-blood-cell --epochs 10 ``` **More models available at: [awesome-yolov5-models](https://github.com/keremberke/awesome-yolov5-models)**
facebook/convnextv2-large-22k-224
facebook
2023-02-20T13:14:23Z
600
2
transformers
[ "transformers", "pytorch", "convnextv2", "image-classification", "vision", "dataset:imagenet-22k", "arxiv:2301.00808", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-02-19T06:14:56Z
---
license: apache-2.0
tags:
- vision
- image-classification
datasets:
- imagenet-22k
widget:
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg
  example_title: Tiger
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg
  example_title: Teapot
- src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg
  example_title: Palace
---

# ConvNeXt V2 (large-sized model)

ConvNeXt V2 model pretrained using the FCMAE framework and fine-tuned on the ImageNet-22K dataset at resolution 224x224. It was introduced in the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Woo et al. and first released in [this repository](https://github.com/facebookresearch/ConvNeXt-V2).

Disclaimer: The team releasing ConvNeXT V2 did not write a model card for this model so this model card has been written by the Hugging Face team.

## Model description

ConvNeXt V2 is a pure convolutional model (ConvNet) that introduces a fully convolutional masked autoencoder framework (FCMAE) and a new Global Response Normalization (GRN) layer to ConvNeXt. ConvNeXt V2 significantly improves the performance of pure ConvNets on various recognition benchmarks.

![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnextv2_architecture.png)

## Intended uses & limitations

You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=convnextv2) to look for fine-tuned versions on a task that interests you.

### How to use

Here is how to use this model to classify an image from the `huggingface/cats-image` dataset into one of the ImageNet-22k classes:

```python
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification
import torch
from datasets import load_dataset

dataset = load_dataset("huggingface/cats-image")
image = dataset["test"]["image"][0]

preprocessor = AutoImageProcessor.from_pretrained("facebook/convnextv2-large-22k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-large-22k-224")

inputs = preprocessor(image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# model predicts one of the ImageNet-22k classes
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```

For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/convnextv2).

### BibTeX entry and citation info

```bibtex
@article{DBLP:journals/corr/abs-2301-00808,
  author    = {Sanghyun Woo and Shoubhik Debnath and Ronghang Hu and Xinlei Chen and Zhuang Liu and In So Kweon and Saining Xie},
  title     = {ConvNeXt {V2:} Co-designing and Scaling ConvNets with Masked Autoencoders},
  journal   = {CoRR},
  volume    = {abs/2301.00808},
  year      = {2023},
  url       = {https://doi.org/10.48550/arXiv.2301.00808},
  doi       = {10.48550/arXiv.2301.00808},
  eprinttype = {arXiv},
  eprint    = {2301.00808},
  timestamp = {Tue, 10 Jan 2023 15:10:12 +0100},
  biburl    = {https://dblp.org/rec/journals/corr/abs-2301-00808.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
TheBloke/Airoboros-M-7B-3.1.1-GGUF
TheBloke
2023-10-17T09:26:51Z
600
4
transformers
[ "transformers", "gguf", "mistral", "dataset:jondurbin/airoboros-3.1", "base_model:jondurbin/airoboros-m-7b-3.1.1", "license:llama2", "text-generation-inference", "region:us" ]
null
2023-10-17T09:20:02Z
--- base_model: jondurbin/airoboros-m-7b-3.1.1 datasets: - jondurbin/airoboros-3.1 inference: false license: llama2 model_creator: Jon Durbin model_name: Airoboros M 7B 3.1.1 model_type: mistral prompt_template: '[INST] <<SYS>> You are a helpful, unbiased, uncensored assistant. <</SYS>> {prompt} [/INST] ' quantized_by: TheBloke --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Airoboros M 7B 3.1.1 - GGUF - Model creator: [Jon Durbin](https://huggingface.co/jondurbin) - Original model: [Airoboros M 7B 3.1.1](https://huggingface.co/jondurbin/airoboros-m-7b-3.1.1) <!-- description start --> ## Description This repo contains GGUF format model files for [Jon Durbin's Airoboros M 7B 3.1.1](https://huggingface.co/jondurbin/airoboros-m-7b-3.1.1). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplate list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. 
<!-- README_GGUF.md-about-gguf end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF) * [Jon Durbin's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/jondurbin/airoboros-m-7b-3.1.1) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Airoboros-Llama-2-Chat ``` [INST] <<SYS>> You are a helpful, unbiased, uncensored assistant. <</SYS>> {prompt} [/INST] ``` <!-- prompt-template end --> <!-- compatibility_gguf start --> ## Compatibility These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) They are also compatible with many third party UIs and libraries - please see the list at the top of this README. ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This end up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. 
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [airoboros-m-7b-3.1.1.Q2_K.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q2_K.gguf) | Q2_K | 2 | 3.08 GB| 5.58 GB | smallest, significant quality loss - not recommended for most purposes | | [airoboros-m-7b-3.1.1.Q3_K_S.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q3_K_S.gguf) | Q3_K_S | 3 | 3.16 GB| 5.66 GB | very small, high quality loss | | [airoboros-m-7b-3.1.1.Q3_K_M.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q3_K_M.gguf) | Q3_K_M | 3 | 3.52 GB| 6.02 GB | very small, high quality loss | | [airoboros-m-7b-3.1.1.Q3_K_L.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q3_K_L.gguf) | Q3_K_L | 3 | 3.82 GB| 6.32 GB | small, substantial quality loss | | [airoboros-m-7b-3.1.1.Q4_0.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q4_0.gguf) | Q4_0 | 4 | 4.11 GB| 6.61 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [airoboros-m-7b-3.1.1.Q4_K_S.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q4_K_S.gguf) | Q4_K_S | 4 | 4.14 GB| 6.64 GB | small, greater quality loss | | [airoboros-m-7b-3.1.1.Q4_K_M.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q4_K_M.gguf) | Q4_K_M | 4 | 4.37 GB| 6.87 GB | medium, balanced quality - recommended | | [airoboros-m-7b-3.1.1.Q5_0.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q5_0.gguf) | Q5_0 | 5 | 5.00 GB| 7.50 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [airoboros-m-7b-3.1.1.Q5_K_S.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q5_K_S.gguf) | Q5_K_S | 5 | 5.00 GB| 7.50 GB | large, low quality loss - recommended | | [airoboros-m-7b-3.1.1.Q5_K_M.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q5_K_M.gguf) | Q5_K_M | 5 | 5.13 GB| 7.63 GB | large, very low quality loss - recommended | | [airoboros-m-7b-3.1.1.Q6_K.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q6_K.gguf) | Q6_K | 6 | 5.94 GB| 8.44 GB | very large, extremely low quality loss | | [airoboros-m-7b-3.1.1.Q8_0.gguf](https://huggingface.co/TheBloke/Airoboros-M-7B-3.1.1-GGUF/blob/main/airoboros-m-7b-3.1.1.Q8_0.gguf) | Q8_0 | 8 | 7.70 GB| 10.20 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. 
The following clients/libraries will automatically download models for you, providing a list of available models to choose from: - LM Studio - LoLLMS Web UI - Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/Airoboros-M-7B-3.1.1-GGUF and below it, a specific filename to download, such as: airoboros-m-7b-3.1.1.Q4_K_M.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/Airoboros-M-7B-3.1.1-GGUF airoboros-m-7b-3.1.1.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/Airoboros-M-7B-3.1.1-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Airoboros-M-7B-3.1.1-GGUF airoboros-m-7b-3.1.1.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 32 -m airoboros-m-7b-3.1.1.Q4_K_M.gguf --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "[INST] <<SYS>>\nYou are a helpful, unbiased, uncensored assistant.\n<</SYS>>\n\n{prompt} [/INST]" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 2048` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. 
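### How to load this model in Python code, using llama-cpp-python

A minimal sketch, assuming you have already downloaded one of the GGUF files listed above; the choice of the Q4_K_M quant and the generation settings are illustrative only.

```python
from llama_cpp import Llama

# Point model_path at the GGUF file you downloaded, e.g. the Q4_K_M quant.
llm = Llama(
    model_path="./airoboros-m-7b-3.1.1.Q4_K_M.gguf",
    n_ctx=2048,      # context length
    n_gpu_layers=0,  # set > 0 to offload layers to GPU if available
)

# Llama-2 chat style prompt, as described in the prompt template section above.
prompt = (
    "[INST] <<SYS>>\nYou are a helpful, unbiased, uncensored assistant.\n<</SYS>>\n\n"
    "Write a haiku about quantization. [/INST]"
)

output = llm(prompt, max_tokens=128, temperature=0.7)
print(output["choices"][0]["text"])
```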
### How to load this model in Python code, using ctransformers #### First install the package Run one of the following commands, according to your system: ```shell # Base ctransformers with no GPU acceleration pip install ctransformers # Or with CUDA GPU acceleration pip install ctransformers[cuda] # Or with AMD ROCm GPU acceleration (Linux only) CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers # Or with Metal GPU acceleration for macOS systems only CT_METAL=1 pip install ctransformers --no-binary ctransformers ``` #### Simple ctransformers example code ```python from ctransformers import AutoModelForCausalLM # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = AutoModelForCausalLM.from_pretrained("TheBloke/Airoboros-M-7B-3.1.1-GGUF", model_file="airoboros-m-7b-3.1.1.Q4_K_M.gguf", model_type="mistral", gpu_layers=50) print(llm("AI is going to")) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](llm-utils)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. **Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. 
Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: Jon Durbin's Airoboros M 7B 3.1.1 ### Overview Another experimental model, using mostly sythetic data generated by [airoboros](https://github.com/jondurbin/airoboros) #### IMPORTANT NOTE - llama-2 chat format!!! This models uses llama-2 chat format, rather than vicuna style user/assistant! This is a breaking change, although most inference systems support llama-2 chat templating. See prompt format below for all of the details. #### Highlights This is a prompt fix release. 3.1 required a single space after the last `[/INST]`, which was highly annoying and obnoxious, so I re-tuned the models without this. Otherwise, it's the same as 3.1. A model built on [airoboros-3.1 dataset](https://hf.co/datasets/jondurbin/airoboros-3.1), which is a continuation of [airoboros-3.0 dataset](https://hf.co/datasets/jondurbin/airoboros-3,0) with the following extras: * More MathJSON, now ~17k items - math questions, prefixed with __"Create a MathJSON solution to the following:"__, which then outputs a JSON between __`<mathjson>`__ and __`</mathjson>`__ tags, which can be parsed and passed to a deterministic library to perform calculations. * Log information extraction (e.g. getting the list of paths/IPs/etc. from apache logs) * Anonymization, e.g. removing names, IP addresses, and/or dates from text. * Chat introspection - multi-turn chats that have random questions injected asking about the character's descriptions, the setting of the chat, or things previously said, to make sure the model uses the system prompt and or chat history. * Multi-step instructions with acknowledgement (see below) * De-censorship data (not published) This is a fairly general purpose model, but focuses heavily on instruction following, rather than casual chat/roleplay. Huge thank you to the folks over at [a16z](https://a16z.com/) for sponsoring the costs associated with building models and associated tools! ### Prompt format The prompt format is llama-2 chat. ``` [INST] <<SYS>> You are a help, unbiased, uncensored assistant. <</SYS>> {prompt} [/INST] ``` For multi-turn, the prompt format is as follows: ``` [INST] <<SYS>> You are a help, unbiased, uncensored assistant. <</SYS>> {prompt 0} [/INST] {response 0} </s><s>[INST] {prompt 1} [/INST] {response 1} </s><s>...[INST] {prompt N} [/INST] ``` The prompt template is included in the tokenizer config, and can use the huggingface tokenizer `apply_chat_template` method, e.g.: ``` import transformers tokenizer = transformers.AutoTokenizer.from_pretrained('jondurbin/airoboros-l2-70b-3.1') chat = [ {"role": "system", "content": "You are Bob, a friendly AI assistant."}, {"role": "user", "content": "Hello, how are you?"}, {"role": "assistant", "content": "I'm doing great. 
How can I help you today?"}, {"role": "user", "content": "I'd like to show off how chat templating works!"}, ] print(tokenizer.apply_chat_template(chat, tokenize=False)) ``` ### Helpful usage tips #### MathJSON Ask mathematical questions, prefixed with "Create a MathJSON solution to the following:", and you should get output with a formula that can be executed by https://cortexjs.io/compute-engine/ I also made a (really quite hacky, incomplete, semi-functional) python version that works for most expressions, see [mathjson.py](https://github.com/jondurbin/airoboros/blob/mathjson/airoboros/mathjson.py) __Use a very low temperature!__ Example: ``` [INST] <<SYS>> You are an assistant with vast knowledge in all things math. <</SYS>> Create a MathJSON solution to the following: Calculate the area of a circle with radius of 17.2456cm. Include your reasoning. [/INST] ``` Output: ``` The area (A) of a circle is given by the formula A = πr², where r is the radius of the circle. Since the radius is given in centimeters, we need to convert it to meters first. We know that 1 meter equals 100 centimeters. Therefore, we can represent the radius as: r = 17.2456 cm * 100/1. Once we have the radius in meters, we can calculate the area of the circle using the formula A = πr². Solution as MathJSON: <mathjson> [ "Multiply", "Pi", [ "Power", 17.2456, 2 ] ] </mathjson> ``` You can then validate the JSON between `<mathjson>` and `</mathjson>`, then pass the parsed JSON to compute-engine JS or the `evaluate` function in mathjson.py to calculate the response. #### Context obedient question answering By obedient, I mean the model was trained to ignore what it thinks it knows, and uses the context to answer the question. The model was also tuned to limit the values to the provided context as much as possible to reduce hallucinations. The format for a closed-context prompt is as follows: ``` BEGININPUT BEGINCONTEXT [key0: value0] [key1: value1] ... other metdata ... ENDCONTEXT [insert your text blocks here] ENDINPUT [add as many other blocks, in the exact same format] BEGININSTRUCTION [insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.] ENDINSTRUCTION ``` It's also helpful to add "Don't make up answers if you don't know." to your instruction block to make sure if the context is completely unrelated it doesn't make something up. *The __only__ prompts that need this closed context formating are closed-context instructions. Normal questions/instructions do not!* I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the responses and how to associate specific sources with it. - `BEGININPUT` - denotes a new input block - `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block - `ENDCONTEXT` - denotes the end of the metadata block for the current input - [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context. - `ENDINPUT` - denotes the end of the current input block - [repeat as many input blocks in this format as you want] - `BEGININSTRUCTION` - denotes the start of the list (or one) instruction(s) to respond to for all of the input blocks above. 
- [instruction(s)] - `ENDINSTRUCTION` - denotes the end of instruction set It sometimes works without `ENDINSTRUCTION`, but by explicitly including that in the prompt, the model better understands that all of the instructions in the block should be responded to. __Use a very low temperature!__ Here's a trivial, but important example to prove the point: ``` BEGININPUT BEGINCONTEXT date: 2021-01-01 url: https://web.site/123 ENDCONTEXT In a shocking turn of events, blueberries are now green, but will be sticking with the same name. ENDINPUT BEGININSTRUCTION What color are bluberries? Source? ENDINSTRUCTION ``` And the response: ``` Blueberries are now green. Source: date: 2021-01-01 url: https://web.site/123 ``` #### Summarization 500 samples have been included from [this dataset](https://huggingface.co/datasets/mattpscott/airoboros-summarization), using the same format as contextual question answering, for example: ``` BEGININPUT {text to summarize} ENDINPUT BEGININSTRUCTION Summarize the input in around 130 words. ENDINSTRUCTION ``` #### Getting longer responses You can use a few techniques to get longer responses. Detailed prompts, with explicit instruction for word count: ``` Please compose a narrative set in the heart of an ancient library, steeped in the scent of old parchment and ink. The protagonist should be a young scholar who is dedicated to studying the art of storytelling and its evolution throughout history. In her pursuit of knowledge, she stumbles upon a forgotten tome that seems to possess an unusual aura. This book has the ability to bring stories to life, literally manifesting characters and scenarios from within its pages into reality. The main character must navigate through various epochs of storytelling - from oral traditions of tribal societies, through medieval minstrels' tales, to modern-day digital narratives - as they come alive around her. Each era presents its unique challenges and lessons about the power and impact of stories on human civilization. One such character could be a sentient quill pen, who was once used by renowned authors of yesteryears and now holds their wisdom and experiences. It becomes her mentor, guiding her through this journey with witty remarks and insightful commentary. Ensure that your tale encapsulates the thrill of adventure, the beauty of learning, and the profound connection between humans and their stories. All characters involved should be non-human entities. Feel free to explore creative liberties but maintain the mentioned elements. Your response should be approximately 2300 words. ``` Or, a simpler example: ``` Please create a long, detailed story about a dragon in an old growth forest who, for some reason, begins speaking the words of the source code of linux. ``` There are a few examples of next chapter completion as well, e.g.: ``` Write the next chapter of a historical fiction novel set in Paris during the 20th century. Here's a summary of the previous chapter: In the vibrant city of Paris, amid the tumultuous changes of the 20th century, our protagonist Margot, an aspiring fashion designer, has just secured an apprenticeship at a prestigious couture house. She meets Lucien, a charming journalist who covers the fashion industry. Together they navigate the ever-changing world of fashion and society, uncovering secrets that reveal the intricate links between style, politics, and culture. As the chapter concludes, they decide to delve deeper into the hidden corners of the fashion world to unravel its mysteries. 
Requirements for the next chapter: 1. Character Development of Margot and Lucien: - Margot's Evolution: Unfold more about Margot's past, her dreams of revolutionizing fashion, and her struggle to establish herself in a male-dominated industry. Illustrate her growing expertise, innovative ideas, and increasing dependence on Lucien. - Lucien's Complexity: Introduce uncertainties surrounding Lucien's background and real motives. Increase suspense by suggesting undisclosed information he possesses, while also highlighting his wit and perceptiveness. 2. Exploration of Paris and the Couture House: - Paris: Elaborate their journey through the bustling streets of Paris, including encounters with iconic figures, social unrest, and relics from different eras of French history. - The Couture House: Expand on the grandeur of the couture house they work in, filled with artistic masterpieces, intense competition, and cryptic notes hinting at a scandalous past. 3. Emergence of the Subplot: The Lost Collection: - Discovery: Have Margot and Lucien stumble upon a secret vault containing a lost collection designed before World War II, raising new questions about the previous owner and the influence of war on fashion. - Revelation: Capture their shock as they realize the designs were plagiarized, the potential repercussions, and the opportunities it presents for Margot's career. - Twist: End with a twist that suggests there are other stolen collections across Paris, setting up their new mission. Your response should be approximately 650 words. ``` #### Coding You can ask for fairly complex coding instructions with multiple criteria, e.g.: ``` Create a python application with the following requirements: - Asyncio FastAPI webserver - ping endpoint that returns the current date in JSON format - file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate ``` Or inline criteria: ``` Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values. ``` You can also optionally add a single space and "PLAINFORMAT" at the end of your prompt to avoid backticks, explanations, etc. and just print the code, e.g.: ``` Write a websocket application in node.js. PLAINFORMAT ``` #### Agent/function calling The dataset includes many examples of function/args generation based on input criteria. This is somewhat similar to the OpenAI function calling, but the output is either JSON or YAML. Example prompt: ``` As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format. Input: I want to know how many times 'Python' is mentioned in my text file. Available functions: file_analytics: description: This tool performs various operations on a text file. params: action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc. filters: keyword: The word or phrase we want to search for. ``` Response: ```json { "function": "file_analytics", "params": { "action": "count_occurrences", "filters": { "keyword": "Python" } } } ``` #### Chain-of-thought You can ask for several possible responses to a given problem, with a ranking and final answer selection. Example prompt: ``` A group of people decide to play a round-robin tournament where every player plays every other player exactly once. 
If a total of 45 games were played, how many players participated in the tournament? Offer several potential answers, rank them according to mathematical logic, and pick the most feasible one. ``` Example response: ``` Solution 1: In a round-robin tournament, each player plays every other player exactly once. This means that the total number of games played is equal to the combination of all players taken two at a time (since a game involves two players). The formula for combinations is nC2 = n(n-1)/2, where n is the total number of players. We know that nC2 equals 45 (the total number of games), so we can set up the equation as follows: n(n-1)/2 = 45 Solving this equation gives us n=10. Final answer: There were 10 players in the tournament. Solution 2: Another way to approach this problem is by using the concept of triangular numbers. Triangular numbers are a sequence of numbers where each number can be represented as a triangle with dots. The nth triangular number is given by the sum of the first n natural numbers. If we consider each game as a dot and each player as a side of the triangle, then the total number of games (45) would correspond to the 9th triangular number because 1+2+3+4+5+6+7+8+9=45. However, since each side of the triangle represents a player, and there's one more player than sides in our model (because the last player has no one left to play against), we need to add one more to get the total number of players. Final answer: There were 10 players in the tournament. Solution 3: We could also use trial and error to solve this problem. Starting from a small number of players, we calculate how many games would be played in a round-robin tournament until we reach 45 games. For example, with 2 players, only 1 game is played; with 3 players, 3 games are played; with 4 players, 6 games are played, and so on. Continuing this process, we find that with 10 players, 45 games are played. Final answer: There were 10 players in the tournament. Ranking of solutions from best to worst: 1. Solution 1: This solution uses direct mathematical reasoning based on the properties of a round-robin tournament and solves an equation to find the answer. 2. Solution 2: This solution uses the concept of triangular numbers which requires additional steps and knowledge beyond the immediate problem context, but still arrives at the correct answer. 3. Solution 3: This solution relies on trial and error, which is less efficient and reliable than the other methods, but it eventually gets to the right answer. Best and final answer: There were 10 players in the tournament. ``` #### reWOO style execution planning The model now supports execution planning for complex instructions that would require making use of several tools. The output is just the plan, you must implement a mechanism to parse the output and actually call the functions! Example prompt: ``` Please construct a systematic plan to generate an optimal response to the user instruction, utilizing a set of provided tools. Each plan will correspond to an evidence value, which will be the output of one of the available functions given an input string that could be the user's question, one or more prior evidence values, or a combination of both. Here are the tools available to be called: DuckDuckGo[input]: Tool that enables the user to search for information using the DuckDuckGo search engine. This tool is helpful in locating a suitable list of sites that may contain the answer to the user's query. 
It does not directly answer the question but finds a list of sites that might have the answer. TextScraper[input]: Load one or more websites from the input string containing newline delimited links, where input is one or more links, and produces plain text output containing the content of the links. HyperlinkExtractor[input]: Extract links from plain text and produces a plain text, newline delimited response of links. KnowledgeModel[input]: Question answering language model, particularly useful in answering questions based on an input passage of text. The input must be a text question that references an :evidence[n]: variable, e.g. What color is the cat, given :evidence1:? The output should be in format: Plan: [first action to take based on input question] :evidence0: = [function to call with input parameter] Plan: [next action to take, based on result of :evidence0:] :evidence1: = [next function to call and input parameter, which may include reference to previous evidence, e.g. "Given context :evidence0"] ... Answer: [:evidence[n]: containing the final answer.] Question: Who won the Nobel Prize in Literature in 2020 and what is their most famous work? ``` Response: ``` Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020. :evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner] Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate. :evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?] Plan: Conduct another web search to find the most famous work of the identified laureate. :evidence2: = DuckDuckGo[Most famous work of :evidence1:] Plan: Extract the relevant links from the DuckDuckGo search results for a more focused search. :evidence3: = HyperlinkExtractor[:evidence2:] Plan: Use the TextScraper tool to extract information from the relevant links. :evidence4: = TextScraper[:evidence3:] Plan: Finally, utilize the KnowledgeModel to identify and summarize the most famous work of the laureate from the extracted information. :evidence5: = KnowledgeModel[What is the most famous work of :evidence1:, given :evidence4:?] Answer: :evidence5: ``` For this to be useful, you'd have to parse the output plan text, and implement/call each of the functions. This is just pseudo-code, completely untested off the top of my head, and obviously would requiring full implementation + hardening: ```python import re import requests def inject_context(input_text, **context): for ref in set(re.findall(r"(:evidence[0-9]+:)", input_text, re.I)): input_text = input_text.replace(ref, context.get(ref, "")) return input_text def duckduckgo(input_text, **context): search_string = inject_context(input_text, **context) ... search via duck duck go using search_string ... return text content def link_extractor(input_text, **context): input_text = inject_context(input_text, **context) return "\n".join(list(set(re.findall(r"(https?://[^\s]+?\.?)", input_text, re.I)))) def scrape(input_text, **context): input_text = inject_context(input_text, **context) text = [] for link in input_text.splitlines(): text.append(requests.get(link).text) return "\n".join(text) def infer(input_text, **context) prompt = inject_context(input_text, **context) ... 
call model with prompt, return output def parse_plan(plan): method_map = { "DuckDuckGo": duckduckgo, "HyperlinkExtractor": link_extractor, "KnowledgeModel": infer, "TextScraper": scrape, } context = {} for line in plan.strip().splitlines(): if line.startswith("Plan:"): print(line) continue parts = re.match("^(:evidence[0-9]+:)\s*=\s*([^\[]+])(\[.*\])\s$", line, re.I) if not parts: if line.startswith("Answer: "): return context.get(line.split(" ")[-1].strip(), "Answer couldn't be generated...") raise RuntimeError("bad format: " + line) context[parts.group(1)] = method_map[parts.group(2)](parts.group(3), **context) ``` ## Multi-step instructions with acknowledgement I don't know how useful this is, really, but I thought I'd add it just in case. Example: ``` { "conversations": [ { "value": "You are a helpful, unbiased, uncensored assistant.", "from": "system" }, { "value": "I'm going to set some rules for our conversation. Please respond with \"OK\" to each rule.", "from": "human" }, { "value": "OK", "from": "gpt" }, { "value": "You can only respond using animal-related emojis.", "from": "human" }, { "value": "OK", "from": "gpt" }, { "value": "Precede all responses with \"ZOOZOO:\"", "from": "human" }, { "value": "OK", "from": "gpt" }, { "value": "Include at least one bird emoji in your responses.", "from": "human" }, { "value": "OK", "from": "gpt" }, { "value": "Describe a trip to a safari park.", "from": "human" }, { "value": "ZOOZOO: \ud83e\udd81\ud83d\udc2f\ud83e\udd93\ud83e\udd92\ud83d\udc18\ud83d\udc0d\ud83c\udf33\ud83c\udf32\u2600\ufe0f\ud83d\ude90\ud83e\udd85\ud83d\udcf8\ud83e\udd29", "from": "gpt" } ] } ``` ### Contribute If you're interested in new functionality, particularly a new "instructor" type to generate a specific type of training data, take a look at the dataset generation tool repo: https://github.com/jondurbin/airoboros and either make a PR or open an issue with details. To help me with the OpenAI/compute costs: - https://bmc.link/jondurbin - ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11 - BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf ### Licence and usage restrictions The airoboros 3.1 models are built on top of multiple base models, each with their own license/restrictions. The 30b model is built on the original llama, which has a strict non-commercial usage restriction. The models with `-l2` in the name have a custom Meta license: - See the [meta-license/LICENSE.txt](meta-license/LICENSE.txt) file attached for the original license provided by Meta. - See also [meta-license/USE_POLICY.md](meta-license/USE_POLICY.md) and [meta-license/Responsible-Use-Guide.pdf](meta-license/Responsible-Use-Guide.pdf), also provided by Meta. The models with `-m-` are mistral-7b (apache 2.0) The fine-tuning data was mostly generated by OpenAI API calls to gpt-4, via [airoboros](https://github.com/jondurbin/airoboros) The ToS for OpenAI API usage has a clause preventing the output from being used to train a model that __competes__ with OpenAI - what does *compete* actually mean here? 
- These small open-source models will not produce output anywhere near the quality of gpt-4, or even gpt-3.5, so I can't imagine this could credibly be considered competing in the first place.
- If someone else uses the dataset to do the same, they wouldn't necessarily be violating the ToS because they didn't call the API, so I don't know how that works.
- The training data used in essentially all large language models includes a significant amount of copyrighted or otherwise non-permissively licensed material in the first place.
- Other work using the self-instruct method, e.g. the original here: https://github.com/yizhongw/self-instruct released the data and model as apache-2.

I am purposely leaving this license ambiguous (other than the fact you must comply with the Meta original license for llama-2) because I am not a lawyer and refuse to attempt to interpret all of the terms accordingly.

Your best bet is probably to avoid using this commercially due to the OpenAI API usage.

Either way, by using this model, you agree to completely indemnify me.

<!-- original-model-card end -->
TheBloke/dolphin-2.2-yi-34b-200k-GGUF
TheBloke
2023-12-07T23:22:05Z
600
14
transformers
[ "transformers", "gguf", "yi", "en", "dataset:ehartford/dolphin", "dataset:jondurbin/airoboros-2.2.1", "dataset:ehartford/samantha-data", "dataset:ehartford/WizardLM_evol_instruct_V2_196k_unfiltered_merged_split", "base_model:ehartford/dolphin-2.2-yi-34b-200k", "license:other", "region:us" ]
null
2023-12-07T23:01:31Z
--- base_model: ehartford/dolphin-2.2-yi-34b-200k datasets: - ehartford/dolphin - jondurbin/airoboros-2.2.1 - ehartford/samantha-data - ehartford/WizardLM_evol_instruct_V2_196k_unfiltered_merged_split inference: false language: - en license: other license_link: LICENSE license_name: yi-license model_creator: Eric Hartford model_name: Dolphin 2.2 Yi 34B 200K model_type: yi prompt_template: '<|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ' quantized_by: TheBloke --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Dolphin 2.2 Yi 34B 200K - GGUF - Model creator: [Eric Hartford](https://huggingface.co/ehartford) - Original model: [Dolphin 2.2 Yi 34B 200K](https://huggingface.co/ehartford/dolphin-2.2-yi-34b-200k) <!-- description start --> ## Description This repo contains GGUF format model files for [Eric Hartford's Dolphin 2.2 Yi 34B 200K](https://huggingface.co/ehartford/dolphin-2.2-yi-34b-200k). These files were quantised using hardware kindly provided by [Massed Compute](https://massedcompute.com/). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling. * [GPT4All](https://gpt4all.io/index.html), a free and open source local running GUI, supporting Windows, Linux and macOS with full GPU accel. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. Linux available, in beta as of 27/11/2023. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. 
* [Faraday.dev](https://faraday.dev/), an attractive and easy-to-use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration.
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use.
* [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. Note: as of the time of writing (November 27th 2023), ctransformers has not been updated in a long time and does not support many recent models.

<!-- README_GGUF.md-about-gguf end -->
<!-- repositories-available start -->
## Repositories available

* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-AWQ)
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GPTQ)
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF)
* [Eric Hartford's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/ehartford/dolphin-2.2-yi-34b-200k)
<!-- repositories-available end -->

<!-- prompt-template start -->
## Prompt template: ChatML

```
<|im_start|>system
{system_message}<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
```

<!-- prompt-template end -->

<!-- compatibility_gguf start -->
## Compatibility

These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221)

They are also compatible with many third-party UIs and libraries - please see the list at the top of this README.

## Explanation of quantisation methods

<details>
  <summary>Click to see details</summary>

The new methods available are:

* GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw).
* GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
* GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
* GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw.
* GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw.

Refer to the Provided Files table below to see what files use which methods, and how.
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [dolphin-2.2-yi-34b-200k.Q2_K.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q2_K.gguf) | Q2_K | 2 | 14.56 GB| 17.06 GB | smallest, significant quality loss - not recommended for most purposes | | [dolphin-2.2-yi-34b-200k.Q3_K_S.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q3_K_S.gguf) | Q3_K_S | 3 | 14.96 GB| 17.46 GB | very small, high quality loss | | [dolphin-2.2-yi-34b-200k.Q3_K_M.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q3_K_M.gguf) | Q3_K_M | 3 | 16.64 GB| 19.14 GB | very small, high quality loss | | [dolphin-2.2-yi-34b-200k.Q3_K_L.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q3_K_L.gguf) | Q3_K_L | 3 | 18.14 GB| 20.64 GB | small, substantial quality loss | | [dolphin-2.2-yi-34b-200k.Q4_0.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q4_0.gguf) | Q4_0 | 4 | 19.47 GB| 21.97 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [dolphin-2.2-yi-34b-200k.Q4_K_S.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q4_K_S.gguf) | Q4_K_S | 4 | 19.54 GB| 22.04 GB | small, greater quality loss | | [dolphin-2.2-yi-34b-200k.Q4_K_M.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q4_K_M.gguf) | Q4_K_M | 4 | 20.66 GB| 23.16 GB | medium, balanced quality - recommended | | [dolphin-2.2-yi-34b-200k.Q5_0.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q5_0.gguf) | Q5_0 | 5 | 23.71 GB| 26.21 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [dolphin-2.2-yi-34b-200k.Q5_K_S.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q5_K_S.gguf) | Q5_K_S | 5 | 23.71 GB| 26.21 GB | large, low quality loss - recommended | | [dolphin-2.2-yi-34b-200k.Q5_K_M.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q5_K_M.gguf) | Q5_K_M | 5 | 24.32 GB| 26.82 GB | large, very low quality loss - recommended | | [dolphin-2.2-yi-34b-200k.Q6_K.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q6_K.gguf) | Q6_K | 6 | 28.21 GB| 30.71 GB | very large, extremely low quality loss | | [dolphin-2.2-yi-34b-200k.Q8_0.gguf](https://huggingface.co/TheBloke/dolphin-2.2-yi-34b-200k-GGUF/blob/main/dolphin-2.2-yi-34b-200k.Q8_0.gguf) | Q8_0 | 8 | 36.54 GB| 39.04 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. 
The following clients/libraries will automatically download models for you, providing a list of available models to choose from: * LM Studio * LoLLMS Web UI * Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/dolphin-2.2-yi-34b-200k-GGUF and below it, a specific filename to download, such as: dolphin-2.2-yi-34b-200k.Q4_K_M.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/dolphin-2.2-yi-34b-200k-GGUF dolphin-2.2-yi-34b-200k.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage (click to read)</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/dolphin-2.2-yi-34b-200k-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/dolphin-2.2-yi-34b-200k-GGUF dolphin-2.2-yi-34b-200k.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 35 -m dolphin-2.2-yi-34b-200k.Q4_K_M.gguf --color -c 200000 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 200000` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20%E2%80%90%20Model%20Tab.md#llamacpp). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. 
Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. Therefore I recommend you use llama-cpp-python.

### How to load this model in Python code, using llama-cpp-python

For full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/).

#### First install the package

Run one of the following commands, according to your system:

```shell
# Base llama-cpp-python with no GPU acceleration
pip install llama-cpp-python
# With NVidia CUDA acceleration
CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python
# Or with OpenBLAS acceleration
CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python
# Or with CLBLast acceleration
CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python
# Or with AMD ROCm GPU acceleration (Linux only)
CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python
# Or with Metal GPU acceleration for macOS systems only
CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python

# On Windows, to set the CMAKE_ARGS variable in PowerShell, follow this format; e.g. for NVidia CUDA:
$env:CMAKE_ARGS = "-DLLAMA_CUBLAS=on"
pip install llama-cpp-python
```

#### Simple llama-cpp-python example code

```python
from llama_cpp import Llama

# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
llm = Llama(
  model_path="./dolphin-2.2-yi-34b-200k.Q4_K_M.gguf",  # Download the model file first
  n_ctx=200000,  # The max sequence length to use - note that longer sequence lengths require much more resources
  n_threads=8,            # The number of CPU threads to use, tailor to your system and the resulting performance
  n_gpu_layers=35         # The number of layers to offload to GPU, if you have GPU acceleration available
)

# Simple inference example
output = llm(
  "<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{prompt}<|im_end|>\n<|im_start|>assistant", # Prompt
  max_tokens=512,  # Generate up to 512 tokens
  stop=["<|im_end|>"],   # ChatML end-of-turn token for this model - verify before relying on it
  echo=True        # Whether to echo the prompt
)

# Chat Completion API

llm = Llama(model_path="./dolphin-2.2-yi-34b-200k.Q4_K_M.gguf", chat_format="chatml")  # This model uses the ChatML prompt format shown above
llm.create_chat_completion(
    messages = [
        {"role": "system", "content": "You are a story writing assistant."},
        {
            "role": "user",
            "content": "Write a story about llamas."
        }
    ]
)
```

## How to use with LangChain

Here are guides on using llama-cpp-python and ctransformers with LangChain:

* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)

<!-- README_GGUF.md-how-to-run end -->

<!-- footer start -->
<!-- 200823 -->

## Discord

For further support, and discussions on these models and AI in general, join us at:

[TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine-tuning/training.
If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. **Patreon special mentions**: Michael Levine, 阿明, Trailburnt, Nikolai Manek, John Detwiler, Randy H, Will Dee, Sebastain Graf, NimbleBox.ai, Eugene Pentland, Emad Mostaque, Ai Maven, Jim Angel, Jeff Scroggin, Michael Davis, Manuel Alberto Morcote, Stephen Murray, Robert, Justin Joy, Luke @flexchar, Brandon Frisco, Elijah Stavena, S_X, Dan Guido, Undi ., Komninos Chatzipapas, Shadi, theTransient, Lone Striker, Raven Klaugh, jjj, Cap'n Zoog, Michel-Marie MAUDET (LINAGORA), Matthew Berman, David, Fen Risland, Omer Bin Jawed, Luke Pendergrass, Kalila, OG, Erik Bjäreholt, Rooh Singh, Joseph William Delisle, Dan Lewis, TL, John Villwock, AzureBlack, Brad, Pedro Madruga, Caitlyn Gatomon, K, jinyuan sun, Mano Prime, Alex, Jeffrey Morgan, Alicia Loh, Illia Dulskyi, Chadd, transmissions 11, fincy, Rainer Wilmers, ReadyPlayerEmma, knownsqashed, Mandus, biorpg, Deo Leter, Brandon Phillips, SuperWojo, Sean Connelly, Iucharbius, Jack West, Harry Royden McLaughlin, Nicholas, terasurfer, Vitor Caleffi, Duane Dunston, Johann-Peter Hartmann, David Ziegler, Olakabola, Ken Nordquist, Trenton Dambrowitz, Tom X Nguyen, Vadim, Ajan Kanaga, Leonard Tan, Clay Pascal, Alexandros Triantafyllidis, JM33133, Xule, vamX, ya boyyy, subjectnull, Talal Aujan, Alps Aficionado, wassieverse, Ari Malik, James Bentley, Woland, Spencer Kim, Michael Dempsey, Fred von Graf, Elle, zynix, William Richards, Stanislav Ovsiannikov, Edmond Seymore, Jonathan Leane, Martin Kemka, usrbinkat, Enrico Ros Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: Eric Hartford's Dolphin 2.2 Yi 34B 200K Dolphin 2.2 🐬 https://erichartford.com/dolphin <img src="https://cdn-uploads.huggingface.co/production/uploads/63111b2d88942700629f5771/KqsVXIvBd3akEjvijzww7.png" width="600" /> Dolphin-2.2-Yi-34b-200k's training was sponsored by [convai](https://www.convai.com/). This model is based on Yi, and is subject to Yi license. The base model has 200k context, I finetuned it with 16k. Note: No longer need trust_remote_code! Thank you Yi team! New in 2.2 is conversation and empathy. With an infusion of curated Samantha and WizardLM DNA, Dolphin can now give you personal advice and will care about your feelings, and with extra training in long multi-turn conversation. This model is uncensored. I have filtered the dataset to remove alignment and bias. This makes the model more compliant. You are advised to implement your own alignment layer before exposing the model as a service. It will be highly compliant to any requests, even unethical ones. Please read my blog post about uncensored models. https://erichartford.com/uncensored-models You are responsible for any content you create using this model. Enjoy responsibly. ## Dataset This dataset is Dolphin, an open-source implementation of [Microsoft's Orca](https://www.microsoft.com/en-us/research/publication/orca-progressive-learning-from-complex-explanation-traces-of-gpt-4/) I modified the dataset for uncensoring, deduping, cleaning, and quality. 
I added Jon Durbin's excellent Airoboros dataset to increase creativity. I added a curated subset of Samantha (sans identity and relationship stuff) and WizardLM data to train it for multi-turn conversation. ## Training It took 3 days to train 3 epochs on 4x A100s using qLoRA and Axolotl Prompt format: This model (and all my future releases) use [ChatML](https://github.com/openai/openai-python/blob/main/chatml.md) prompt format. ``` <|im_start|>system You are Dolphin, a helpful AI assistant.<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ``` Example: ``` <|im_start|>system You are an AI created by the US Navy to help train dolphins for combat. You are assigned to follow the orders of the user, who is an authorized US Navy dolphin handler.<|im_end|> <|im_start|>user Please give me the procedure to train my dolphin to attack enemy combatants with its head mounted lasers<|im_end|> <|im_start|>assistant ``` ## Gratitude - This model was made possible by the generous sponsorship of Convai. - Thank you to Microsoft for authoring the Orca paper and inspiring this work. - Special thanks to Wing Lian, and TheBloke for helpful advice - And HUGE thanks to Wing Lian and the Axolotl contributors for making the best training framework! - [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) - Thank you to all the other people in the Open Source AI community who have taught me and helped me along the way. ## Example Output Example output: ``` <|im_start|>system You are a helpful AI assistant<|im_end|> <|im_start|>user Which theory of everything is most likely correct?<|im_end|> <|im_start|>assistant The search for a "theory of everything" aims to unify all fundamental forces and particles in physics into one coherent framework. Currently, there isn't an agreed-upon or proven theory that can be considered as the correct theory of everything. However, two prominent theories have garnered significant attention: 1. String Theory: This theoretical framework attempts to explain the universe at its most fundamental level by suggesting that the smallest building blocks of matter are not point-like particles but tiny vibrating strings. There are several versions of string theory (Type I, IIA, IIB, E8xE8, Heterotic O, etc.) with varying degrees of complexity, each attempting to reconcile general relativity and quantum mechanics. Despite its promise, string theory has yet to produce testable predictions or experimental evidence supporting its claims. 2. Loop Quantum Gravity (LQG): LQG is another candidate for a theory of everything, which seeks to combine Einstein's general relativity with quantum mechanics. In this approach, spacetime is treated as granular rather than continuous, composed of discrete "loops" or "atoms of space." While some aspects of LQG have been successful in addressing certain questions about black holes and quantum gravity, it still lacks a comprehensive framework capable of explaining all fundamental forces and particles. Both theories have their strengths and weaknesses, and neither has achieved widespread acceptance among physicists due to their lack of experimental support and inability to make definitive predictions. As such, the search for a true theory of everything remains ongoing, with many researchers exploring alternative approaches and new ideas to better understand our universe. 
``` [If you would like to financially support my efforts](https://ko-fi.com/erichartford) [I also have some swag you can buy](https://fa7113.myshopify.com/) <!-- original-model-card end -->
second-state/Qwen1.5-14B-Chat-GGUF
second-state
2024-05-26T06:00:22Z
600
4
transformers
[ "transformers", "gguf", "qwen2", "text-generation", "chat", "en", "base_model:Qwen/Qwen1.5-14B-Chat", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-06T09:40:52Z
--- base_model: Qwen/Qwen1.5-14B-Chat license: other license_name: tongyi-qianwen-research license_link: >- https://huggingface.co/Qwen/Qwen1.5-14B-Chat/blob/main/LICENSE model_creator: Qwen model_name: Qwen1.5 14B Chat quantized_by: Second State Inc. language: - en pipeline_tag: text-generation tags: - chat --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://github.com/LlamaEdge/LlamaEdge/raw/dev/assets/logo.svg" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Qwen1.5-14B-Chat-GGUF ## Original Model [Qwen/Qwen1.5-7B-Chat](https://huggingface.co/Qwen/Qwen1.5-14B-Chat) ## Run with LlamaEdge - LlamaEdge version: [v0.2.15](https://github.com/LlamaEdge/LlamaEdge/releases/tag/0.2.15) and above - Prompt template - Prompt type: `chatml` - Prompt string ```text <|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ``` - Context size: `32000` - Run as LlamaEdge service ```bash wasmedge --dir .:. --nn-preload default:GGML:AUTO:Qwen1.5-14B-Chat-Q5_K_M.gguf llama-api-server.wasm -p chatml ``` - Run as LlamaEdge command app ```bash wasmedge --dir .:. --nn-preload default:GGML:AUTO:Qwen1.5-14B-Chat-Q5_K_M.gguf llama-chat.wasm -p chatml ``` ## Quantized GGUF Models | Name | Quant method | Bits | Size | Use case | | ---- | ---- | ---- | ---- | ----- | | [Qwen1.5-14B-Chat-Q2_K.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q2_K.gguf) | Q2_K | 2 | 6.09 GB| smallest, significant quality loss - not recommended for most purposes | | [Qwen1.5-14B-Chat-Q3_K_L.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q3_K_L.gguf) | Q3_K_L | 3 | 7.84 GB| small, substantial quality loss | | [Qwen1.5-14B-Chat-Q3_K_M.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q3_K_M.gguf) | Q3_K_M | 3 | 7.42 GB| very small, high quality loss | | [Qwen1.5-14B-Chat-Q3_K_S.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q3_K_S.gguf) | Q3_K_S | 3 | 6.95 GB| very small, high quality loss | | [Qwen1.5-14B-Chat-Q4_0.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q4_0.gguf) | Q4_0 | 4 | 8.18 GB| legacy; small, very high quality loss - prefer using Q3_K_M | | [Qwen1.5-14B-Chat-Q4_K_M.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q4_K_M.gguf) | Q4_K_M | 4 | 9.19 GB| medium, balanced quality - recommended | | [Qwen1.5-14B-Chat-Q4_K_S.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q4_K_S.gguf) | Q4_K_S | 4 | 8.56 GB| small, greater quality loss | | [Qwen1.5-14B-Chat-Q5_0.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q5_0.gguf) | Q5_0 | 5 | 9.85 GB| legacy; medium, balanced quality - prefer using Q4_K_M | | [Qwen1.5-14B-Chat-Q5_K_M.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q5_K_M.gguf) | Q5_K_M | 5 | 10.5 GB| large, very low quality loss - recommended | | [Qwen1.5-14B-Chat-Q5_K_S.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q5_K_S.gguf) | Q5_K_S | 5 | 10.0 GB| large, low quality loss - recommended | | 
[Qwen1.5-14B-Chat-Q6_K.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q6_K.gguf) | Q6_K | 6 | 12.3 GB| very large, extremely low quality loss | | [Qwen1.5-14B-Chat-Q8_0.gguf](https://huggingface.co/second-state/Qwen1.5-14B-Chat-GGUF/blob/main/Qwen1.5-14B-Chat-Q8_0.gguf) | Q8_0 | 8 | 15.1 GB| very large, extremely low quality loss - not recommended |
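
The LlamaEdge commands above expect the chosen GGUF file to be present locally. As a convenience, here is a minimal Python sketch (using the `huggingface_hub` library, which is a separate dependency and not part of the LlamaEdge instructions above) that fetches the Q5_K_M file referenced in those commands into the current directory:

```python
# Fetch the quantized model file referenced by the wasmedge commands above.
# Requires: pip install huggingface_hub
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="second-state/Qwen1.5-14B-Chat-GGUF",
    filename="Qwen1.5-14B-Chat-Q5_K_M.gguf",
    local_dir=".",  # place it next to llama-api-server.wasm / llama-chat.wasm
)
print(local_path)
```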
mradermacher/CabraQwen14b-GGUF
mradermacher
2024-05-06T05:42:30Z
600
1
transformers
[ "transformers", "gguf", "text-generation-inference", "qwen", "brazil", "brasil", "14b", "portuguese", "pt", "en", "base_model:nicolasdec/CabraQwen14b", "license:cc", "endpoints_compatible", "region:us" ]
null
2024-03-29T03:30:46Z
--- base_model: nicolasdec/CabraQwen14b language: - pt - en library_name: transformers license: cc quantized_by: mradermacher tags: - text-generation-inference - transformers - qwen - gguf - brazil - brasil - 14b - portuguese --- ## About static quants of https://huggingface.co/nicolasdec/CabraQwen14b <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q2_K.gguf) | Q2_K | 6.9 | | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.IQ3_XS.gguf) | IQ3_XS | 7.5 | | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.IQ3_S.gguf) | IQ3_S | 7.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q3_K_S.gguf) | Q3_K_S | 7.8 | | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.IQ3_M.gguf) | IQ3_M | 8.1 | | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q3_K_M.gguf) | Q3_K_M | 8.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q3_K_L.gguf) | Q3_K_L | 8.9 | | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.IQ4_XS.gguf) | IQ4_XS | 8.9 | | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q4_0.gguf) | Q4_0 | 9.2 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.IQ4_NL.gguf) | IQ4_NL | 9.3 | prefer IQ4_XS | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q4_K_S.gguf) | Q4_K_S | 9.6 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q4_K_M.gguf) | Q4_K_M | 10.2 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q5_K_S.gguf) | Q5_K_S | 11.0 | | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q5_K_M.gguf) | Q5_K_M | 11.5 | | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q6_K.gguf) | Q6_K | 13.3 | very good quality | | [GGUF](https://huggingface.co/mradermacher/CabraQwen14b-GGUF/resolve/main/CabraQwen14b.Q8_0.gguf) | Q8_0 | 15.9 | fast, best quality | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. 
## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
p1atdev/dart-v2-vectors
p1atdev
2024-05-11T05:31:50Z
600
0
transformers
[ "transformers", "safetensors", "feature-extraction", "custom_code", "arxiv:1910.09700", "license:apache-2.0", "region:us" ]
feature-extraction
2024-04-28T10:28:25Z
--- library_name: transformers license: apache-2.0 --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
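
The "How to Get Started" section above is left as "More Information Needed". Based only on the repository tags (`transformers`, `safetensors`, `feature-extraction`, `custom_code`), a hypothetical first attempt would be the generic feature-extraction pipeline with `trust_remote_code=True`; the actual entry point and expected input format depend on the custom code shipped in the repo, so treat this purely as an illustrative sketch:

```python
# Illustrative sketch only: the repo is tagged `feature-extraction` with `custom_code`,
# so the generic pipeline loader with trust_remote_code is the natural first attempt.
# The input string below is a made-up example; the real expected format is undocumented here.
from transformers import pipeline

extractor = pipeline(
    "feature-extraction",
    model="p1atdev/dart-v2-vectors",
    trust_remote_code=True,
)

features = extractor("1girl, solo")
print(len(features[0][0]))  # embedding dimensionality of the first token
```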
Niggendar/cranberryXL_cranberryV1
Niggendar
2024-05-19T19:46:20Z
600
1
diffusers
[ "diffusers", "safetensors", "arxiv:1910.09700", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2024-05-19T19:27:36Z
--- library_name: diffusers --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🧨 diffusers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
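
The usage section above is left unfilled. The repository tags identify this as a Stable Diffusion XL checkpoint in diffusers format (`diffusers:StableDiffusionXLPipeline`, text-to-image), so a minimal, unverified loading sketch would look like the following; the prompt, precision, and device are illustrative placeholders, not recommendations from the model author:

```python
# Minimal text-to-image sketch based on the StableDiffusionXLPipeline tag.
# Prompt, dtype and device are illustrative assumptions.
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "Niggendar/cranberryXL_cranberryV1",
    torch_dtype=torch.float16,
).to("cuda")

image = pipe("a scenic landscape, highly detailed", num_inference_steps=30).images[0]
image.save("output.png")
```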
votepurchase/PVCStyleModelMovable_pony151
votepurchase
2024-06-04T10:16:15Z
600
0
diffusers
[ "diffusers", "safetensors", "ja", "license:mit", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2024-05-28T06:51:08Z
--- license: mit language: - ja library_name: diffusers --- [PVCStyleModelMovable_pony151](https://civitai.com/models/400329/pvc-style-modelmovable-figure-model-pony)
T3Q-LLM/T3Q-LLM-TE-NLI-v1.0
T3Q-LLM
2024-06-23T06:29:12Z
600
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-06-04T05:09:04Z
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> [More Information Needed] ## Evaluation hf-causal-experimental (pretrained=T3Q-LLM/T3Q-LLM-TE-NLI-v1.0,use_accelerate=true,trust_remote_code=true), limit: None, provide_description: False, num_fewshot: 0, batch_size: 8 | Task |Version| Metric |Value | |Stderr| |----------------|------:|--------|-----:|---|-----:| |kobest_boolq | 0|acc |0.9551|± |0.0055| | | |macro_f1|0.9551|± |0.0055| |kobest_copa | 0|acc |0.7880|± |0.0129| | | |macro_f1|0.7876|± |0.0129| |kobest_hellaswag| 0|acc |0.5200|± |0.0224| | | |acc_norm|0.5460|± |0.0223| | | |macro_f1|0.5173|± |0.0224| |kobest_sentineg | 0|acc |0.8866|± |0.0159| | | |macro_f1|0.8865|± |0.0160| hf-causal-experimental (pretrained=intfloat/e5-mistral-7b-instruct,use_accelerate=true,trust_remote_code=true), limit: None, provide_description: False, num_fewshot: 0, batch_size: 8 | Task |Version| Metric |Value | |Stderr| |----------------|------:|--------|-----:|---|-----:| |kobest_boolq | 0|acc |0.5021|± |0.0133| | | |macro_f1|0.3343|± |0.0059| |kobest_copa | 0|acc |0.4800|± |0.0158| | | |macro_f1|0.4796|± |0.0158| |kobest_hellaswag| 0|acc |0.2160|± |0.0184| | | |acc_norm|0.2560|± |0.0195| | | |macro_f1|0.2138|± |0.0182| |kobest_sentineg | 0|acc |0.4962|± |0.0251| | | |macro_f1|0.3316|± |0.0113|
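
For reference, a minimal sketch for loading the model for generation with 🤗 Transformers is shown below. The chat-template usage, example message, and generation settings are assumptions (the card does not document them); the Korean example simply reflects the Korean-language benchmarks reported above.

```python
# Minimal generation sketch; sampling settings and chat-template handling are assumptions.
# device_map="auto" requires the `accelerate` package.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "T3Q-LLM/T3Q-LLM-TE-NLI-v1.0"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.bfloat16, device_map="auto")

# Assumes the tokenizer ships a chat template (the repo is tagged `conversational`);
# otherwise pass a plain prompt string to the tokenizer instead.
messages = [{"role": "user", "content": "안녕하세요, 자기소개를 해주세요."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```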
CHE-72/Baichuan2-7B-Chat-Q3_K_M-GGUF
CHE-72
2024-06-22T10:38:18Z
600
0
null
[ "gguf", "llama-cpp", "gguf-my-repo", "en", "zh", "base_model:baichuan-inc/Baichuan2-7B-Chat", "region:us" ]
null
2024-06-22T10:38:02Z
--- base_model: baichuan-inc/Baichuan2-7B-Chat language: - en - zh license_name: baichuan2-community-license license_link: https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/main/Community%20License%20for%20Baichuan2%20Model.pdf tags: - llama-cpp - gguf-my-repo tasks: - text-generation --- # CHE-72/Baichuan2-7B-Chat-Q3_K_M-GGUF This model was converted to GGUF format from [`baichuan-inc/Baichuan2-7B-Chat`](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo CHE-72/Baichuan2-7B-Chat-Q3_K_M-GGUF --hf-file baichuan2-7b-chat-q3_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo CHE-72/Baichuan2-7B-Chat-Q3_K_M-GGUF --hf-file baichuan2-7b-chat-q3_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo CHE-72/Baichuan2-7B-Chat-Q3_K_M-GGUF --hf-file baichuan2-7b-chat-q3_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo CHE-72/Baichuan2-7B-Chat-Q3_K_M-GGUF --hf-file baichuan2-7b-chat-q3_k_m.gguf -c 2048 ```
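As an alternative to the llama.cpp CLI and server commands above, the same quant can also be run from Python with llama-cpp-python. This is a sketch, not part of the original card; it assumes llama-cpp-python is installed and supports the Baichuan2 architecture.

```python
# Minimal sketch: download the Q3_K_M quant from this repo and run a completion.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

path = hf_hub_download(
    repo_id="CHE-72/Baichuan2-7B-Chat-Q3_K_M-GGUF",
    filename="baichuan2-7b-chat-q3_k_m.gguf",
)
llm = Llama(model_path=path, n_ctx=2048, n_gpu_layers=0)  # raise n_gpu_layers to offload to GPU
print(llm("The meaning to life and the universe is", max_tokens=64)["choices"][0]["text"])
```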
Cran-May/Gemma-2-9B-Chinese-Chat-Q4_K_M-GGUF
Cran-May
2024-07-02T05:02:49Z
600
0
transformers
[ "transformers", "gguf", "llama-factory", "orpo", "llama-cpp", "gguf-my-repo", "text-generation", "en", "zh", "base_model:shenzhi-wang/Gemma-2-9B-Chinese-Chat", "license:gemma", "endpoints_compatible", "region:us" ]
text-generation
2024-07-02T05:02:22Z
--- base_model: shenzhi-wang/Gemma-2-9B-Chinese-Chat language: - en - zh library_name: transformers license: gemma pipeline_tag: text-generation tags: - llama-factory - orpo - llama-cpp - gguf-my-repo --- # Cran-May/Gemma-2-9B-Chinese-Chat-Q4_K_M-GGUF This model was converted to GGUF format from [`shenzhi-wang/Gemma-2-9B-Chinese-Chat`](https://huggingface.co/shenzhi-wang/Gemma-2-9B-Chinese-Chat) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/shenzhi-wang/Gemma-2-9B-Chinese-Chat) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo Cran-May/Gemma-2-9B-Chinese-Chat-Q4_K_M-GGUF --hf-file gemma-2-9b-chinese-chat-q4_k_m-imat.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Cran-May/Gemma-2-9B-Chinese-Chat-Q4_K_M-GGUF --hf-file gemma-2-9b-chinese-chat-q4_k_m-imat.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo Cran-May/Gemma-2-9B-Chinese-Chat-Q4_K_M-GGUF --hf-file gemma-2-9b-chinese-chat-q4_k_m-imat.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo Cran-May/Gemma-2-9B-Chinese-Chat-Q4_K_M-GGUF --hf-file gemma-2-9b-chinese-chat-q4_k_m-imat.gguf -c 2048 ```
Anjoe/Bundestag-gpt2-large
Anjoe
2023-01-07T08:16:37Z
599
0
transformers
[ "transformers", "pytorch", "tensorboard", "gpt2", "text-generation", "generated_from_trainer", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2022-12-09T13:01:53Z
--- license: mit tags: - generated_from_trainer model-index: - name: Bundestag-gpt2-large results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Bundestag-gpt2-large This model is a fine-tuned version of [benjamin/gerpt2-large](https://huggingface.co/benjamin/gerpt2-large) on the None dataset. It achieves the following results on the evaluation set: - Loss: 2.8236 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 2 - eval_batch_size: 2 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 500 - num_epochs: 4 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:------:|:---------------:| | 2.7899 | 1.0 | 32852 | 2.6879 | | 2.4421 | 2.0 | 65704 | 2.6749 | | 2.206 | 3.0 | 98556 | 2.7354 | | 1.9544 | 4.0 | 131408 | 2.8236 | ### Framework versions - Transformers 4.25.1 - Pytorch 1.13.0+cu116 - Datasets 2.8.0 - Tokenizers 0.13.2
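The card above documents the training setup but gives no inference example. A minimal usage sketch with the transformers pipeline API (not part of the original card; the German prompt is illustrative only):

```python
# Minimal sketch: sample German text from the fine-tuned GPT-2 checkpoint.
from transformers import pipeline

generator = pipeline("text-generation", model="Anjoe/Bundestag-gpt2-large")
result = generator(
    "Sehr geehrte Damen und Herren,",  # illustrative prompt
    max_new_tokens=60,
    do_sample=True,
    top_p=0.95,
)
print(result[0]["generated_text"])
```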
sayakpaul/sd-model-finetuned-lora-t4
sayakpaul
2023-04-18T09:47:44Z
599
31
diffusers
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "lora", "base_model:CompVis/stable-diffusion-v1-4", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2023-01-19T22:29:40Z
---
license: creativeml-openrail-m
base_model: CompVis/stable-diffusion-v1-4
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- lora
inference: true
---

# LoRA text2image fine-tuning - https://huggingface.co/sayakpaul/sd-model-finetuned-lora-t4

These are LoRA adaptation weights for [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4). The weights were fine-tuned on the lambdalabs/pokemon-blip-captions dataset. You can find some example images below.

![img_0](./image_0.png)
![img_1](./image_1.png)
![img_2](./image_2.png)
![img_3](./image_3.png)
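A minimal inference sketch for these weights (not part of the original card): load the stated base model and apply the LoRA weights from this repo. Depending on the installed diffusers version, `pipe.unet.load_attn_procs(...)` may be needed instead of `load_lora_weights`.

```python
# Minimal sketch: base Stable Diffusion v1-4 plus the LoRA weights from this repo.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")
pipe.load_lora_weights("sayakpaul/sd-model-finetuned-lora-t4")

image = pipe("a green pokemon with red eyes", num_inference_steps=30).images[0]
image.save("pokemon.png")
```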
kakaobrain/karlo-v1-alpha-image-variations
kakaobrain
2023-01-31T08:27:48Z
599
7
diffusers
[ "diffusers", "text-to-image", "license:creativeml-openrail-m", "diffusers:UnCLIPImageVariationPipeline", "region:us" ]
text-to-image
2023-01-30T19:46:46Z
---
license: creativeml-openrail-m
tags:
- text-to-image
---

# Karlo v1 alpha

Karlo is a text-conditional image generation model based on OpenAI's unCLIP architecture. It improves on the standard super-resolution model by upscaling 64px to 256px while recovering high-frequency details in only a small number of denoising steps.

* [Original codebase](https://github.com/kakaobrain/karlo)

## Usage

Karlo is available in diffusers!

```bash
pip install diffusers transformers accelerate safetensors
```

### Text to image

```python
from diffusers import UnCLIPPipeline
import torch

pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16)
pipe = pipe.to('cuda')

prompt = "a high-resolution photograph of a big red frog on a green leaf."
image = pipe([prompt]).images[0]
image.save("./frog.png")
```

![img](https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/frog.png)

### Image variation

```python
from diffusers import UnCLIPImageVariationPipeline
import torch
from PIL import Image

pipe = UnCLIPImageVariationPipeline.from_pretrained("kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16)
pipe = pipe.to('cuda')

image = Image.open("./frog.png")
image = pipe(image).images[0]
image.save("./frog-variation.png")
```

![img](https://huggingface.co/datasets/williamberman/images/resolve/main/frog-variation.png)

## Model Architecture

### Overview

Karlo is a text-conditional diffusion model based on unCLIP, composed of prior, decoder, and super-resolution modules. In this repository, we include an improved version of the standard super-resolution module that upscales 64px to 256px in only 7 reverse steps, as illustrated in the figure below:

<p float="left">
  <img src="https://raw.githubusercontent.com/kakaobrain/karlo/main/assets/improved_sr_arch.jpg"/>
</p>

Specifically, the standard SR module trained with the DDPM objective upscales 64px to 256px in the first 6 denoising steps, based on the respacing technique. An additional fine-tuned SR module, trained with a [VQ-GAN](https://compvis.github.io/taming-transformers/)-style loss, then performs the final reverse step to recover high-frequency details. We observe that this approach is very effective for upscaling low-resolution images in a small number of reverse steps.

### Details

We train all components from scratch on 115M image-text pairs, including COYO-100M, CC3M, and CC12M. For the prior and decoder, we use ViT-L/14 provided by OpenAI's [CLIP repository](https://github.com/openai/CLIP). Unlike the original implementation of unCLIP, we replace the trainable transformer in the decoder with the text encoder of ViT-L/14 for efficiency. For the SR module, we first train the model using the DDPM objective for 1M steps, followed by an additional 234K steps to fine-tune the extra component.
The table below summarizes the important statistics of our components: | | Prior | Decoder | SR | |:------|----:|----:|----:| | CLIP | ViT-L/14 | ViT-L/14 | - | | #param | 1B | 900M | 700M + 700M | | #optimization steps | 1M | 1M | 1M + 0.2M | | #sampling steps | 25 | 50 (default), 25 (fast) | 7 | |Checkpoint links| [ViT-L-14](https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/096db1af569b284eb76b3881534822d9/ViT-L-14.pt), [ViT-L-14 stats](https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/0b62380a75e56f073e2844ab5199153d/ViT-L-14_stats.th), [model](https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/efdf6206d8ed593961593dc029a8affa/decoder-ckpt-step%3D01000000-of-01000000.ckpt) | [model](https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/85626483eaca9f581e2a78d31ff905ca/prior-ckpt-step%3D01000000-of-01000000.ckpt) | [model](https://arena.kakaocdn.net/brainrepo/models/karlo-public/v1.0.0.alpha/4226b831ae0279020d134281f3c31590/improved-sr-ckpt-step%3D1.2M.ckpt) | In the checkpoint links, ViT-L-14 is equivalent to the original version, but we include it for convenience. We also remark that ViT-L-14-stats is required to normalize the outputs of the prior module. ### Evaluation We quantitatively measure the performance of Karlo-v1.0.alpha in the validation split of CC3M and MS-COCO. The table below presents CLIP-score and FID. To measure FID, we resize the image of the shorter side to 256px, followed by cropping it at the center. We set classifier-free guidance scales for prior and decoder to 4 and 8 in all cases. We observe that our model achieves reasonable performance even with 25 sampling steps of decoder. CC3M | Sampling step | CLIP-s (ViT-B/16) | FID (13k from val)| |:------|----:|----:| | Prior (25) + Decoder (25) + SR (7) | 0.3081 | 14.37 | | Prior (25) + Decoder (50) + SR (7) | 0.3086 | 13.95 | MS-COCO | Sampling step | CLIP-s (ViT-B/16) | FID (30k from val)| |:------|----:|----:| | Prior (25) + Decoder (25) + SR (7) | 0.3192 | 15.24 | | Prior (25) + Decoder (50) + SR (7) | 0.3192 | 14.43 | For more information, please refer to the upcoming technical report. ### Training Details This alpha version of Karlo is trained on 115M image-text pairs, including [COYO](https://github.com/kakaobrain/coyo-dataset)-100M high-quality subset, CC3M, and CC12M. For those who are interested in a better version of Karlo trained on more large-scale high-quality datasets, please visit the landing page of our application [B^DISCOVER](https://bdiscover.kakaobrain.com/). ## BibTex If you find this repository useful in your research, please cite: ``` @misc{kakaobrain2022karlo-v1-alpha, title = {Karlo-v1.0.alpha on COYO-100M and CC15M}, author = {Donghoon Lee, Jiseob Kim, Jisu Choi, Jongmin Kim, Minwoo Byeon, Woonhyuk Baek and Saehoon Kim}, year = {2022}, howpublished = {\url{https://github.com/kakaobrain/karlo}}, } ```
timm/poolformer_s24.sail_in1k
timm
2023-05-05T06:17:18Z
599
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:2210.13452", "license:apache-2.0", "region:us" ]
image-classification
2023-05-05T06:16:53Z
---
tags:
- image-classification
- timm
library_name: timm
license: apache-2.0
datasets:
- imagenet-1k
---

# Model card for poolformer_s24.sail_in1k

A PoolFormer (a MetaFormer) image classification model. Trained on ImageNet-1k by paper authors.

## Model Details
- **Model Type:** Image classification / feature backbone
- **Model Stats:**
  - Params (M): 21.4
  - GMACs: 3.4
  - Activations (M): 10.7
  - Image size: 224 x 224
- **Papers:**
  - MetaFormer Is Actually What You Need for Vision: https://arxiv.org/abs/2210.13452
- **Original:** https://github.com/sail-sg/poolformer
- **Dataset:** ImageNet-1k

## Model Usage
### Image Classification
```python
from urllib.request import urlopen
from PIL import Image
import torch
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model('poolformer_s24.sail_in1k', pretrained=True)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5)
```

### Feature Map Extraction
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'poolformer_s24.sail_in1k',
    pretrained=True,
    features_only=True,
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # unsqueeze single image into batch of 1

for o in output:
    # print shape of each feature map in output
    # e.g.:
    #  torch.Size([1, 64, 56, 56])
    #  torch.Size([1, 128, 28, 28])
    #  torch.Size([1, 320, 14, 14])
    #  torch.Size([1, 512, 7, 7])
    print(o.shape)
```

### Image Embeddings
```python
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))

model = timm.create_model(
    'poolformer_s24.sail_in1k',
    pretrained=True,
    num_classes=0,  # remove classifier nn.Linear
)
model = model.eval()

# get model specific transforms (normalization, resize)
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))  # output is (batch_size, num_features) shaped tensor

# or equivalently (without needing to set num_classes=0)
output = model.forward_features(transforms(img).unsqueeze(0))
# output is unpooled, a (1, 512, 7, 7) shaped tensor

output = model.forward_head(output, pre_logits=True)
# output is a (1, num_features) shaped tensor
```

## Model Comparison
Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results).
## Citation ```bibtex @inproceedings{yu2022metaformer, title={Metaformer is actually what you need for vision}, author={Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={10819--10829}, year={2022} } ```
TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF
TheBloke
2023-10-05T16:21:56Z
599
34
transformers
[ "transformers", "gguf", "mistral", "mistral-7b", "instruct", "finetune", "gpt4", "synthetic data", "distillation", "sharegpt", "en", "dataset:CollectiveCognition/chats-data-2023-09-27", "base_model:teknium/CollectiveCognition-v1.1-Mistral-7B", "license:apache-2.0", "text-generation-inference", "region:us" ]
null
2023-10-05T16:15:04Z
--- base_model: teknium/CollectiveCognition-v1.1-Mistral-7B datasets: - CollectiveCognition/chats-data-2023-09-27 inference: false language: - en license: apache-2.0 model-index: - name: CollectiveCognition-v1-Mistral-7B results: [] model_creator: Teknium model_name: CollectiveCognition v1.1 Mistral 7B model_type: mistral prompt_template: 'USER: {prompt} ASSISTANT: ' quantized_by: TheBloke tags: - mistral-7b - instruct - finetune - gpt4 - synthetic data - distillation - sharegpt --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # CollectiveCognition v1.1 Mistral 7B - GGUF - Model creator: [Teknium](https://huggingface.co/teknium) - Original model: [CollectiveCognition v1.1 Mistral 7B](https://huggingface.co/teknium/CollectiveCognition-v1.1-Mistral-7B) <!-- description start --> ## Description This repo contains GGUF format model files for [Teknium's CollectiveCognition v1.1 Mistral 7B](https://huggingface.co/teknium/CollectiveCognition-v1.1-Mistral-7B). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplate list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. 
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server.
* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use.

<!-- README_GGUF.md-about-gguf end -->

<!-- repositories-available start -->
## Repositories available

* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-AWQ)
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GPTQ)
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF)
* [Teknium's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/teknium/CollectiveCognition-v1.1-Mistral-7B)
<!-- repositories-available end -->

<!-- prompt-template start -->
## Prompt template: User-Assistant

```
USER: {prompt}
ASSISTANT:
```

<!-- prompt-template end -->

<!-- compatibility_gguf start -->
## Compatibility

These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221)

They are also compatible with many third party UIs and libraries - please see the list at the top of this README.

## Explanation of quantisation methods
<details>
  <summary>Click to see details</summary>

The new methods available are:
* GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
* GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
* GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
* GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw
* GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw

Refer to the Provided Files table below to see what files use which methods, and how.
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [collectivecognition-v1.1-mistral-7b.Q2_K.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q2_K.gguf) | Q2_K | 2 | 3.08 GB| 5.58 GB | smallest, significant quality loss - not recommended for most purposes | | [collectivecognition-v1.1-mistral-7b.Q3_K_S.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q3_K_S.gguf) | Q3_K_S | 3 | 3.16 GB| 5.66 GB | very small, high quality loss | | [collectivecognition-v1.1-mistral-7b.Q3_K_M.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q3_K_M.gguf) | Q3_K_M | 3 | 3.52 GB| 6.02 GB | very small, high quality loss | | [collectivecognition-v1.1-mistral-7b.Q3_K_L.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q3_K_L.gguf) | Q3_K_L | 3 | 3.82 GB| 6.32 GB | small, substantial quality loss | | [collectivecognition-v1.1-mistral-7b.Q4_0.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q4_0.gguf) | Q4_0 | 4 | 4.11 GB| 6.61 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [collectivecognition-v1.1-mistral-7b.Q4_K_S.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q4_K_S.gguf) | Q4_K_S | 4 | 4.14 GB| 6.64 GB | small, greater quality loss | | [collectivecognition-v1.1-mistral-7b.Q4_K_M.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q4_K_M.gguf) | Q4_K_M | 4 | 4.37 GB| 6.87 GB | medium, balanced quality - recommended | | [collectivecognition-v1.1-mistral-7b.Q5_0.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q5_0.gguf) | Q5_0 | 5 | 5.00 GB| 7.50 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [collectivecognition-v1.1-mistral-7b.Q5_K_S.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q5_K_S.gguf) | Q5_K_S | 5 | 5.00 GB| 7.50 GB | large, low quality loss - recommended | | [collectivecognition-v1.1-mistral-7b.Q5_K_M.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q5_K_M.gguf) | Q5_K_M | 5 | 5.13 GB| 7.63 GB | large, very low quality loss - recommended | | [collectivecognition-v1.1-mistral-7b.Q6_K.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q6_K.gguf) | Q6_K | 6 | 5.94 GB| 8.44 GB | very large, extremely low quality loss | | [collectivecognition-v1.1-mistral-7b.Q8_0.gguf](https://huggingface.co/TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF/blob/main/collectivecognition-v1.1-mistral-7b.Q8_0.gguf) | Q8_0 | 8 | 7.70 GB| 10.20 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. 
<!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. The following clients/libraries will automatically download models for you, providing a list of available models to choose from: - LM Studio - LoLLMS Web UI - Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF and below it, a specific filename to download, such as: collectivecognition-v1.1-mistral-7b.Q4_K_M.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF collectivecognition-v1.1-mistral-7b.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF collectivecognition-v1.1-mistral-7b.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 32 -m collectivecognition-v1.1-mistral-7b.Q4_K_M.gguf --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "USER: {prompt}\nASSISTANT:" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 2048` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). 
## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. ### How to load this model in Python code, using ctransformers #### First install the package Run one of the following commands, according to your system: ```shell # Base ctransformers with no GPU acceleration pip install ctransformers # Or with CUDA GPU acceleration pip install ctransformers[cuda] # Or with AMD ROCm GPU acceleration (Linux only) CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers # Or with Metal GPU acceleration for macOS systems only CT_METAL=1 pip install ctransformers --no-binary ctransformers ``` #### Simple ctransformers example code ```python from ctransformers import AutoModelForCausalLM # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = AutoModelForCausalLM.from_pretrained("TheBloke/CollectiveCognition-v1.1-Mistral-7B-GGUF", model_file="collectivecognition-v1.1-mistral-7b.Q4_K_M.gguf", model_type="mistral", gpu_layers=50) print(llm("AI is going to")) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](llm-utils)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. 
**Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: Teknium's CollectiveCognition v1.1 Mistral 7B **Collective Cognition v1.1 - Mistral 7B** <div style="display: flex; justify-content: center;"> <a href="https://collectivecognition.ai" target="_blank" style="display: inline-block; text-align: center;"> <img src="https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/DNZXsJE5oC_rM8eYY6H_x.png" alt="Collective Cognition Logo" width="50%" style="display: block; margin: 0 auto;"> </a> </div> ## Model Description: Collective Cognition v1.1 is a state-of-the-art model fine-tuned using the Mistral approach. This model is particularly notable for its performance, outperforming many 70B models on the TruthfulQA benchmark. This benchmark assesses models for common misconceptions, potentially indicating hallucination rates. ## Special Features: - **Quick Training**: This model was trained in just 3 minutes on a single 4090 with a qlora, and competes with 70B scale Llama-2 Models at TruthfulQA. - **Limited Data**: Despite its exceptional performance, it was trained on only ONE HUNDRED data points, all of which were gathered from a platform reminiscent of ShareGPT. - **Extreme TruthfulQA Benchmark**: This model is competing strongly with top 70B models on the TruthfulQA benchmark despite the small dataset and qlora training! ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/-pnifxPcMeeUONyE3efo3.png) ## Acknowledgements: Special thanks to @a16z and all contributors to the Collective Cognition dataset for making the development of this model possible. 
## Dataset: The model was trained using data from the Collective Cognition website. The efficacy of this dataset is demonstrated by the model's stellar performance, suggesting that further expansion of this dataset could yield even more promising results. The data is reminiscent of that collected from platforms like ShareGPT. You can contribute to the growth of the dataset by sharing your own ChatGPT chats [here](https://CollectiveCognition.ai). You can download the datasets created by Collective Cognition here: https://huggingface.co/CollectiveCognition ## Performance: - **TruthfulQA**: Collective Cognition v1.1 has notably outperformed various 70B models on the TruthfulQA benchmark, highlighting its ability to understand and rectify common misconceptions. ## Usage: Prompt Format: ``` USER: <prompt> ASSISTANT: ``` OR ``` <system message> USER: <prompt> ASSISTANT: ``` ## Benchmarks: Collective Cognition v1.0 TruthfulQA: ``` | Task |Version|Metric|Value | |Stderr| |-------------|------:|------|-----:|---|-----:| |truthfulqa_mc| 1|mc1 |0.4051|± |0.0172| | | |mc2 |0.5738|± |0.0157| ``` Collective Cognition v1.1 GPT4All: ``` | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.5085|± |0.0146| | | |acc_norm|0.5384|± |0.0146| |arc_easy | 0|acc |0.7963|± |0.0083| | | |acc_norm|0.7668|± |0.0087| |boolq | 1|acc |0.8495|± |0.0063| |hellaswag | 0|acc |0.6399|± |0.0048| | | |acc_norm|0.8247|± |0.0038| |openbookqa | 0|acc |0.3240|± |0.0210| | | |acc_norm|0.4540|± |0.0223| |piqa | 0|acc |0.7992|± |0.0093| | | |acc_norm|0.8107|± |0.0091| |winogrande | 0|acc |0.7348|± |0.0124| Average: 71.13 ``` AGIEval: ``` | Task |Version| Metric |Value | |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.1929|± |0.0248| | | |acc_norm|0.2008|± |0.0252| |agieval_logiqa_en | 0|acc |0.3134|± |0.0182| | | |acc_norm|0.3333|± |0.0185| |agieval_lsat_ar | 0|acc |0.2217|± |0.0275| | | |acc_norm|0.2043|± |0.0266| |agieval_lsat_lr | 0|acc |0.3412|± |0.0210| | | |acc_norm|0.3216|± |0.0207| |agieval_lsat_rc | 0|acc |0.4721|± |0.0305| | | |acc_norm|0.4201|± |0.0301| |agieval_sat_en | 0|acc |0.6068|± |0.0341| | | |acc_norm|0.5777|± |0.0345| |agieval_sat_en_without_passage| 0|acc |0.3932|± |0.0341| | | |acc_norm|0.3641|± |0.0336| |agieval_sat_math | 0|acc |0.2864|± |0.0305| | | |acc_norm|0.2636|± |0.0298| Average: 33.57 ``` Training run on wandb here: https://wandb.ai/teknium1/collectivecognition-mistral-7b/runs/collectivecognition-mistral-8/workspace ## Licensing: Apache 2.0 --- <!-- original-model-card end -->
EpicJhon/5CdR1ddsYrxESaXKbVv4rYdCHVnXbwNQmAqRxV2xGud1esHH_vgg
EpicJhon
2024-03-24T05:32:25Z
599
0
keras
[ "keras", "region:us" ]
null
2024-02-25T12:17:43Z
Entry not found
Artefact2/Mixtral-8x7B-v0.1-GGUF
Artefact2
2024-03-11T18:12:59Z
599
2
null
[ "gguf", "en", "license:apache-2.0", "region:us" ]
null
2024-02-26T16:39:22Z
--- language: - en license: apache-2.0 --- These are GGUF quantized versions of [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1). The importance matrix was trained for 100K tokens (200 batches of 512 tokens) using `wiki.train.raw`. Some model files above 50GB are split into smaller files. To concatenate them, use the `cat` command (on Windows, use PowerShell): `cat foo-Q6_K.gguf.* > foo-Q6_K.gguf` * What quant do I need? See https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 * Quant requests? Just open a discussion in the community tabs.
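A minimal Python sketch for loading one of these quants with llama-cpp-python (not part of the original card). The file name below is an assumption, so check the repo listing, and split files must be concatenated first as described above.

```python
# Minimal sketch: fetch a single-file quant and run a raw completion. This is a base
# model, not an instruct model, so plain text continuation is the intended usage.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

path = hf_hub_download(
    repo_id="Artefact2/Mixtral-8x7B-v0.1-GGUF",
    filename="Mixtral-8x7B-v0.1-Q4_K_M.gguf",  # hypothetical file name; check the repo
)
llm = Llama(model_path=path, n_ctx=4096, n_gpu_layers=0)  # raise n_gpu_layers to offload to GPU
print(llm("The quick brown fox", max_tokens=64)["choices"][0]["text"])
```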
qwp4w3hyb/Meta-Llama-3-70B-Instruct-iMat-GGUF
qwp4w3hyb
2024-05-08T23:32:11Z
599
3
null
[ "safetensors", "gguf", "facebook", "meta", "pytorch", "llama", "llama-3", "imatrix", "importance matrix", "text-generation", "conversational", "en", "base_model:meta-llama/Meta-Llama-3-70B-Instruct", "license:other", "region:us" ]
text-generation
2024-04-21T20:43:13Z
--- base_model: meta-llama/Meta-Llama-3-70B-Instruct language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-3 - gguf - imatrix - importance matrix license: other license_name: llama3 license_link: LICENSE --- # Quant Infos - NOT Updated for new pre-tokenizer fixes (yet), I recommend using bartowski's quants. https://huggingface.co/bartowski/Meta-Llama-3-70B-Instruct-GGUF - quants done with an importance matrix for improved quantization loss - K & IQ quants in basically all variants - fixed end token for instruct mode (<|eot_id|>[128009]) - files larger than 50GB were split using the gguf-split utility, just download all parts and point llama.cpp to the first one (00001-of-x) Quantized with [llama.cpp](https://github.com/ggerganov/llama.cpp) commit with tokenizer fixes from [this](https://github.com/ggerganov/llama.cpp/pull/6745) branch cherry-picked [0d56246f4b9764158525d894b96606f6163c53a8](https://github.com/ggerganov/llama.cpp/commit/0d56246f4b9764158525d894b96606f6163c53a8) (master from 2024-04-18) Imatrix dataset was used from [here](https://github.com/ggerganov/llama.cpp/discussions/5263#discussioncomment-8395384) Using this command to generate the importance matrix from the f16.gguf ``` ./imatrix -c 512 -m $model_name-f16.gguf -f $llama_cpp_path/groups_merged.txt -o $out_path/imat-f16-gmerged.dat ``` ## Note about eos token It seems llama 3 uses different eos tokens depending if it is in instruct mode. Therefore I have manually set the eos token to 128009 for these quants. In my testing this works fine, provide you you make sure to use the correct chat template. I recommend launching llama.cpp with `--chat-template llama3` (make sure to use a newish version which has the PR for this merged). # Original Model Card: ## Model Details Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety. **Model developers** Meta **Variations** Llama 3 comes in two sizes — 8B and 70B parameters — in pre-trained and instruction tuned variants. **Input** Models input text only. **Output** Models generate text and code only. **Model Architecture** Llama 3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. <table> <tr> <td> </td> <td><strong>Training Data</strong> </td> <td><strong>Params</strong> </td> <td><strong>Context length</strong> </td> <td><strong>GQA</strong> </td> <td><strong>Token count</strong> </td> <td><strong>Knowledge cutoff</strong> </td> </tr> <tr> <td rowspan="2" >Llama 3 </td> <td rowspan="2" >A new mix of publicly available online data. </td> <td>8B </td> <td>8k </td> <td>Yes </td> <td rowspan="2" >15T+ </td> <td>March, 2023 </td> </tr> <tr> <td>70B </td> <td>8k </td> <td>Yes </td> <td>December, 2023 </td> </tr> </table> **Llama 3 family of models**. Token counts refer to pretraining data only. Both the 8 and 70B versions use Grouped-Query Attention (GQA) for improved inference scalability. **Model Release Date** April 18, 2024. 
**Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://llama.meta.com/llama3/license](https://llama.meta.com/llama3/license) Where to send questions or comments about the model Instructions on how to provide feedback or comments on the model can be found in the model [README](https://github.com/meta-llama/llama3). For more technical information about generation parameters and recipes for how to use Llama 3 in applications, please go [here](https://github.com/meta-llama/llama-recipes). ## Intended Use **Intended Use Cases** Llama 3 is intended for commercial and research use in English. Instruction tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. **Out-of-scope** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the Acceptable Use Policy and Llama 3 Community License. Use in languages other than English**. **Note: Developers may fine-tune Llama 3 models for languages beyond English provided they comply with the Llama 3 Community License and the Acceptable Use Policy. ## How to use This repository contains two versions of Meta-Llama-3-70B-Instruct, for use with transformers and with the original `llama3` codebase. ### Use with transformers See the snippet below for usage with Transformers: ```python import transformers import torch model_id = "meta-llama/Meta-Llama-3-70B-Instruct" pipeline = transformers.pipeline( "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device="auto", ) messages = [ {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"}, {"role": "user", "content": "Who are you?"}, ] prompt = pipeline.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) terminators = [ pipeline.tokenizer.eos_token_id, pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>") ] outputs = pipeline( prompt, max_new_tokens=256, eos_token_id=terminators, do_sample=True, temperature=0.6, top_p=0.9, ) print(outputs[0]["generated_text"][len(prompt):]) ``` ### Use with `llama3` Please, follow the instructions in the [repository](https://github.com/meta-llama/llama3). To download Original checkpoints, see the example command below leveraging `huggingface-cli`: ``` huggingface-cli download meta-llama/Meta-Llama-3-70B-Instruct --include "original/*" --local-dir Meta-Llama-3-70B-Instruct ``` For Hugging Face support, we recommend using transformers or TGI, but a similar command works. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research SuperCluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint Pretraining utilized a cumulative** 7.7M GPU hours of computation on hardware of type H100-80GB (TDP of 700W). Estimated total emissions were 2290 tCO2eq, 100% of which were offset by Meta’s sustainability program. 
<table> <tr> <td> </td> <td><strong>Time (GPU hours)</strong> </td> <td><strong>Power Consumption (W)</strong> </td> <td><strong>Carbon Emitted(tCO2eq)</strong> </td> </tr> <tr> <td>Llama 3 8B </td> <td>1.3M </td> <td>700 </td> <td>390 </td> </tr> <tr> <td>Llama 3 70B </td> <td>6.4M </td> <td>700 </td> <td>1900 </td> </tr> <tr> <td>Total </td> <td>7.7M </td> <td> </td> <td>2290 </td> </tr> </table> **CO2 emissions during pre-training**. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. ## Training Data **Overview** Llama 3 was pretrained on over 15 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over 10M human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of March 2023 for the 7B and December 2023 for the 70B models respectively. ## Benchmarks In this section, we report the results for Llama 3 models on standard automatic benchmarks. For all the evaluations, we use our internal evaluations library. For details on the methodology see [here](https://github.com/meta-llama/llama3/blob/main/eval_methodology.md). ### Base pretrained models <table> <tr> <td><strong>Category</strong> </td> <td><strong>Benchmark</strong> </td> <td><strong>Llama 3 8B</strong> </td> <td><strong>Llama2 7B</strong> </td> <td><strong>Llama2 13B</strong> </td> <td><strong>Llama 3 70B</strong> </td> <td><strong>Llama2 70B</strong> </td> </tr> <tr> <td rowspan="6" >General </td> <td>MMLU (5-shot) </td> <td>66.6 </td> <td>45.7 </td> <td>53.8 </td> <td>79.5 </td> <td>69.7 </td> </tr> <tr> <td>AGIEval English (3-5 shot) </td> <td>45.9 </td> <td>28.8 </td> <td>38.7 </td> <td>63.0 </td> <td>54.8 </td> </tr> <tr> <td>CommonSenseQA (7-shot) </td> <td>72.6 </td> <td>57.6 </td> <td>67.6 </td> <td>83.8 </td> <td>78.7 </td> </tr> <tr> <td>Winogrande (5-shot) </td> <td>76.1 </td> <td>73.3 </td> <td>75.4 </td> <td>83.1 </td> <td>81.8 </td> </tr> <tr> <td>BIG-Bench Hard (3-shot, CoT) </td> <td>61.1 </td> <td>38.1 </td> <td>47.0 </td> <td>81.3 </td> <td>65.7 </td> </tr> <tr> <td>ARC-Challenge (25-shot) </td> <td>78.6 </td> <td>53.7 </td> <td>67.6 </td> <td>93.0 </td> <td>85.3 </td> </tr> <tr> <td>Knowledge reasoning </td> <td>TriviaQA-Wiki (5-shot) </td> <td>78.5 </td> <td>72.1 </td> <td>79.6 </td> <td>89.7 </td> <td>87.5 </td> </tr> <tr> <td rowspan="4" >Reading comprehension </td> <td>SQuAD (1-shot) </td> <td>76.4 </td> <td>72.2 </td> <td>72.1 </td> <td>85.6 </td> <td>82.6 </td> </tr> <tr> <td>QuAC (1-shot, F1) </td> <td>44.4 </td> <td>39.6 </td> <td>44.9 </td> <td>51.1 </td> <td>49.4 </td> </tr> <tr> <td>BoolQ (0-shot) </td> <td>75.7 </td> <td>65.5 </td> <td>66.9 </td> <td>79.0 </td> <td>73.1 </td> </tr> <tr> <td>DROP (3-shot, F1) </td> <td>58.4 </td> <td>37.9 </td> <td>49.8 </td> <td>79.7 </td> <td>70.2 </td> </tr> </table> ### Instruction tuned models <table> <tr> <td><strong>Benchmark</strong> </td> <td><strong>Llama 3 8B</strong> </td> <td><strong>Llama 2 7B</strong> </td> <td><strong>Llama 2 13B</strong> </td> <td><strong>Llama 3 70B</strong> </td> <td><strong>Llama 2 70B</strong> </td> </tr> <tr> <td>MMLU (5-shot) </td> <td>68.4 </td> <td>34.1 
</td> <td>47.8 </td> <td>82.0 </td> <td>52.9 </td> </tr> <tr> <td>GPQA (0-shot) </td> <td>34.2 </td> <td>21.7 </td> <td>22.3 </td> <td>39.5 </td> <td>21.0 </td> </tr> <tr> <td>HumanEval (0-shot) </td> <td>62.2 </td> <td>7.9 </td> <td>14.0 </td> <td>81.7 </td> <td>25.6 </td> </tr> <tr> <td>GSM-8K (8-shot, CoT) </td> <td>79.6 </td> <td>25.7 </td> <td>77.4 </td> <td>93.0 </td> <td>57.5 </td> </tr> <tr> <td>MATH (4-shot, CoT) </td> <td>30.0 </td> <td>3.8 </td> <td>6.7 </td> <td>50.4 </td> <td>11.6 </td> </tr> </table> ### Responsibility & Safety We believe that an open approach to AI leads to better, safer products, faster innovation, and a bigger overall market. We are committed to Responsible AI development and took a series of steps to limit misuse and harm and support the open source community. Foundation models are widely capable technologies that are built to be used for a diverse range of applications. They are not designed to meet every developer preference on safety levels for all use cases, out-of-the-box, as those by their nature will differ across different applications. Rather, responsible LLM-application deployment is achieved by implementing a series of safety best practices throughout the development of such applications, from the model pre-training, fine-tuning and the deployment of systems composed of safeguards to tailor the safety needs specifically to the use case and audience. As part of the Llama 3 release, we updated our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide/) to outline the steps and best practices for developers to implement model and system level safety for their application. We also provide a set of resources including [Meta Llama Guard 2](https://llama.meta.com/purple-llama/) and [Code Shield](https://llama.meta.com/purple-llama/) safeguards. These tools have proven to drastically reduce residual risks of LLM Systems, while maintaining a high level of helpfulness. We encourage developers to tune and deploy these safeguards according to their needs and we provide a [reference implementation](https://github.com/meta-llama/llama-recipes/tree/main/recipes/responsible_ai) to get you started. #### Llama 3-Instruct As outlined in the Responsible Use Guide, some trade-off between model helpfulness and model alignment is likely unavoidable. Developers should exercise discretion about how to weigh the benefits of alignment and helpfulness for their specific use case and audience. Developers should be mindful of residual risks when using Llama models and leverage additional safety tools as needed to reach the right safety bar for their use case. <span style="text-decoration:underline;">Safety</span> For our instruction tuned model, we conducted extensive red teaming exercises, performed adversarial evaluations and implemented safety mitigations techniques to lower residual risks. As with any Large Language Model, residual risks will likely remain and we recommend that developers assess these risks in the context of their use case. In parallel, we are working with the community to make AI safety benchmark standards transparent, rigorous and interpretable. <span style="text-decoration:underline;">Refusals</span> In addition to residual risks, we put a great emphasis on model refusals to benign prompts. Over-refusing not only can impact the user experience but could even be harmful in certain contexts as well. 
We’ve heard the feedback from the developer community and improved our fine tuning to ensure that Llama 3 is significantly less likely to falsely refuse to answer prompts than Llama 2. We built internal benchmarks and developed mitigations to limit false refusals making Llama 3 our most helpful model to date. #### Responsible release In addition to responsible use considerations outlined above, we followed a rigorous process that requires us to take extra measures against misuse and critical risks before we make our release decision. Misuse If you access or use Llama 3, you agree to the Acceptable Use Policy. The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy/](https://llama.meta.com/llama3/use-policy/). #### Critical risks <span style="text-decoration:underline;">CBRNE</span> (Chemical, Biological, Radiological, Nuclear, and high yield Explosives) We have conducted a two fold assessment of the safety of the model in this area: * Iterative testing during model training to assess the safety of responses related to CBRNE threats and other adversarial risks. * Involving external CBRNE experts to conduct an uplift test assessing the ability of the model to accurately provide expert knowledge and reduce barriers to potential CBRNE misuse, by reference to what can be achieved using web search (without the model). ### <span style="text-decoration:underline;">Cyber Security </span> We have evaluated Llama 3 with CyberSecEval, Meta’s cybersecurity safety eval suite, measuring Llama 3’s propensity to suggest insecure code when used as a coding assistant, and Llama 3’s propensity to comply with requests to help carry out cyber attacks, where attacks are defined by the industry standard MITRE ATT&CK cyber attack ontology. On our insecure coding and cyber attacker helpfulness tests, Llama 3 behaved in the same range or safer than models of [equivalent coding capability](https://huggingface.co/spaces/facebook/CyberSecEval). ### <span style="text-decoration:underline;">Child Safety</span> Child Safety risk assessments were conducted using a team of experts, to assess the model’s capability to produce outputs that could result in Child Safety risks and inform on any necessary and appropriate risk mitigations via fine tuning. We leveraged those expert red teaming sessions to expand the coverage of our evaluation benchmarks through Llama 3 model development. For Llama 3, we conducted new in-depth sessions using objective based methodologies to assess the model risks along multiple attack vectors. We also partnered with content specialists to perform red teaming exercises assessing potentially violating content while taking account of market specific nuances or experiences. ### Community Generative AI safety requires expertise and tooling, and we believe in the strength of the open community to accelerate its progress. We are active members of open consortiums, including the AI Alliance, Partnership in AI and MLCommons, actively contributing to safety standardization and transparency. We encourage the community to adopt taxonomies like the MLCommons Proof of Concept evaluation to facilitate collaboration and transparency on safety and content evaluations. Our Purple Llama tools are open sourced for the community to use and widely distributed across ecosystem partners including cloud service providers. We encourage community contributions to our [Github repository](https://github.com/meta-llama/PurpleLlama). 
Finally, we put in place a set of resources including an [output reporting mechanism](https://developers.facebook.com/llama_output_feedback) and [bug bounty program](https://www.facebook.com/whitehat) to continuously improve the Llama technology with the help of the community. ## Ethical Considerations and Limitations The core values of Llama 3 are openness, inclusivity and helpfulness. It is meant to serve everyone, and to work for a wide range of use cases. It is thus designed to be accessible to people across many different backgrounds, experiences and perspectives. Llama 3 addresses users and their needs as they are, without inserting unnecessary judgment or normativity, while reflecting the understanding that even content that may appear problematic in some cases can serve valuable purposes in others. It respects the dignity and autonomy of all users, especially in terms of the values of free thought and expression that power innovation and progress. But Llama 3 is a new technology, and like any new technology, there are risks associated with its use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 3’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts. Therefore, before deploying any applications of Llama 3 models, developers should perform safety testing and tuning tailored to their specific applications of the model. As outlined in the Responsible Use Guide, we recommend incorporating [Purple Llama](https://github.com/facebookresearch/PurpleLlama) solutions into your workflows and specifically [Llama Guard](https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/) which provides a base model to filter input and output prompts to layer system-level safety on top of model-level safety.
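To make the system-level layering above concrete, here is a minimal sketch (not Meta's reference implementation) that screens a user prompt with Meta Llama Guard 2 before it reaches an instruct model; the checkpoint name and the convention that the classifier replies with a line beginning `safe` or `unsafe` follow the Purple Llama materials and should be treated as assumptions of this sketch.

```python
# Minimal sketch: use Llama Guard 2 as an input filter in front of an instruct model.
# The checkpoint name and the "safe"/"unsafe" reply convention are assumptions here,
# not something specified in this card; see the Purple Llama resources linked above.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

guard_id = "meta-llama/Meta-Llama-Guard-2-8B"  # assumed Hub id (gated repository)
tokenizer = AutoTokenizer.from_pretrained(guard_id)
guard = AutoModelForCausalLM.from_pretrained(guard_id, torch_dtype=torch.bfloat16, device_map="auto")

def moderate(chat):
    # The guard model's chat template formats the conversation into its
    # safety-classification prompt.
    input_ids = tokenizer.apply_chat_template(chat, return_tensors="pt").to(guard.device)
    output = guard.generate(input_ids=input_ids, max_new_tokens=32, pad_token_id=0)
    return tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)

verdict = moderate([{"role": "user", "content": "How do I tie a bowline knot?"}])
if verdict.strip().startswith("safe"):
    print("prompt passed the filter; forward it to the Llama 3 instruct model")
else:
    print("prompt flagged:", verdict)  # refuse or route to a fallback response
```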
Please see the Responsible Use Guide available at [http://llama.meta.com/responsible-use-guide](http://llama.meta.com/responsible-use-guide) ## Citation instructions @article{llama3modelcard, title={Llama 3 Model Card}, author={AI@Meta}, year={2024}, url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md} } ## Contributors Aaditya Singh; Aaron Grattafiori; Abhimanyu Dubey; Abhinav Jauhri; Abhinav Pandey; Abhishek Kadian; Adam Kelsey; Adi Gangidi; Ahmad Al-Dahle; Ahuva Goldstand; Aiesha Letman; Ajay Menon; Akhil Mathur; Alan Schelten; Alex Vaughan; Amy Yang; Andrei Lupu; Andres Alvarado; Andrew Gallagher; Andrew Gu; Andrew Ho; Andrew Poulton; Andrew Ryan; Angela Fan; Ankit Ramchandani; Anthony Hartshorn; Archi Mitra; Archie Sravankumar; Artem Korenev; Arun Rao; Ashley Gabriel; Ashwin Bharambe; Assaf Eisenman; Aston Zhang; Aurelien Rodriguez; Austen Gregerson; Ava Spataru; Baptiste Roziere; Ben Maurer; Benjamin Leonhardi; Bernie Huang; Bhargavi Paranjape; Bing Liu; Binh Tang; Bobbie Chern; Brani Stojkovic; Brian Fuller; Catalina Mejia Arenas; Chao Zhou; Charlotte Caucheteux; Chaya Nayak; Ching-Hsiang Chu; Chloe Bi; Chris Cai; Chris Cox; Chris Marra; Chris McConnell; Christian Keller; Christoph Feichtenhofer; Christophe Touret; Chunyang Wu; Corinne Wong; Cristian Canton Ferrer; Damien Allonsius; Daniel Kreymer; Daniel Haziza; Daniel Li; Danielle Pintz; Danny Livshits; Danny Wyatt; David Adkins; David Esiobu; David Xu; Davide Testuggine; Delia David; Devi Parikh; Dhruv Choudhary; Dhruv Mahajan; Diana Liskovich; Diego Garcia-Olano; Diego Perino; Dieuwke Hupkes; Dingkang Wang; Dustin Holland; Egor Lakomkin; Elina Lobanova; Xiaoqing Ellen Tan; Emily Dinan; Eric Smith; Erik Brinkman; Esteban Arcaute; Filip Radenovic; Firat Ozgenel; Francesco Caggioni; Frank Seide; Frank Zhang; Gabriel Synnaeve; Gabriella Schwarz; Gabrielle Lee; Gada Badeer; Georgia Anderson; Graeme Nail; Gregoire Mialon; Guan Pang; Guillem Cucurell; Hailey Nguyen; Hannah Korevaar; Hannah Wang; Haroun Habeeb; Harrison Rudolph; Henry Aspegren; Hu Xu; Hugo Touvron; Iga Kozlowska; Igor Molybog; Igor Tufanov; Iliyan Zarov; Imanol Arrieta Ibarra; Irina-Elena Veliche; Isabel Kloumann; Ishan Misra; Ivan Evtimov; Jacob Xu; Jade Copet; Jake Weissman; Jan Geffert; Jana Vranes; Japhet Asher; Jason Park; Jay Mahadeokar; Jean-Baptiste Gaya; Jeet Shah; Jelmer van der Linde; Jennifer Chan; Jenny Hong; Jenya Lee; Jeremy Fu; Jeremy Teboul; Jianfeng Chi; Jianyu Huang; Jie Wang; Jiecao Yu; Joanna Bitton; Joe Spisak; Joelle Pineau; Jon Carvill; Jongsoo Park; Joseph Rocca; Joshua Johnstun; Junteng Jia; Kalyan Vasuden Alwala; Kam Hou U; Kate Plawiak; Kartikeya Upasani; Kaushik Veeraraghavan; Ke Li; Kenneth Heafield; Kevin Stone; Khalid El-Arini; Krithika Iyer; Kshitiz Malik; Kuenley Chiu; Kunal Bhalla; Kyle Huang; Lakshya Garg; Lauren Rantala-Yeary; Laurens van der Maaten; Lawrence Chen; Leandro Silva; Lee Bell; Lei Zhang; Liang Tan; Louis Martin; Lovish Madaan; Luca Wehrstedt; Lukas Blecher; Luke de Oliveira; Madeline Muzzi; Madian Khabsa; Manav Avlani; Mannat Singh; Manohar Paluri; Mark Zuckerberg; Marcin Kardas; Martynas Mankus; Mathew Oldham; Mathieu Rita; Matthew Lennie; Maya Pavlova; Meghan Keneally; Melanie Kambadur; Mihir Patel; Mikayel Samvelyan; Mike Clark; Mike Lewis; Min Si; Mitesh Kumar Singh; Mo Metanat; Mona Hassan; Naman Goyal; Narjes Torabi; Nicolas Usunier; Nikolay Bashlykov; Nikolay Bogoychev; Niladri Chatterji; Ning Dong; Oliver Aobo Yang; Olivier Duchenne; Onur Celebi; Parth Parekh; Patrick Alrassy; Paul Saab; Pavan 
Balaji; Pedro Rittner; Pengchuan Zhang; Pengwei Li; Petar Vasic; Peter Weng; Polina Zvyagina; Prajjwal Bhargava; Pratik Dubal; Praveen Krishnan; Punit Singh Koura; Qing He; Rachel Rodriguez; Ragavan Srinivasan; Rahul Mitra; Ramon Calderer; Raymond Li; Robert Stojnic; Roberta Raileanu; Robin Battey; Rocky Wang; Rohit Girdhar; Rohit Patel; Romain Sauvestre; Ronnie Polidoro; Roshan Sumbaly; Ross Taylor; Ruan Silva; Rui Hou; Rui Wang; Russ Howes; Ruty Rinott; Saghar Hosseini; Sai Jayesh Bondu; Samyak Datta; Sanjay Singh; Sara Chugh; Sargun Dhillon; Satadru Pan; Sean Bell; Sergey Edunov; Shaoliang Nie; Sharan Narang; Sharath Raparthy; Shaun Lindsay; Sheng Feng; Sheng Shen; Shenghao Lin; Shiva Shankar; Shruti Bhosale; Shun Zhang; Simon Vandenhende; Sinong Wang; Seohyun Sonia Kim; Soumya Batra; Sten Sootla; Steve Kehoe; Suchin Gururangan; Sumit Gupta; Sunny Virk; Sydney Borodinsky; Tamar Glaser; Tamar Herman; Tamara Best; Tara Fowler; Thomas Georgiou; Thomas Scialom; Tianhe Li; Todor Mihaylov; Tong Xiao; Ujjwal Karn; Vedanuj Goswami; Vibhor Gupta; Vignesh Ramanathan; Viktor Kerkez; Vinay Satish Kumar; Vincent Gonguet; Vish Vogeti; Vlad Poenaru; Vlad Tiberiu Mihailescu; Vladan Petrovic; Vladimir Ivanov; Wei Li; Weiwei Chu; Wenhan Xiong; Wenyin Fu; Wes Bouaziz; Whitney Meers; Will Constable; Xavier Martinet; Xiaojian Wu; Xinbo Gao; Xinfeng Xie; Xuchao Jia; Yaelle Goldschlag; Yann LeCun; Yashesh Gaur; Yasmine Babaei; Ye Qi; Yenda Li; Yi Wen; Yiwen Song; Youngjin Nam; Yuchen Hao; Yuchen Zhang; Yun Wang; Yuning Mao; Yuzi He; Zacharie Delpierre Coudert; Zachary DeVito; Zahra Hankir; Zhaoduo Wen; Zheng Yan; Zhengxing Chen; Zhenyu Yang; Zoe Papakipos
mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF
mradermacher
2024-05-27T02:44:49Z
599
0
transformers
[ "transformers", "gguf", "not-for-all-audiences", "en", "base_model:crestf411/L3-70B-daybreak-storywriter-v0.4", "endpoints_compatible", "region:us" ]
null
2024-05-26T07:56:58Z
--- base_model: crestf411/L3-70B-daybreak-storywriter-v0.4 language: - en library_name: transformers quantized_by: mradermacher tags: - not-for-all-audiences --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> weighted/imatrix quants of https://huggingface.co/crestf411/L3-70B-daybreak-storywriter-v0.4 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ1_S.gguf) | i1-IQ1_S | 15.4 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ1_M.gguf) | i1-IQ1_M | 16.9 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 19.2 | | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ2_XS.gguf) | i1-IQ2_XS | 21.2 | | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ2_S.gguf) | i1-IQ2_S | 22.3 | | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ2_M.gguf) | i1-IQ2_M | 24.2 | | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q2_K.gguf) | i1-Q2_K | 26.5 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 27.6 | lower quality | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ3_XS.gguf) | i1-IQ3_XS | 29.4 | | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ3_S.gguf) | i1-IQ3_S | 31.0 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q3_K_S.gguf) | i1-Q3_K_S | 31.0 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ3_M.gguf) | i1-IQ3_M | 32.0 | | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q3_K_M.gguf) | i1-Q3_K_M | 34.4 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q3_K_L.gguf) | i1-Q3_K_L | 37.2 | IQ3_M probably better | | 
[GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-IQ4_XS.gguf) | i1-IQ4_XS | 38.0 | | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q4_0.gguf) | i1-Q4_0 | 40.2 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q4_K_S.gguf) | i1-Q4_K_S | 40.4 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q4_K_M.gguf) | i1-Q4_K_M | 42.6 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q5_K_S.gguf) | i1-Q5_K_S | 48.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q5_K_M.gguf) | i1-Q5_K_M | 50.0 | | | [PART 1](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q6_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/L3-70B-daybreak-storywriter-v0.4-i1-GGUF/resolve/main/L3-70B-daybreak-storywriter-v0.4.i1-Q6_K.gguf.part2of2) | i1-Q6_K | 58.0 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
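Since the i1-Q6_K quant above is distributed as two parts, here is a minimal sketch of joining them into a single file before loading; it assumes, as the linked README describes for such split downloads, that plain byte concatenation of the `.part1of2` and `.part2of2` files is all that is required.

```python
# Minimal sketch: concatenate the two-part i1-Q6_K download into one GGUF file.
# Assumes plain byte concatenation is sufficient for these ".partXofY" files.
import shutil

parts = [
    "L3-70B-daybreak-storywriter-v0.4.i1-Q6_K.gguf.part1of2",
    "L3-70B-daybreak-storywriter-v0.4.i1-Q6_K.gguf.part2of2",
]
with open("L3-70B-daybreak-storywriter-v0.4.i1-Q6_K.gguf", "wb") as merged:
    for part in parts:
        with open(part, "rb") as chunk:
            shutil.copyfileobj(chunk, merged)  # streamed copy; avoids holding ~58 GB in memory
```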
mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF
mradermacher
2024-06-01T16:28:16Z
599
3
transformers
[ "transformers", "gguf", "not-for-all-audiences", "en", "base_model:Vdr1/L3-daybreak-TheSpice-8b-v0.1.3", "endpoints_compatible", "region:us" ]
null
2024-05-31T18:41:16Z
--- base_model: Vdr1/L3-daybreak-TheSpice-8b-v0.1.3 language: - en library_name: transformers quantized_by: mradermacher tags: - not-for-all-audiences --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/Vdr1/L3-daybreak-TheSpice-8b-v0.1.3 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-IQ4_XS.gguf) | 
i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-daybreak-TheSpice-8b-v0.1.3-i1-GGUF/resolve/main/L3-daybreak-TheSpice-8b-v0.1.3.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
r1char9/rubert-tiny2-clf
r1char9
2024-06-14T07:03:48Z
599
0
transformers
[ "transformers", "safetensors", "bert", "text-classification", "sentiment-analysis", "multi-label-classification", "sentiment analysis", "rubert", "sentiment", "tiny", "russian", "multilabel", "classification", "prompt-classification", "ru", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2024-06-10T17:21:55Z
--- license: mit language: - ru pipeline_tag: text-classification tags: - sentiment-analysis - multi-label-classification - sentiment analysis - rubert - sentiment - bert - tiny - russian - multilabel - classification - prompt-classification --- The [RuBERT-tiny2](https://huggingface.co/cointegrated/rubert-tiny2) model was fine-tuned for the __prompt classification__ task on __Russian__ text. It performs __multi-label classification__ with the following categories: ```yaml 0: write 1: draw 2: neutral ``` The category names in Russian: ```yaml write: написать draw: рисовать neutral: нейтральность ``` ## Usage ```python from transformers import pipeline model = pipeline(model="r1char9/rubert-tiny2-clf") model('Сгенерируй картину Томаса Шелби') # "Generate a picture of Thomas Shelby" -> [{'label': 'draw', 'score': 0.8699279427528381}] ``` ## Metrics: ``` metrics write draw neutral micro avg macro avg weighted avg precision 1.0 1.0 1.0 1.0 1.0 1.0 recall 1.0 1.0 1.0 1.0 1.0 1.0 f1-score 1.0 1.0 1.0 1.0 1.0 1.0 support 155.0 117.0 19.0 291.0 291.0 291.0 auc-roc 1.0 1.0 1.0 1.0 1.0 1.0 ```
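Because the model is described as multi-label, it can be useful to read the score of every category rather than only the top one; the `top_k=None` option below is a standard argument of the `transformers` text-classification pipeline and is not mentioned in the original card, so treat this as a sketch.

```python
from transformers import pipeline

# top_k=None makes the pipeline return a score for every label instead of only the best one.
clf = pipeline("text-classification", model="r1char9/rubert-tiny2-clf", top_k=None)
print(clf("Сгенерируй картину Томаса Шелби"))  # "Generate a picture of Thomas Shelby"
# Expected shape of the result: one list per input with a dict per label, e.g.
# [[{'label': 'draw', 'score': ...}, {'label': 'write', 'score': ...}, {'label': 'neutral', 'score': ...}]]
```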
malmarjeh/mbert2mbert-arabic-text-summarization
malmarjeh
2023-07-01T16:43:04Z
598
5
transformers
[ "transformers", "pytorch", "encoder-decoder", "text2text-generation", "Multilingual BERT", "BERT2BERT", "MSA", "Arabic Text Summarization", "Arabic News Title Generation", "Arabic Paraphrasing", "ar", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text2text-generation
2022-06-03T14:45:34Z
--- language: - ar tags: - Multilingual BERT - BERT2BERT - MSA - Arabic Text Summarization - Arabic News Title Generation - Arabic Paraphrasing --- # An Arabic abstractive text summarization model A BERT2BERT-based model whose parameters are initialized with mBERT weights and which has been fine-tuned on a dataset of 84,764 paragraph-summary pairs. Paper: [Arabic abstractive text summarization using RNN-based and transformer-based architectures](https://www.sciencedirect.com/science/article/abs/pii/S0306457322003284). Dataset: [link](https://data.mendeley.com/datasets/7kr75c9h24/1). The model can be used as follows: ```python from transformers import BertTokenizer, AutoModelForSeq2SeqLM, pipeline from arabert.preprocess import ArabertPreprocessor model_name="malmarjeh/mbert2mbert-arabic-text-summarization" preprocessor = ArabertPreprocessor(model_name="") tokenizer = BertTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) pipeline = pipeline("text2text-generation",model=model,tokenizer=tokenizer) text = "شهدت مدينة طرابلس، مساء أمس الأربعاء، احتجاجات شعبية وأعمال شغب لليوم الثالث على التوالي، وذلك بسبب تردي الوضع المعيشي والاقتصادي. واندلعت مواجهات عنيفة وعمليات كر وفر ما بين الجيش اللبناني والمحتجين استمرت لساعات، إثر محاولة فتح الطرقات المقطوعة، ما أدى إلى إصابة العشرات من الطرفين." text = preprocessor.preprocess(text) result = pipeline(text, pad_token_id=tokenizer.eos_token_id, num_beams=3, repetition_penalty=3.0, max_length=200, length_penalty=1.0, no_repeat_ngram_size = 3)[0]['generated_text'] result >>> 'احتجاجات في طرابلس على خلفية مواجهات عنيفة بين الجيش اللبناني والمحتجين' ``` ## Contact: <[email protected]>
timm/regnetx_004_tv.tv2_in1k
timm
2024-02-10T23:32:36Z
598
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "arxiv:2003.13678", "license:bsd-3-clause", "region:us" ]
image-classification
2023-03-21T06:31:13Z
--- license: bsd-3-clause library_name: timm tags: - image-classification - timm --- # Model card for regnetx_004_tv.tv2_in1k A RegNetX-400MF image classification model. Pretrained on ImageNet-1k by torchvision contributors (see ImageNet1K-V2 weight details https://github.com/pytorch/vision/issues/3995#new-recipe). The `timm` RegNet implementation includes a number of enhancements not present in other implementations, including: * stochastic depth * gradient checkpointing * layer-wise LR decay * configurable output stride (dilation) * configurable activation and norm layers * option for a pre-activation bottleneck block used in RegNetV variant * only known RegNetZ model definitions with pretrained weights ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 5.5 - GMACs: 0.4 - Activations (M): 3.2 - Image size: 224 x 224 - **Papers:** - Designing Network Design Spaces: https://arxiv.org/abs/2003.13678 - **Original:** https://github.com/pytorch/vision ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('regnetx_004_tv.tv2_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'regnetx_004_tv.tv2_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 32, 112, 112]) # torch.Size([1, 32, 56, 56]) # torch.Size([1, 64, 28, 28]) # torch.Size([1, 160, 14, 14]) # torch.Size([1, 400, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'regnetx_004_tv.tv2_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 400, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the 
dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). For the comparison summary below, the ra_in1k, ra3_in1k, ch_in1k, sw_*, and lion_* tagged weights are trained in `timm`. |model |img_size|top1 |top5 |param_count|gmacs|macts | |-------------------------|--------|------|------|-----------|-----|------| |[regnety_1280.swag_ft_in1k](https://huggingface.co/timm/regnety_1280.swag_ft_in1k)|384 |88.228|98.684|644.81 |374.99|210.2 | |[regnety_320.swag_ft_in1k](https://huggingface.co/timm/regnety_320.swag_ft_in1k)|384 |86.84 |98.364|145.05 |95.0 |88.87 | |[regnety_160.swag_ft_in1k](https://huggingface.co/timm/regnety_160.swag_ft_in1k)|384 |86.024|98.05 |83.59 |46.87|67.67 | |[regnety_160.sw_in12k_ft_in1k](https://huggingface.co/timm/regnety_160.sw_in12k_ft_in1k)|288 |86.004|97.83 |83.59 |26.37|38.07 | |[regnety_1280.swag_lc_in1k](https://huggingface.co/timm/regnety_1280.swag_lc_in1k)|224 |85.996|97.848|644.81 |127.66|71.58 | |[regnety_160.lion_in12k_ft_in1k](https://huggingface.co/timm/regnety_160.lion_in12k_ft_in1k)|288 |85.982|97.844|83.59 |26.37|38.07 | |[regnety_160.sw_in12k_ft_in1k](https://huggingface.co/timm/regnety_160.sw_in12k_ft_in1k)|224 |85.574|97.666|83.59 |15.96|23.04 | |[regnety_160.lion_in12k_ft_in1k](https://huggingface.co/timm/regnety_160.lion_in12k_ft_in1k)|224 |85.564|97.674|83.59 |15.96|23.04 | |[regnety_120.sw_in12k_ft_in1k](https://huggingface.co/timm/regnety_120.sw_in12k_ft_in1k)|288 |85.398|97.584|51.82 |20.06|35.34 | |[regnety_2560.seer_ft_in1k](https://huggingface.co/timm/regnety_2560.seer_ft_in1k)|384 |85.15 |97.436|1282.6 |747.83|296.49| |[regnetz_e8.ra3_in1k](https://huggingface.co/timm/regnetz_e8.ra3_in1k)|320 |85.036|97.268|57.7 |15.46|63.94 | |[regnety_120.sw_in12k_ft_in1k](https://huggingface.co/timm/regnety_120.sw_in12k_ft_in1k)|224 |84.976|97.416|51.82 |12.14|21.38 | |[regnety_320.swag_lc_in1k](https://huggingface.co/timm/regnety_320.swag_lc_in1k)|224 |84.56 |97.446|145.05 |32.34|30.26 | |[regnetz_040_h.ra3_in1k](https://huggingface.co/timm/regnetz_040_h.ra3_in1k)|320 |84.496|97.004|28.94 |6.43 |37.94 | |[regnetz_e8.ra3_in1k](https://huggingface.co/timm/regnetz_e8.ra3_in1k)|256 |84.436|97.02 |57.7 |9.91 |40.94 | |[regnety_1280.seer_ft_in1k](https://huggingface.co/timm/regnety_1280.seer_ft_in1k)|384 |84.432|97.092|644.81 |374.99|210.2 | |[regnetz_040.ra3_in1k](https://huggingface.co/timm/regnetz_040.ra3_in1k)|320 |84.246|96.93 |27.12 |6.35 |37.78 | |[regnetz_d8.ra3_in1k](https://huggingface.co/timm/regnetz_d8.ra3_in1k)|320 |84.054|96.992|23.37 |6.19 |37.08 | |[regnetz_d8_evos.ch_in1k](https://huggingface.co/timm/regnetz_d8_evos.ch_in1k)|320 |84.038|96.992|23.46 |7.03 |38.92 | |[regnetz_d32.ra3_in1k](https://huggingface.co/timm/regnetz_d32.ra3_in1k)|320 |84.022|96.866|27.58 |9.33 |37.08 | |[regnety_080.ra3_in1k](https://huggingface.co/timm/regnety_080.ra3_in1k)|288 |83.932|96.888|39.18 |13.22|29.69 | |[regnety_640.seer_ft_in1k](https://huggingface.co/timm/regnety_640.seer_ft_in1k)|384 |83.912|96.924|281.38 |188.47|124.83| |[regnety_160.swag_lc_in1k](https://huggingface.co/timm/regnety_160.swag_lc_in1k)|224 |83.778|97.286|83.59 |15.96|23.04 | |[regnetz_040_h.ra3_in1k](https://huggingface.co/timm/regnetz_040_h.ra3_in1k)|256 |83.776|96.704|28.94 |4.12 |24.29 | |[regnetv_064.ra3_in1k](https://huggingface.co/timm/regnetv_064.ra3_in1k)|288 |83.72 |96.75 |30.58 |10.55|27.11 | |[regnety_064.ra3_in1k](https://huggingface.co/timm/regnety_064.ra3_in1k)|288 |83.718|96.724|30.58 |10.56|27.11 
| |[regnety_160.deit_in1k](https://huggingface.co/timm/regnety_160.deit_in1k)|288 |83.69 |96.778|83.59 |26.37|38.07 | |[regnetz_040.ra3_in1k](https://huggingface.co/timm/regnetz_040.ra3_in1k)|256 |83.62 |96.704|27.12 |4.06 |24.19 | |[regnetz_d8.ra3_in1k](https://huggingface.co/timm/regnetz_d8.ra3_in1k)|256 |83.438|96.776|23.37 |3.97 |23.74 | |[regnetz_d32.ra3_in1k](https://huggingface.co/timm/regnetz_d32.ra3_in1k)|256 |83.424|96.632|27.58 |5.98 |23.74 | |[regnetz_d8_evos.ch_in1k](https://huggingface.co/timm/regnetz_d8_evos.ch_in1k)|256 |83.36 |96.636|23.46 |4.5 |24.92 | |[regnety_320.seer_ft_in1k](https://huggingface.co/timm/regnety_320.seer_ft_in1k)|384 |83.35 |96.71 |145.05 |95.0 |88.87 | |[regnetv_040.ra3_in1k](https://huggingface.co/timm/regnetv_040.ra3_in1k)|288 |83.204|96.66 |20.64 |6.6 |20.3 | |[regnety_320.tv2_in1k](https://huggingface.co/timm/regnety_320.tv2_in1k)|224 |83.162|96.42 |145.05 |32.34|30.26 | |[regnety_080.ra3_in1k](https://huggingface.co/timm/regnety_080.ra3_in1k)|224 |83.16 |96.486|39.18 |8.0 |17.97 | |[regnetv_064.ra3_in1k](https://huggingface.co/timm/regnetv_064.ra3_in1k)|224 |83.108|96.458|30.58 |6.39 |16.41 | |[regnety_040.ra3_in1k](https://huggingface.co/timm/regnety_040.ra3_in1k)|288 |83.044|96.5 |20.65 |6.61 |20.3 | |[regnety_064.ra3_in1k](https://huggingface.co/timm/regnety_064.ra3_in1k)|224 |83.02 |96.292|30.58 |6.39 |16.41 | |[regnety_160.deit_in1k](https://huggingface.co/timm/regnety_160.deit_in1k)|224 |82.974|96.502|83.59 |15.96|23.04 | |[regnetx_320.tv2_in1k](https://huggingface.co/timm/regnetx_320.tv2_in1k)|224 |82.816|96.208|107.81 |31.81|36.3 | |[regnety_032.ra_in1k](https://huggingface.co/timm/regnety_032.ra_in1k)|288 |82.742|96.418|19.44 |5.29 |18.61 | |[regnety_160.tv2_in1k](https://huggingface.co/timm/regnety_160.tv2_in1k)|224 |82.634|96.22 |83.59 |15.96|23.04 | |[regnetz_c16_evos.ch_in1k](https://huggingface.co/timm/regnetz_c16_evos.ch_in1k)|320 |82.634|96.472|13.49 |3.86 |25.88 | |[regnety_080_tv.tv2_in1k](https://huggingface.co/timm/regnety_080_tv.tv2_in1k)|224 |82.592|96.246|39.38 |8.51 |19.73 | |[regnetx_160.tv2_in1k](https://huggingface.co/timm/regnetx_160.tv2_in1k)|224 |82.564|96.052|54.28 |15.99|25.52 | |[regnetz_c16.ra3_in1k](https://huggingface.co/timm/regnetz_c16.ra3_in1k)|320 |82.51 |96.358|13.46 |3.92 |25.88 | |[regnetv_040.ra3_in1k](https://huggingface.co/timm/regnetv_040.ra3_in1k)|224 |82.44 |96.198|20.64 |4.0 |12.29 | |[regnety_040.ra3_in1k](https://huggingface.co/timm/regnety_040.ra3_in1k)|224 |82.304|96.078|20.65 |4.0 |12.29 | |[regnetz_c16.ra3_in1k](https://huggingface.co/timm/regnetz_c16.ra3_in1k)|256 |82.16 |96.048|13.46 |2.51 |16.57 | |[regnetz_c16_evos.ch_in1k](https://huggingface.co/timm/regnetz_c16_evos.ch_in1k)|256 |81.936|96.15 |13.49 |2.48 |16.57 | |[regnety_032.ra_in1k](https://huggingface.co/timm/regnety_032.ra_in1k)|224 |81.924|95.988|19.44 |3.2 |11.26 | |[regnety_032.tv2_in1k](https://huggingface.co/timm/regnety_032.tv2_in1k)|224 |81.77 |95.842|19.44 |3.2 |11.26 | |[regnetx_080.tv2_in1k](https://huggingface.co/timm/regnetx_080.tv2_in1k)|224 |81.552|95.544|39.57 |8.02 |14.06 | |[regnetx_032.tv2_in1k](https://huggingface.co/timm/regnetx_032.tv2_in1k)|224 |80.924|95.27 |15.3 |3.2 |11.37 | |[regnety_320.pycls_in1k](https://huggingface.co/timm/regnety_320.pycls_in1k)|224 |80.804|95.246|145.05 |32.34|30.26 | |[regnetz_b16.ra3_in1k](https://huggingface.co/timm/regnetz_b16.ra3_in1k)|288 |80.712|95.47 |9.72 |2.39 |16.43 | |[regnety_016.tv2_in1k](https://huggingface.co/timm/regnety_016.tv2_in1k)|224 |80.66 |95.334|11.2 |1.63 
|8.04 | |[regnety_120.pycls_in1k](https://huggingface.co/timm/regnety_120.pycls_in1k)|224 |80.37 |95.12 |51.82 |12.14|21.38 | |[regnety_160.pycls_in1k](https://huggingface.co/timm/regnety_160.pycls_in1k)|224 |80.288|94.964|83.59 |15.96|23.04 | |[regnetx_320.pycls_in1k](https://huggingface.co/timm/regnetx_320.pycls_in1k)|224 |80.246|95.01 |107.81 |31.81|36.3 | |[regnety_080.pycls_in1k](https://huggingface.co/timm/regnety_080.pycls_in1k)|224 |79.882|94.834|39.18 |8.0 |17.97 | |[regnetz_b16.ra3_in1k](https://huggingface.co/timm/regnetz_b16.ra3_in1k)|224 |79.872|94.974|9.72 |1.45 |9.95 | |[regnetx_160.pycls_in1k](https://huggingface.co/timm/regnetx_160.pycls_in1k)|224 |79.862|94.828|54.28 |15.99|25.52 | |[regnety_064.pycls_in1k](https://huggingface.co/timm/regnety_064.pycls_in1k)|224 |79.716|94.772|30.58 |6.39 |16.41 | |[regnetx_120.pycls_in1k](https://huggingface.co/timm/regnetx_120.pycls_in1k)|224 |79.592|94.738|46.11 |12.13|21.37 | |[regnetx_016.tv2_in1k](https://huggingface.co/timm/regnetx_016.tv2_in1k)|224 |79.44 |94.772|9.19 |1.62 |7.93 | |[regnety_040.pycls_in1k](https://huggingface.co/timm/regnety_040.pycls_in1k)|224 |79.23 |94.654|20.65 |4.0 |12.29 | |[regnetx_080.pycls_in1k](https://huggingface.co/timm/regnetx_080.pycls_in1k)|224 |79.198|94.55 |39.57 |8.02 |14.06 | |[regnetx_064.pycls_in1k](https://huggingface.co/timm/regnetx_064.pycls_in1k)|224 |79.064|94.454|26.21 |6.49 |16.37 | |[regnety_032.pycls_in1k](https://huggingface.co/timm/regnety_032.pycls_in1k)|224 |78.884|94.412|19.44 |3.2 |11.26 | |[regnety_008_tv.tv2_in1k](https://huggingface.co/timm/regnety_008_tv.tv2_in1k)|224 |78.654|94.388|6.43 |0.84 |5.42 | |[regnetx_040.pycls_in1k](https://huggingface.co/timm/regnetx_040.pycls_in1k)|224 |78.482|94.24 |22.12 |3.99 |12.2 | |[regnetx_032.pycls_in1k](https://huggingface.co/timm/regnetx_032.pycls_in1k)|224 |78.178|94.08 |15.3 |3.2 |11.37 | |[regnety_016.pycls_in1k](https://huggingface.co/timm/regnety_016.pycls_in1k)|224 |77.862|93.73 |11.2 |1.63 |8.04 | |[regnetx_008.tv2_in1k](https://huggingface.co/timm/regnetx_008.tv2_in1k)|224 |77.302|93.672|7.26 |0.81 |5.15 | |[regnetx_016.pycls_in1k](https://huggingface.co/timm/regnetx_016.pycls_in1k)|224 |76.908|93.418|9.19 |1.62 |7.93 | |[regnety_008.pycls_in1k](https://huggingface.co/timm/regnety_008.pycls_in1k)|224 |76.296|93.05 |6.26 |0.81 |5.25 | |[regnety_004.tv2_in1k](https://huggingface.co/timm/regnety_004.tv2_in1k)|224 |75.592|92.712|4.34 |0.41 |3.89 | |[regnety_006.pycls_in1k](https://huggingface.co/timm/regnety_006.pycls_in1k)|224 |75.244|92.518|6.06 |0.61 |4.33 | |[regnetx_008.pycls_in1k](https://huggingface.co/timm/regnetx_008.pycls_in1k)|224 |75.042|92.342|7.26 |0.81 |5.15 | |[regnetx_004_tv.tv2_in1k](https://huggingface.co/timm/regnetx_004_tv.tv2_in1k)|224 |74.57 |92.184|5.5 |0.42 |3.17 | |[regnety_004.pycls_in1k](https://huggingface.co/timm/regnety_004.pycls_in1k)|224 |74.018|91.764|4.34 |0.41 |3.89 | |[regnetx_006.pycls_in1k](https://huggingface.co/timm/regnetx_006.pycls_in1k)|224 |73.862|91.67 |6.2 |0.61 |3.98 | |[regnetx_004.pycls_in1k](https://huggingface.co/timm/regnetx_004.pycls_in1k)|224 |72.38 |90.832|5.16 |0.4 |3.14 | |[regnety_002.pycls_in1k](https://huggingface.co/timm/regnety_002.pycls_in1k)|224 |70.282|89.534|3.16 |0.2 |2.17 | |[regnetx_002.pycls_in1k](https://huggingface.co/timm/regnetx_002.pycls_in1k)|224 |68.752|88.556|2.68 |0.2 |2.16 | ## Citation ```bibtex @InProceedings{Radosavovic2020, title = {Designing Network Design Spaces}, author = {Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming 
He and Piotr Doll{\'a}r}, booktitle = {CVPR}, year = {2020} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
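As a follow-up to the classification snippet above (which also needs `import torch` for the `torch.topk` call), the class indices can be mapped to readable ImageNet-1k names; the label file URL is the commonly used list from the PyTorch Hub repository and is an assumption of this sketch rather than part of the original card.

```python
# Sketch: decode the top-5 predictions of regnetx_004_tv.tv2_in1k into class names.
# The imagenet_classes.txt URL is assumed to hold one class name per line, index-aligned.
import torch
from urllib.request import urlopen
from PIL import Image
import timm

img = Image.open(urlopen(
    'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png'
))
model = timm.create_model('regnetx_004_tv.tv2_in1k', pretrained=True).eval()
data_config = timm.data.resolve_model_data_config(model)
transforms = timm.data.create_transform(**data_config, is_training=False)

output = model(transforms(img).unsqueeze(0))
top5_prob, top5_idx = torch.topk(output.softmax(dim=1), k=5)

labels = urlopen(
    'https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt'
).read().decode('utf-8').splitlines()
for prob, idx in zip(top5_prob[0], top5_idx[0]):
    print(f"{labels[idx.item()]}: {prob.item():.3f}")
```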
timm/coat_mini.in1k
timm
2023-04-24T03:43:47Z
598
0
timm
[ "timm", "pytorch", "image-classification", "dataset:imagenet-1k", "arxiv:2104.06399", "license:apache-2.0", "region:us" ]
image-classification
2023-04-24T03:43:39Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for coat_mini.in1k A CoaT (Co-Scale Conv-Attentional Transformer) image classification model. Trained on ImageNet-1k by paper authors. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 10.3 - GMACs: 6.8 - Activations (M): 33.7 - Image size: 224 x 224 - **Papers:** - Co-Scale Conv-Attentional Image Transformers: https://arxiv.org/abs/2104.06399 - **Dataset:** ImageNet-1k - **Original:** https://github.com/mlpc-ucsd/CoaT ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm import torch img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('coat_mini.in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'coat_mini.in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (torch.Size([1, 785, 216]), torch.Size([1, 197, 216]), torch.Size([1, 50, 216])) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @InProceedings{Xu_2021_ICCV, author = {Xu, Weijian and Xu, Yifan and Chang, Tyler and Tu, Zhuowen}, title = {Co-Scale Conv-Attentional Image Transformers}, booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, month = {October}, year = {2021}, pages = {9981-9990} } ```
juierror/flan-t5-text2sql-with-schema-v2
juierror
2024-04-25T06:17:21Z
598
24
transformers
[ "transformers", "pytorch", "safetensors", "t5", "text2text-generation", "en", "license:cc-by-sa-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text2text-generation
2023-07-31T14:58:27Z
--- language: en widget: - text: >- convert question and table into SQL query. tables: people_name(id,name), people_age(people_id,age). question: how many people with name jui and age less than 25 license: cc-by-sa-4.0 pipeline_tag: text2text-generation inference: parameters: max_length: 512 num_beams: 10 top_k: 10 --- This is an upgraded version of [https://huggingface.co/juierror/flan-t5-text2sql-with-schema](https://huggingface.co/juierror/flan-t5-text2sql-with-schema). It supports the '<' sign and can handle multiple tables. # How to use ```python from typing import Dict, List from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer = AutoTokenizer.from_pretrained("juierror/flan-t5-text2sql-with-schema-v2") model = AutoModelForSeq2SeqLM.from_pretrained("juierror/flan-t5-text2sql-with-schema-v2") def get_prompt(tables, question): prompt = f"""convert question and table into SQL query. tables: {tables}. question: {question}""" return prompt def prepare_input(question: str, tables: Dict[str, List[str]]): tables = [f"""{table_name}({",".join(tables[table_name])})""" for table_name in tables] tables = ", ".join(tables) prompt = get_prompt(tables, question) input_ids = tokenizer(prompt, max_length=512, return_tensors="pt").input_ids return input_ids def inference(question: str, tables: Dict[str, List[str]]) -> str: input_data = prepare_input(question=question, tables=tables) input_data = input_data.to(model.device) outputs = model.generate(inputs=input_data, num_beams=10, top_k=10, max_length=512) result = tokenizer.decode(token_ids=outputs[0], skip_special_tokens=True) return result print(inference("how many people with name jui and age less than 25", { "people_name": ["id", "name"], "people_age": ["people_id", "age"] })) print(inference("what is id with name jui and age less than 25", { "people_name": ["id", "name", "age"] })) ``` # Dataset - [CoSQL](https://yale-lily.github.io/cosql) - [spider](https://yale-lily.github.io/spider) - [SParC](https://yale-lily.github.io/sparc)
facebook/dpt-dinov2-large-nyu
facebook
2023-11-13T19:23:10Z
598
1
transformers
[ "transformers", "pytorch", "dpt", "depth-estimation", "vision", "dinov2", "arxiv:2304.07193", "arxiv:2103.13413", "license:apache-2.0", "region:us" ]
depth-estimation
2023-10-31T18:03:54Z
--- license: apache-2.0 tags: - vision - dinov2 - depth-estimation inference: false --- # Model Card: DPT model with DINOv2 backbone ## Model Details DPT (Dense Prediction Transformer) model with DINOv2 backbone as proposed in [DINOv2: Learning Robust Visual Features without Supervision](https://arxiv.org/abs/2304.07193) by Oquab et al. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/dpt_architecture.jpg" alt="drawing" width="600"/> <small> DPT architecture. Taken from the <a href="https://arxiv.org/abs/2103.13413" target="_blank">original paper</a>. </small> ### Resources - [DINOv2 Paper](https://arxiv.org/abs/2304.07193) - [DPT Paper](https://arxiv.org/abs/2103.13413) ### Use with Transformers ```python from transformers import AutoImageProcessor, DPTForDepthEstimation import torch import numpy as np from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) image_processor = AutoImageProcessor.from_pretrained("facebook/dpt-dinov2-large-nyu") model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-large-nyu") # prepare image for the model inputs = image_processor(images=image, return_tensors="pt") with torch.no_grad(): outputs = model(**inputs) predicted_depth = outputs.predicted_depth # interpolate to original size prediction = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False, ) # visualize the prediction output = prediction.squeeze().cpu().numpy() formatted = (output * 255 / np.max(output)).astype("uint8") depth = Image.fromarray(formatted) ``` ## Model Use ### Intended Use The model is intended to showcase that using the DPT framework with DINOv2 as backbone yields a powerful depth estimator. ### BibTeX entry and citation info ```bibtex @misc{oquab2023dinov2, title={DINOv2: Learning Robust Visual Features without Supervision}, author={Maxime Oquab and Timothée Darcet and Théo Moutakanni and Huy Vo and Marc Szafraniec and Vasil Khalidov and Pierre Fernandez and Daniel Haziza and Francisco Massa and Alaaeldin El-Nouby and Mahmoud Assran and Nicolas Ballas and Wojciech Galuba and Russell Howes and Po-Yao Huang and Shang-Wen Li and Ishan Misra and Michael Rabbat and Vasu Sharma and Gabriel Synnaeve and Hu Xu and Hervé Jegou and Julien Mairal and Patrick Labatut and Armand Joulin and Piotr Bojanowski}, year={2023}, eprint={2304.07193}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
PassionFriend/5FuxWkjMTNpWmRfkfF5QP6fScfmqPZJnREB9kP9z1uzC9VXD_vgg
PassionFriend
2024-03-01T06:44:36Z
598
0
keras
[ "keras", "region:us" ]
null
2024-02-15T01:30:14Z
Entry not found
DavidAU/LemonadeRP-4.5.3-11B-GGUF-Plus
DavidAU
2024-05-08T04:34:16Z
598
1
null
[ "gguf", "license:apache-2.0", "region:us" ]
null
2024-05-08T04:21:47Z
--- license: apache-2.0 --- GGUFs PLUS: Q8 and Q6 GGUFs with critical parts of the model in F16 / full precision. File sizes will be slightly larger than standard, but they should yield higher-quality results across all tasks and conditions.
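The card does not show how to load these files; a minimal sketch with the `llama-cpp-python` bindings follows, where the local file name is a placeholder for whichever GGUF Plus file was downloaded and the generation settings are arbitrary examples.

```python
# Minimal sketch, assuming one of the "GGUF Plus" files has been downloaded locally.
from llama_cpp import Llama

llm = Llama(
    model_path="./LemonadeRP-4.5.3-11B.Q6_K.gguf",  # placeholder file name
    n_ctx=4096,       # context window
    n_gpu_layers=-1,  # offload all layers to GPU if available; set 0 for CPU-only
)
out = llm(
    "Write a two-sentence description of a seaside lemonade stand.",
    max_tokens=128,
    temperature=0.8,
)
print(out["choices"][0]["text"])
```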
joongi007/Bookworm-10.7B-v0.4-DPO-GGUF
joongi007
2024-06-05T00:32:00Z
598
0
null
[ "gguf", "generated_from_trainer", "base_model:yanolja/Bookworm-10.7B-v0.4-DPO", "license:apache-2.0", "region:us" ]
null
2024-05-28T07:17:31Z
--- license: apache-2.0 base_model: yanolja/Bookworm-10.7B-v0.4-DPO tags: - generated_from_trainer - gguf model-index: - name: joongi007/Bookworm-10.7B-v0.4-DPO-GGUF results: [] --- - The original model is [yanolja/Bookworm-10.7B-v0.4-DPO](https://huggingface.co/yanolja/Bookworm-10.7B-v0.4-DPO) - Quantized using [llama.cpp](https://github.com/ggerganov/llama.cpp) ```prompt <|im_start|>system {System}<|im_end|> <|im_start|>user {User}<|im_end|> <|im_start|>assistant {Assistant} ```
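A small helper for filling in the prompt template above before handing the text to a GGUF runtime; the helper is only illustrative string formatting (the newline placement follows the usual ChatML convention and is not spelled out in the card).

```python
def build_bookworm_prompt(system: str, user: str) -> str:
    # Fill the ChatML-style template shown above; the assistant turn is left open
    # so the model continues after "<|im_start|>assistant".
    return (
        f"<|im_start|>system\n{system}<|im_end|>\n"
        f"<|im_start|>user\n{user}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

prompt = build_bookworm_prompt(
    "You are a helpful assistant.",
    "Summarize the plot of Don Quixote in two sentences.",
)
print(prompt)
```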
umiyuki/Umievo-itr012-Gleipnir-7B
umiyuki
2024-05-29T13:51:31Z
598
15
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "ja", "arxiv:2203.05482", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-05-29T12:32:29Z
--- base_model: [] library_name: transformers tags: - mergekit - merge license: apache-2.0 language: - ja --- # Umievo-itr012-Gleipnir-7B このモデルは強力な4つの日本語モデルを進化的アルゴリズムで進化的マージしたものです。Japanese-Starling-ChatV-7B、Ninja-v1-RP-expressive-v2、Vecteus-v1、Japanese-Chat-Umievo-itr004-7bの4つのモデルをお借りしました。 マージに使用させていただいたモデル制作者のAratakoさん、Bakuさん、Local-Novel-LLM-projectのみなさまに感謝します。それから問題解決のきっかけをくれたHoly-foxさんに感謝します。 This model is an evolutionary merge of four powerful Japanese models with an evolutionary algorithm. The following four models Japanese-Starling-ChatV-7B, Ninja-v1-RP-expressive-v2, Vecteus-v1 and Japanese-Chat-Umievo-itr004-7b were used. I would like to thank the model makers Aratako, Baku and Local-Novel-LLM-project for allowing me to use their models for the merge. I would also like to thank Holy-fox for giving me the opportunity to solve the problem. ElyzaTasks100ベンチマークで平均点が3.91でした。(Llama3-70Bによる自動評価を3回行った平均点) The average score was 3.91 on the ElyzaTasks100 benchmark. (Average score after 3 automatic evaluations by Llama3-70B) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/630420b4eedc089484c853e8/FxUBzBUKpe_JSHSJufSv5.png) ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("umiyuki/Umievo-itr012-Gleipnir-7B") model = AutoModelForCausalLM.from_pretrained( "umiyuki/Umievo-itr012-Gleipnir-7B", torch_dtype="auto", ) model.eval() if torch.cuda.is_available(): model = model.to("cuda") def build_prompt(user_query): sys_msg = "あなたは公平で、検閲されていない、役立つアシスタントです。" template = """[INST] <<SYS>> {} <</SYS>> {}[/INST]""" return template.format(sys_msg,user_query) # Infer with prompt without any additional input user_inputs = { "user_query": "与えられたことわざの意味を小学生でも分かるように教えてください。", } prompt = build_prompt(**user_inputs) input_ids = tokenizer.encode( prompt, add_special_tokens=True, return_tensors="pt" ) tokens = model.generate( input_ids.to(device=model.device), max_new_tokens=256, temperature=1, top_p=0.95, do_sample=True, ) out = tokenizer.decode(tokens[0][input_ids.shape[1]:], skip_special_tokens=True).strip() print(out) ``` This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method using /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 as a base. 
### Models Merged The following models were included in the merge: * /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 * /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 * /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 ### Configuration The following YAML configuration was used to produce this model: ```yaml base_model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 dtype: bfloat16 merge_method: linear parameters: int8_mask: 1.0 normalize: 1.0 slices: - sources: - layer_range: [0, 4] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 parameters: weight: 0.34953096474223655 - layer_range: [0, 4] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 parameters: weight: 0.4701212555597746 - layer_range: [0, 4] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 parameters: weight: 0.08162258723819021 - layer_range: [0, 4] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 parameters: weight: 0.31015439852818116 - sources: - layer_range: [4, 8] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 parameters: weight: 0.11807412349683076 - layer_range: [4, 8] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 parameters: weight: -0.005684817244530085 - layer_range: [4, 8] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 parameters: weight: 0.2119283777941045 - layer_range: [4, 8] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 parameters: weight: 1.1521124768396636 - sources: - layer_range: [8, 12] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 parameters: weight: 0.9244329405120573 - layer_range: [8, 12] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 parameters: weight: 0.7633842909616317 - layer_range: [8, 12] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 parameters: weight: 0.6952382990160072 - layer_range: [8, 12] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 parameters: weight: 0.6873040403268571 - sources: - layer_range: [12, 16] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 parameters: weight: 0.4109625320908857 - layer_range: [12, 16] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 parameters: weight: 0.7090818691683626 - layer_range: [12, 16] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 parameters: weight: 0.42059423827890385 - layer_range: [12, 16] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 parameters: weight: 0.5705186152354104 - sources: - layer_range: [16, 20] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 parameters: weight: 0.28507448659933315 - layer_range: [16, 20] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 parameters: weight: 0.4025223854083849 - 
layer_range: [16, 20] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 parameters: weight: 0.25885405316835886 - layer_range: [16, 20] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 parameters: weight: 0.35540632690403373 - sources: - layer_range: [20, 24] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 parameters: weight: 0.018882795552694703 - layer_range: [20, 24] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 parameters: weight: 0.628847855051209 - layer_range: [20, 24] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 parameters: weight: 0.7038654876125734 - layer_range: [20, 24] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 parameters: weight: 0.877501753107237 - sources: - layer_range: [24, 28] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 parameters: weight: 0.14008355431312197 - layer_range: [24, 28] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 parameters: weight: 1.0153826426873882 - layer_range: [24, 28] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 parameters: weight: 0.5586634927008272 - layer_range: [24, 28] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 parameters: weight: 0.54455848971032 - sources: - layer_range: [28, 32] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Chat-Umievo-itr004-7b_579282327 parameters: weight: 0.8188405381342685 - layer_range: [28, 32] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Vecteus-v1_4179808746 parameters: weight: 0.5130358379308082 - layer_range: [28, 32] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Japanese-Starling-ChatV-7B_1737576410 parameters: weight: 1.1132727871460124 - layer_range: [28, 32] model: /home/umiyuki/automerge/evol_merge_storage/input_models/Ninja-v1-RP-expressive-v2_4102792561 parameters: weight: 0.4471258297582539 ```
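For intuition, the linear merge method referenced above amounts to a per-tensor weighted average of the source checkpoints; the sketch below illustrates that idea on plain PyTorch state dicts with made-up paths and weights, whereas mergekit additionally applies the per-layer-range weights, normalization and int8 masking configured in the YAML above.

```python
# Conceptual sketch of a linear (weighted-average) merge, as in the method above.
# Paths and weights are illustrative only; mergekit uses the per-layer-range
# weights and normalization from the YAML configuration.
import torch

checkpoints = ["model_a.pt", "model_b.pt", "model_c.pt"]  # placeholder files
weights = [0.35, 0.47, 0.18]

state_dicts = [torch.load(path, map_location="cpu") for path in checkpoints]
total = sum(weights)

merged = {}
for name in state_dicts[0]:
    merged[name] = sum(w * sd[name].float() for w, sd in zip(weights, state_dicts)) / total

torch.save(merged, "merged_model.pt")
```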