modelId: string (lengths 5-122)
author: string (lengths 2-42)
last_modified: timestamp[us, tz=UTC]
downloads: int64 (0-738M)
likes: int64 (0-11k)
library_name: string (245 classes)
tags: sequence (lengths 1-4.05k)
pipeline_tag: string (48 classes)
createdAt: timestamp[us, tz=UTC]
card: string (lengths 1-901k)
digiplay/PlanetBumix_v1
digiplay
2024-03-06T16:30:45Z
1,023
3
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-06-18T04:59:37Z
--- license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- Model info: https://civitai.com/models/91651/orplanetbumix Original Author's DEMO image : ![](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/7f789b00-80fc-4ba8-bb5a-034cd50f58d6/width=768/02451-1774884344-Alien%20planet,%201girl,%20extraterrestrial%20beings,%20unknown%20landscapes,%20interstellar%20exploration.jpeg) Sample image I made: ![](https://cdn-uploads.huggingface.co/production/uploads/646c83c871d0c8a6e4455854/b148kseNrXErVA2GfHtO0.jpeg)
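Since the card ships no usage snippet, here is a minimal text-to-image sketch assuming the standard diffusers `StableDiffusionPipeline` API listed in the repo tags; the prompt (echoing the demo image caption) and the sampling settings are illustrative assumptions, not values from the card:

```python
# Minimal text-to-image sketch, assuming the standard diffusers API;
# the prompt and sampling settings below are illustrative, not from the model card.
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "digiplay/PlanetBumix_v1", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "Alien planet, 1girl, extraterrestrial beings, unknown landscapes, interstellar exploration",
    num_inference_steps=25,
    guidance_scale=7.5,
).images[0]
image.save("planetbumix_sample.png")
```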
openskyml/midjourney-mini
openskyml
2023-11-13T04:22:14Z
1,023
10
diffusers
[ "diffusers", "midjourney", "midjourney-mini", "openskyml", "text-to-image", "en", "ru", "de", "fr", "license:mit", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-10-11T17:45:46Z
--- license: mit tags: - midjourney - midjourney-mini - openskyml pinned: true language: - en - ru - de - fr library_name: diffusers pipeline_tag: text-to-image --- <h1><center>Midjourney-mini</center></h1> ## Description Midjourney-mini is a free artificial intelligence model that can create realistic images based on textual descriptions. It has the following advantages: - **Free:** Midjourney-mini is completely free to use for anyone. - **High-quality image generation:** The model uses modern deep learning methods to create high-quality images. - **Ease of use:** Working with Midjourney-mini does not require special programming or machine learning knowledge. The model has a convenient interface and works in your browser. Although Midjourney-mini is a trimmed-down version of the paid Midjourney model, it still provides powerful functionality and can be used in various applications. # Use ## In Diffusers ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained("midjourney-community/midjourney-mini") ``` ## Deploy in Spaces ```py import gradio as gr gr.Interface.load("models/midjourney-community/midjourney-mini").launch() ``` ## Deploy in Inference API ```py import requests API_URL = "https://api-inference.huggingface.co/models/midjourney-community/midjourney-mini" headers = {"Authorization": "Bearer hf_token"} def query(payload): response = requests.post(API_URL, headers=headers, json=payload) return response.content image_bytes = query({ "inputs": "Astronaut riding a horse", }) ``` ```js async function query(data) { const response = await fetch( "https://api-inference.huggingface.co/models/midjourney-community/midjourney-mini", { headers: { Authorization: "Bearer hf_token" }, method: "POST", body: JSON.stringify(data), } ); const result = await response.blob(); return result; } query({"inputs": "Astronaut riding a horse"}).then((response) => { // Use image }); ```
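The "In Diffusers" snippet above only loads the pipeline; as a hedged follow-up, one way to actually generate and save an image with it (the prompt and output filename are illustrative assumptions):

```py
# Continues the "In Diffusers" example above; prompt and filename are illustrative.
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("midjourney-community/midjourney-mini")
pipeline = pipeline.to("cuda")  # optional: move to GPU if one is available

image = pipeline("Astronaut riding a horse").images[0]
image.save("astronaut.png")
```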
infgrad/stella-base-zh-v2
infgrad
2024-04-06T02:49:25Z
1,023
13
sentence-transformers
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "mteb", "arxiv:1612.00796", "model-index", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
2023-10-13T04:42:53Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb model-index: - name: stella-base-zh-v2 results: - task: type: STS dataset: type: C-MTEB/AFQMC name: MTEB AFQMC config: default split: validation revision: None metrics: - type: cos_sim_pearson value: 44.62083443545288 - type: cos_sim_spearman value: 46.72814628391134 - type: euclidean_pearson value: 45.11522093816821 - type: euclidean_spearman value: 46.72818648900957 - type: manhattan_pearson value: 44.98820754682395 - type: manhattan_spearman value: 46.63576705524296 - task: type: STS dataset: type: C-MTEB/ATEC name: MTEB ATEC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 49.543902370260234 - type: cos_sim_spearman value: 51.22161152883018 - type: euclidean_pearson value: 53.49586541060596 - type: euclidean_spearman value: 51.22161490583934 - type: manhattan_pearson value: 53.51023339947787 - type: manhattan_spearman value: 51.22426632538443 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (zh) config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 39.644 - type: f1 value: 37.67897186741224 - task: type: STS dataset: type: C-MTEB/BQ name: MTEB BQ config: default split: test revision: None metrics: - type: cos_sim_pearson value: 61.96416237112325 - type: cos_sim_spearman value: 64.80484064041543 - type: euclidean_pearson value: 63.281983537100594 - type: euclidean_spearman value: 64.80483024694405 - type: manhattan_pearson value: 63.266046412399426 - type: manhattan_spearman value: 64.79643672829964 - task: type: Clustering dataset: type: C-MTEB/CLSClusteringP2P name: MTEB CLSClusteringP2P config: default split: test revision: None metrics: - type: v_measure value: 40.25857488823951 - task: type: Clustering dataset: type: C-MTEB/CLSClusteringS2S name: MTEB CLSClusteringS2S config: default split: test revision: None metrics: - type: v_measure value: 37.17501553349549 - task: type: Reranking dataset: type: C-MTEB/CMedQAv1-reranking name: MTEB CMedQAv1 config: default split: test revision: None metrics: - type: map value: 84.69751849160603 - type: mrr value: 87.16257936507937 - task: type: Reranking dataset: type: C-MTEB/CMedQAv2-reranking name: MTEB CMedQAv2 config: default split: test revision: None metrics: - type: map value: 85.31468551417655 - type: mrr value: 87.74658730158731 - task: type: Retrieval dataset: type: C-MTEB/CmedqaRetrieval name: MTEB CmedqaRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 24.181 - type: map_at_10 value: 35.615 - type: map_at_100 value: 37.444 - type: map_at_1000 value: 37.573 - type: map_at_3 value: 31.679000000000002 - type: map_at_5 value: 33.854 - type: mrr_at_1 value: 37.108999999999995 - type: mrr_at_10 value: 44.653 - type: mrr_at_100 value: 45.647 - type: mrr_at_1000 value: 45.701 - type: mrr_at_3 value: 42.256 - type: mrr_at_5 value: 43.497 - type: ndcg_at_1 value: 37.108999999999995 - type: ndcg_at_10 value: 42.028999999999996 - type: ndcg_at_100 value: 49.292 - type: ndcg_at_1000 value: 51.64 - type: ndcg_at_3 value: 37.017 - type: ndcg_at_5 value: 38.997 - type: precision_at_1 value: 37.108999999999995 - type: precision_at_10 value: 9.386999999999999 - type: precision_at_100 value: 1.536 - type: precision_at_1000 value: 0.183 - type: precision_at_3 value: 20.93 - type: precision_at_5 value: 15.268999999999998 - type: recall_at_1 value: 24.181 - 
type: recall_at_10 value: 51.961999999999996 - type: recall_at_100 value: 82.122 - type: recall_at_1000 value: 98.059 - type: recall_at_3 value: 36.730000000000004 - type: recall_at_5 value: 42.884 - task: type: PairClassification dataset: type: C-MTEB/CMNLI name: MTEB Cmnli config: default split: validation revision: None metrics: - type: cos_sim_accuracy value: 76.23571858087793 - type: cos_sim_ap value: 84.75290046905519 - type: cos_sim_f1 value: 77.70114942528735 - type: cos_sim_precision value: 73.05475504322767 - type: cos_sim_recall value: 82.97872340425532 - type: dot_accuracy value: 76.23571858087793 - type: dot_ap value: 84.75113928508674 - type: dot_f1 value: 77.70114942528735 - type: dot_precision value: 73.05475504322767 - type: dot_recall value: 82.97872340425532 - type: euclidean_accuracy value: 76.23571858087793 - type: euclidean_ap value: 84.75289931658567 - type: euclidean_f1 value: 77.70114942528735 - type: euclidean_precision value: 73.05475504322767 - type: euclidean_recall value: 82.97872340425532 - type: manhattan_accuracy value: 76.17558628983764 - type: manhattan_ap value: 84.75764676597448 - type: manhattan_f1 value: 77.73437499999999 - type: manhattan_precision value: 72.52480259161773 - type: manhattan_recall value: 83.75029226093056 - type: max_accuracy value: 76.23571858087793 - type: max_ap value: 84.75764676597448 - type: max_f1 value: 77.73437499999999 - task: type: Retrieval dataset: type: C-MTEB/CovidRetrieval name: MTEB CovidRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 67.43900000000001 - type: map_at_10 value: 76.00099999999999 - type: map_at_100 value: 76.297 - type: map_at_1000 value: 76.29899999999999 - type: map_at_3 value: 74.412 - type: map_at_5 value: 75.177 - type: mrr_at_1 value: 67.65 - type: mrr_at_10 value: 76.007 - type: mrr_at_100 value: 76.322 - type: mrr_at_1000 value: 76.324 - type: mrr_at_3 value: 74.464 - type: mrr_at_5 value: 75.265 - type: ndcg_at_1 value: 67.65 - type: ndcg_at_10 value: 79.85600000000001 - type: ndcg_at_100 value: 81.34400000000001 - type: ndcg_at_1000 value: 81.44200000000001 - type: ndcg_at_3 value: 76.576 - type: ndcg_at_5 value: 77.956 - type: precision_at_1 value: 67.65 - type: precision_at_10 value: 9.283 - type: precision_at_100 value: 0.9990000000000001 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 27.749000000000002 - type: precision_at_5 value: 17.345 - type: recall_at_1 value: 67.43900000000001 - type: recall_at_10 value: 91.781 - type: recall_at_100 value: 98.84100000000001 - type: recall_at_1000 value: 99.684 - type: recall_at_3 value: 82.719 - type: recall_at_5 value: 86.038 - task: type: Retrieval dataset: type: C-MTEB/DuRetrieval name: MTEB DuRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 25.354 - type: map_at_10 value: 79.499 - type: map_at_100 value: 82.416 - type: map_at_1000 value: 82.451 - type: map_at_3 value: 54.664 - type: map_at_5 value: 69.378 - type: mrr_at_1 value: 89.25 - type: mrr_at_10 value: 92.666 - type: mrr_at_100 value: 92.738 - type: mrr_at_1000 value: 92.74 - type: mrr_at_3 value: 92.342 - type: mrr_at_5 value: 92.562 - type: ndcg_at_1 value: 89.25 - type: ndcg_at_10 value: 86.97 - type: ndcg_at_100 value: 89.736 - type: ndcg_at_1000 value: 90.069 - type: ndcg_at_3 value: 85.476 - type: ndcg_at_5 value: 84.679 - type: precision_at_1 value: 89.25 - type: precision_at_10 value: 41.9 - type: precision_at_100 value: 4.811 - type: precision_at_1000 value: 0.48900000000000005 - type: 
precision_at_3 value: 76.86699999999999 - type: precision_at_5 value: 65.25 - type: recall_at_1 value: 25.354 - type: recall_at_10 value: 88.64999999999999 - type: recall_at_100 value: 97.56 - type: recall_at_1000 value: 99.37 - type: recall_at_3 value: 57.325 - type: recall_at_5 value: 74.614 - task: type: Retrieval dataset: type: C-MTEB/EcomRetrieval name: MTEB EcomRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 48.3 - type: map_at_10 value: 57.765 - type: map_at_100 value: 58.418000000000006 - type: map_at_1000 value: 58.43899999999999 - type: map_at_3 value: 54.883 - type: map_at_5 value: 56.672999999999995 - type: mrr_at_1 value: 48.3 - type: mrr_at_10 value: 57.765 - type: mrr_at_100 value: 58.418000000000006 - type: mrr_at_1000 value: 58.43899999999999 - type: mrr_at_3 value: 54.883 - type: mrr_at_5 value: 56.672999999999995 - type: ndcg_at_1 value: 48.3 - type: ndcg_at_10 value: 62.846000000000004 - type: ndcg_at_100 value: 65.845 - type: ndcg_at_1000 value: 66.369 - type: ndcg_at_3 value: 56.996 - type: ndcg_at_5 value: 60.214999999999996 - type: precision_at_1 value: 48.3 - type: precision_at_10 value: 7.9 - type: precision_at_100 value: 0.9259999999999999 - type: precision_at_1000 value: 0.097 - type: precision_at_3 value: 21.032999999999998 - type: precision_at_5 value: 14.180000000000001 - type: recall_at_1 value: 48.3 - type: recall_at_10 value: 79.0 - type: recall_at_100 value: 92.60000000000001 - type: recall_at_1000 value: 96.7 - type: recall_at_3 value: 63.1 - type: recall_at_5 value: 70.89999999999999 - task: type: Classification dataset: type: C-MTEB/IFlyTek-classification name: MTEB IFlyTek config: default split: validation revision: None metrics: - type: accuracy value: 47.895344363216616 - type: f1 value: 34.95151253165417 - task: type: Classification dataset: type: C-MTEB/JDReview-classification name: MTEB JDReview config: default split: test revision: None metrics: - type: accuracy value: 84.78424015009381 - type: ap value: 52.436279969597685 - type: f1 value: 79.49258679392281 - task: type: STS dataset: type: C-MTEB/LCQMC name: MTEB LCQMC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 70.2307617475436 - type: cos_sim_spearman value: 76.88912653700545 - type: euclidean_pearson value: 75.47976675486538 - type: euclidean_spearman value: 76.88912210059333 - type: manhattan_pearson value: 75.45834919257487 - type: manhattan_spearman value: 76.8669208121889 - task: type: Reranking dataset: type: C-MTEB/Mmarco-reranking name: MTEB MMarcoReranking config: default split: dev revision: None metrics: - type: map value: 28.047948482579244 - type: mrr value: 26.63809523809524 - task: type: Retrieval dataset: type: C-MTEB/MMarcoRetrieval name: MTEB MMarcoRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 65.837 - type: map_at_10 value: 74.72 - type: map_at_100 value: 75.068 - type: map_at_1000 value: 75.079 - type: map_at_3 value: 72.832 - type: map_at_5 value: 74.07000000000001 - type: mrr_at_1 value: 68.009 - type: mrr_at_10 value: 75.29400000000001 - type: mrr_at_100 value: 75.607 - type: mrr_at_1000 value: 75.617 - type: mrr_at_3 value: 73.677 - type: mrr_at_5 value: 74.74199999999999 - type: ndcg_at_1 value: 68.009 - type: ndcg_at_10 value: 78.36 - type: ndcg_at_100 value: 79.911 - type: ndcg_at_1000 value: 80.226 - type: ndcg_at_3 value: 74.825 - type: ndcg_at_5 value: 76.9 - type: precision_at_1 value: 68.009 - type: precision_at_10 value: 9.463000000000001 - type: 
precision_at_100 value: 1.023 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 28.075 - type: precision_at_5 value: 17.951 - type: recall_at_1 value: 65.837 - type: recall_at_10 value: 89.00099999999999 - type: recall_at_100 value: 95.968 - type: recall_at_1000 value: 98.461 - type: recall_at_3 value: 79.69800000000001 - type: recall_at_5 value: 84.623 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (zh-CN) config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.08675184936112 - type: f1 value: 65.51466585063827 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (zh-CN) config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 73.22461331540013 - type: f1 value: 72.675432030145 - task: type: Retrieval dataset: type: C-MTEB/MedicalRetrieval name: MTEB MedicalRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 49.2 - type: map_at_10 value: 55.394 - type: map_at_100 value: 55.883 - type: map_at_1000 value: 55.93900000000001 - type: map_at_3 value: 53.733 - type: map_at_5 value: 54.778000000000006 - type: mrr_at_1 value: 49.3 - type: mrr_at_10 value: 55.444 - type: mrr_at_100 value: 55.933 - type: mrr_at_1000 value: 55.989 - type: mrr_at_3 value: 53.783 - type: mrr_at_5 value: 54.827999999999996 - type: ndcg_at_1 value: 49.2 - type: ndcg_at_10 value: 58.501999999999995 - type: ndcg_at_100 value: 61.181 - type: ndcg_at_1000 value: 62.848000000000006 - type: ndcg_at_3 value: 55.143 - type: ndcg_at_5 value: 57.032000000000004 - type: precision_at_1 value: 49.2 - type: precision_at_10 value: 6.83 - type: precision_at_100 value: 0.815 - type: precision_at_1000 value: 0.095 - type: precision_at_3 value: 19.733 - type: precision_at_5 value: 12.76 - type: recall_at_1 value: 49.2 - type: recall_at_10 value: 68.30000000000001 - type: recall_at_100 value: 81.5 - type: recall_at_1000 value: 95.0 - type: recall_at_3 value: 59.199999999999996 - type: recall_at_5 value: 63.800000000000004 - task: type: Classification dataset: type: C-MTEB/MultilingualSentiment-classification name: MTEB MultilingualSentiment config: default split: validation revision: None metrics: - type: accuracy value: 71.66666666666666 - type: f1 value: 70.92944632461379 - task: type: PairClassification dataset: type: C-MTEB/OCNLI name: MTEB Ocnli config: default split: validation revision: None metrics: - type: cos_sim_accuracy value: 70.00541418516514 - type: cos_sim_ap value: 75.16499510773514 - type: cos_sim_f1 value: 73.09435517099301 - type: cos_sim_precision value: 59.932432432432435 - type: cos_sim_recall value: 93.66420274551214 - type: dot_accuracy value: 70.00541418516514 - type: dot_ap value: 75.16499510773514 - type: dot_f1 value: 73.09435517099301 - type: dot_precision value: 59.932432432432435 - type: dot_recall value: 93.66420274551214 - type: euclidean_accuracy value: 70.00541418516514 - type: euclidean_ap value: 75.16499510773514 - type: euclidean_f1 value: 73.09435517099301 - type: euclidean_precision value: 59.932432432432435 - type: euclidean_recall value: 93.66420274551214 - type: manhattan_accuracy value: 70.11369788846778 - type: manhattan_ap value: 75.1259071890593 - type: manhattan_f1 value: 72.91399229781771 - type: manhattan_precision value: 61.294964028776974 - type: manhattan_recall value: 89.96832101372756 - type: max_accuracy 
value: 70.11369788846778 - type: max_ap value: 75.16499510773514 - type: max_f1 value: 73.09435517099301 - task: type: Classification dataset: type: C-MTEB/OnlineShopping-classification name: MTEB OnlineShopping config: default split: test revision: None metrics: - type: accuracy value: 91.38000000000002 - type: ap value: 89.12250244489272 - type: f1 value: 91.36604511107015 - task: type: STS dataset: type: C-MTEB/PAWSX name: MTEB PAWSX config: default split: test revision: None metrics: - type: cos_sim_pearson value: 24.231255568030463 - type: cos_sim_spearman value: 29.6964906904186 - type: euclidean_pearson value: 30.166130502867016 - type: euclidean_spearman value: 29.69614167804371 - type: manhattan_pearson value: 30.166606116745935 - type: manhattan_spearman value: 29.62681453661945 - task: type: STS dataset: type: C-MTEB/QBQTC name: MTEB QBQTC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 34.88835755574809 - type: cos_sim_spearman value: 37.3797926051053 - type: euclidean_pearson value: 35.46629492698549 - type: euclidean_spearman value: 37.37987510604593 - type: manhattan_pearson value: 35.4953353526957 - type: manhattan_spearman value: 37.41397231689605 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (zh) config: zh split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 67.79575721136626 - type: cos_sim_spearman value: 69.02068400784196 - type: euclidean_pearson value: 68.30675023447176 - type: euclidean_spearman value: 69.02068400784196 - type: manhattan_pearson value: 69.91284259797827 - type: manhattan_spearman value: 70.31717787763641 - task: type: STS dataset: type: C-MTEB/STSB name: MTEB STSB config: default split: test revision: None metrics: - type: cos_sim_pearson value: 79.05026785034129 - type: cos_sim_spearman value: 79.62719014756249 - type: euclidean_pearson value: 79.13305301290063 - type: euclidean_spearman value: 79.62710682651051 - type: manhattan_pearson value: 79.07012559140433 - type: manhattan_spearman value: 79.58333069893605 - task: type: Reranking dataset: type: C-MTEB/T2Reranking name: MTEB T2Reranking config: default split: dev revision: None metrics: - type: map value: 66.34533369244325 - type: mrr value: 75.93632792769557 - task: type: Retrieval dataset: type: C-MTEB/T2Retrieval name: MTEB T2Retrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 26.995 - type: map_at_10 value: 76.083 - type: map_at_100 value: 79.727 - type: map_at_1000 value: 79.798 - type: map_at_3 value: 53.455 - type: map_at_5 value: 65.747 - type: mrr_at_1 value: 89.536 - type: mrr_at_10 value: 91.972 - type: mrr_at_100 value: 92.07 - type: mrr_at_1000 value: 92.07499999999999 - type: mrr_at_3 value: 91.52900000000001 - type: mrr_at_5 value: 91.806 - type: ndcg_at_1 value: 89.536 - type: ndcg_at_10 value: 83.756 - type: ndcg_at_100 value: 87.468 - type: ndcg_at_1000 value: 88.16199999999999 - type: ndcg_at_3 value: 85.349 - type: ndcg_at_5 value: 83.855 - type: precision_at_1 value: 89.536 - type: precision_at_10 value: 41.713 - type: precision_at_100 value: 4.994 - type: precision_at_1000 value: 0.515 - type: precision_at_3 value: 74.81400000000001 - type: precision_at_5 value: 62.678 - type: recall_at_1 value: 26.995 - type: recall_at_10 value: 82.586 - type: recall_at_100 value: 94.726 - type: recall_at_1000 value: 98.276 - type: recall_at_3 value: 55.106 - type: recall_at_5 value: 69.096 - task: type: Classification dataset: type: 
C-MTEB/TNews-classification name: MTEB TNews config: default split: validation revision: None metrics: - type: accuracy value: 51.25200000000001 - type: f1 value: 49.43760438233612 - task: type: Clustering dataset: type: C-MTEB/ThuNewsClusteringP2P name: MTEB ThuNewsClusteringP2P config: default split: test revision: None metrics: - type: v_measure value: 62.18575394560257 - task: type: Clustering dataset: type: C-MTEB/ThuNewsClusteringS2S name: MTEB ThuNewsClusteringS2S config: default split: test revision: None metrics: - type: v_measure value: 57.97489103903411 - task: type: Retrieval dataset: type: C-MTEB/VideoRetrieval name: MTEB VideoRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 52.2 - type: map_at_10 value: 63.23800000000001 - type: map_at_100 value: 63.788 - type: map_at_1000 value: 63.800999999999995 - type: map_at_3 value: 61.016999999999996 - type: map_at_5 value: 62.392 - type: mrr_at_1 value: 52.2 - type: mrr_at_10 value: 63.23800000000001 - type: mrr_at_100 value: 63.788 - type: mrr_at_1000 value: 63.800999999999995 - type: mrr_at_3 value: 61.016999999999996 - type: mrr_at_5 value: 62.392 - type: ndcg_at_1 value: 52.2 - type: ndcg_at_10 value: 68.273 - type: ndcg_at_100 value: 70.892 - type: ndcg_at_1000 value: 71.207 - type: ndcg_at_3 value: 63.794 - type: ndcg_at_5 value: 66.268 - type: precision_at_1 value: 52.2 - type: precision_at_10 value: 8.39 - type: precision_at_100 value: 0.96 - type: precision_at_1000 value: 0.098 - type: precision_at_3 value: 23.933 - type: precision_at_5 value: 15.559999999999999 - type: recall_at_1 value: 52.2 - type: recall_at_10 value: 83.89999999999999 - type: recall_at_100 value: 96.0 - type: recall_at_1000 value: 98.4 - type: recall_at_3 value: 71.8 - type: recall_at_5 value: 77.8 - task: type: Classification dataset: type: C-MTEB/waimai-classification name: MTEB Waimai config: default split: test revision: None metrics: - type: accuracy value: 86.67999999999999 - type: ap value: 69.96366657730151 - type: f1 value: 84.92349905611292 --- **新闻 | News** **[2024-04-06]** 开源[puff](https://huggingface.co/infgrad/puff-base-v1)系列模型,**专门针对检索和语义匹配任务,更多的考虑泛化性和私有通用测试集效果,向量维度可变,中英双语**。 **[2024-02-27]** 开源stella-mrl-large-zh-v3.5-1792d模型,支持**向量可变维度**。 **[2024-02-17]** 开源stella v3系列、dialogue编码模型和相关训练数据。 **[2023-10-19]** 开源stella-base-en-v2 使用简单,**不需要任何前缀文本**。 **[2023-10-12]** 开源stella-base-zh-v2和stella-large-zh-v2, 效果更好且使用简单,**不需要任何前缀文本**。 **[2023-09-11]** 开源stella-base-zh和stella-large-zh 欢迎去[本人主页](https://huggingface.co/infgrad)查看最新模型,并提出您的宝贵意见! ## stella model stella是一个通用的文本编码模型,主要有以下模型: | Model Name | Model Size (GB) | Dimension | Sequence Length | Language | Need instruction for retrieval? | |:------------------:|:---------------:|:---------:|:---------------:|:--------:|:-------------------------------:| | stella-large-zh-v2 | 0.65 | 1024 | 1024 | Chinese | No | | stella-base-zh-v2 | 0.2 | 768 | 1024 | Chinese | No | | stella-large-zh | 0.65 | 1024 | 1024 | Chinese | Yes | | stella-base-zh | 0.2 | 768 | 1024 | Chinese | Yes | 完整的训练思路和训练过程已记录在[博客](https://zhuanlan.zhihu.com/p/655322183),欢迎阅读讨论。 **训练数据:** 1. 开源数据(wudao_base_200GB[1]、m3e[2]和simclue[3]),着重挑选了长度大于512的文本 2. 在通用语料库上使用LLM构造一批(question, paragraph)和(sentence, paragraph)数据 **训练方法:** 1. 对比学习损失函数 2. 带有难负例的对比学习损失函数(分别基于bm25和vector构造了难负例) 3. EWC(Elastic Weights Consolidation)[4] 4. cosent loss[5] 5. 
每一种类型的数据一个迭代器,分别计算loss进行更新 stella-v2在stella模型的基础上,使用了更多的训练数据,同时知识蒸馏等方法去除了前置的instruction( 比如piccolo的`查询:`, `结果:`, e5的`query:`和`passage:`)。 **初始权重:**\ stella-base-zh和stella-large-zh分别以piccolo-base-zh[6]和piccolo-large-zh作为基础模型,512-1024的position embedding使用层次分解位置编码[7]进行初始化。\ 感谢商汤科技研究院开源的[piccolo系列模型](https://huggingface.co/sensenova)。 stella is a general-purpose text encoder, which mainly includes the following models: | Model Name | Model Size (GB) | Dimension | Sequence Length | Language | Need instruction for retrieval? | |:------------------:|:---------------:|:---------:|:---------------:|:--------:|:-------------------------------:| | stella-large-zh-v2 | 0.65 | 1024 | 1024 | Chinese | No | | stella-base-zh-v2 | 0.2 | 768 | 1024 | Chinese | No | | stella-large-zh | 0.65 | 1024 | 1024 | Chinese | Yes | | stella-base-zh | 0.2 | 768 | 1024 | Chinese | Yes | The training data mainly includes: 1. Open-source training data (wudao_base_200GB, m3e, and simclue), with a focus on selecting texts with lengths greater than 512. 2. A batch of (question, paragraph) and (sentence, paragraph) data constructed on a general corpus using LLM. The loss functions mainly include: 1. Contrastive learning loss function 2. Contrastive learning loss function with hard negative examples (based on bm25 and vector hard negatives) 3. EWC (Elastic Weights Consolidation) 4. cosent loss Model weight initialization:\ stella-base-zh and stella-large-zh use piccolo-base-zh and piccolo-large-zh as the base models, respectively, and the 512-1024 position embedding uses the initialization strategy of hierarchical decomposed position encoding. Training strategy:\ One iterator for each type of data, separately calculating the loss. Based on stella models, stella-v2 use more training data and remove instruction by Knowledge Distillation. 
## Metric #### C-MTEB leaderboard (Chinese) | Model Name | Model Size (GB) | Dimension | Sequence Length | Average (35) | Classification (9) | Clustering (4) | Pair Classification (2) | Reranking (4) | Retrieval (8) | STS (8) | |:------------------:|:---------------:|:---------:|:---------------:|:------------:|:------------------:|:--------------:|:-----------------------:|:-------------:|:-------------:|:-------:| | stella-large-zh-v2 | 0.65 | 1024 | 1024 | 65.13 | 69.05 | 49.16 | 82.68 | 66.41 | 70.14 | 58.66 | | stella-base-zh-v2 | 0.2 | 768 | 1024 | 64.36 | 68.29 | 49.4 | 79.95 | 66.1 | 70.08 | 56.92 | | stella-large-zh | 0.65 | 1024 | 1024 | 64.54 | 67.62 | 48.65 | 78.72 | 65.98 | 71.02 | 58.3 | | stella-base-zh | 0.2 | 768 | 1024 | 64.16 | 67.77 | 48.7 | 76.09 | 66.95 | 71.07 | 56.54 | #### Reproduce our results Codes: ```python import torch import numpy as np from typing import List from mteb import MTEB from sentence_transformers import SentenceTransformer class FastTextEncoder(): def __init__(self, model_name): self.model = SentenceTransformer(model_name).cuda().half().eval() self.model.max_seq_length = 512 def encode( self, input_texts: List[str], *args, **kwargs ): new_sens = list(set(input_texts)) new_sens.sort(key=lambda x: len(x), reverse=True) vecs = self.model.encode( new_sens, normalize_embeddings=True, convert_to_numpy=True, batch_size=256 ).astype(np.float32) sen2arrid = {sen: idx for idx, sen in enumerate(new_sens)} vecs = vecs[[sen2arrid[sen] for sen in input_texts]] torch.cuda.empty_cache() return vecs if __name__ == '__main__': model_name = "infgrad/stella-base-zh-v2" output_folder = "zh_mteb_results/stella-base-zh-v2" task_names = [t.description["name"] for t in MTEB(task_langs=['zh', 'zh-CN']).tasks] model = FastTextEncoder(model_name) for task in task_names: MTEB(tasks=[task], task_langs=['zh', 'zh-CN']).run(model, output_folder=output_folder) ``` #### Evaluation for long text In practice we found that the C-MTEB evaluation data is almost entirely shorter than 512 tokens, and, worse, for the texts that are longer than 512 the key information sits in the first half. The following CMRC2018 example illustrates the problem: ``` question: 《无双大蛇z》是谁旗下ω-force开发的动作游戏? passage:《无双大蛇z》是光荣旗下ω-force开发的动作游戏,于2009年3月12日登陆索尼playstation3,并于2009年11月27日推......
``` The passage is more than 800 characters long, well over 512, but for this question the first 40 characters are already enough for retrieval; the extra content is just noise to the model and actually hurts performance.\ In short, existing datasets have two problems:\ 1) too few texts are longer than 512\ 2) even when a text is longer than 512, only its first 512 characters matter for retrieval\ As a result, **the long-text encoding ability of a model cannot be evaluated accurately.** To address this, we collected relevant open-source data, filtered it with rules, and assembled 6 long-text test sets: - CMRC2018, general encyclopedia - CAIL, legal reading comprehension - DRCD, traditional-Chinese encyclopedia, converted to simplified Chinese - Military, military Q&A - Squad, English reading comprehension, translated to Chinese - Multifieldqa_zh, Tsinghua's evaluation data for long-text understanding by large models[9] The processing rule keeps texts whose answer lies beyond position 512; the short test data is undersampled so that long and short texts are mixed at a ratio of roughly 1:2, meaning the model must understand both short and long texts. Except for the Military dataset, we provide download links for the other 5 test sets: https://drive.google.com/file/d/1WC6EWaCbVgz-vPMDFH4TwAMkLyh5WNcN/view?usp=sharing The evaluation metric is Recall@5, and the results are as follows: | Dataset | piccolo-base-zh | piccolo-large-zh | bge-base-zh | bge-large-zh | stella-base-zh | stella-large-zh | |:---------------:|:---------------:|:----------------:|:-----------:|:------------:|:--------------:|:---------------:| | CMRC2018 | 94.34 | 93.82 | 91.56 | 93.12 | 96.08 | 95.56 | | CAIL | 28.04 | 33.64 | 31.22 | 33.94 | 34.62 | 37.18 | | DRCD | 78.25 | 77.9 | 78.34 | 80.26 | 86.14 | 84.58 | | Military | 76.61 | 73.06 | 75.65 | 75.81 | 83.71 | 80.48 | | Squad | 91.21 | 86.61 | 87.87 | 90.38 | 93.31 | 91.21 | | Multifieldqa_zh | 81.41 | 83.92 | 83.92 | 83.42 | 79.9 | 80.4 | | **Average** | 74.98 | 74.83 | 74.76 | 76.15 | **78.96** | **78.24** | **Note:** because long-text evaluation data is scarce, the train splits were also used when building these test sets; if you evaluate on them yourself, check the models' training data to avoid leakage. ## Usage #### stella Chinese models stella-base-zh and stella-large-zh: these models are trained on top of piccolo, so **their usage is exactly the same as piccolo's**, i.e. for retrieval and reranking tasks prepend `查询: ` to the query and `结果: ` to the passage. For short-to-short matching no prefix is needed. stella-base-zh-v2 and stella-large-zh-v2: these models are simple to use and **need no prefix text in any scenario**. All stella Chinese models use mean pooling to produce the text vector. Usage with the sentence-transformers library: ```python # For short-to-short datasets, the following is the generic usage from sentence_transformers import SentenceTransformer sentences = ["数据1", "数据2"] model = SentenceTransformer('infgrad/stella-base-zh-v2') print(model.max_seq_length) embeddings_1 = model.encode(sentences, normalize_embeddings=True) embeddings_2 = model.encode(sentences, normalize_embeddings=True) similarity = embeddings_1 @ embeddings_2.T print(similarity) ``` Using the transformers library directly: ```python from transformers import AutoModel, AutoTokenizer from sklearn.preprocessing import normalize model = AutoModel.from_pretrained('infgrad/stella-base-zh-v2') tokenizer = AutoTokenizer.from_pretrained('infgrad/stella-base-zh-v2') sentences = ["数据1", "数据ABCDEFGH"] batch_data = tokenizer( batch_text_or_text_pairs=sentences, padding="longest", return_tensors="pt", max_length=1024, truncation=True, ) attention_mask = batch_data["attention_mask"] model_output = model(**batch_data) last_hidden = model_output.last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0) vectors = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] vectors = normalize(vectors, norm="l2", axis=1, ) print(vectors.shape) # 2,768 ``` #### stella models for English developing... ## Training Detail **Hardware:** a single A100-80GB **Environment:** torch1.13.*; transformers-trainer + deepspeed + gradient-checkpointing **Learning rate:** 1e-6 **batch_size:** 1024 for the base model and 768 for the large model, each with an extra 20% hard negatives **Data volume:** about 1M samples for the first version, of which roughly 200K were constructed with an LLM (a 13b model); the v2 series uses 20M training samples. ## ToDoList **Evaluation stability:** during evaluation the Clustering tasks differ slightly from the official results (about ±0.0x) because the clustering code does not set a random_seed; the gap is negligible and does not affect the conclusions. **Higher-quality long-text training and test data:** most of the training data was constructed with a 13b model and certainly contains noise; the test data is mostly derived from MRC data, so the questions are all factoid-style and do not match the real distribution. **OOD performance:** although many embedding models have appeared recently, on less common domains none of them (stella, openai, and cohere included) beats BM25. ## Reference 1. https://www.scidb.cn/en/detail?dataSetId=c6a3fe684227415a9db8e21bac4a15ab 2. https://github.com/wangyuxinwhy/uniem 3. https://github.com/CLUEbenchmark/SimCLUE 4. https://arxiv.org/abs/1612.00796 5.
https://kexue.fm/archives/8847 6. https://huggingface.co/sensenova/piccolo-base-zh 7. https://kexue.fm/archives/7947 8. https://github.com/FlagOpen/FlagEmbedding 9. https://github.com/THUDM/LongBench
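To complement the long-text evaluation described above, here is a minimal, hypothetical sketch of how a Recall@5 score can be computed with the same sentence-transformers embeddings; the toy queries, passages, and gold labels are illustrative assumptions, not the released test sets:

```python
# Hypothetical Recall@5 sketch for a retrieval test set of (question, gold passage) pairs.
# The data below is illustrative; the released test sets may use a different format.
import numpy as np
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("infgrad/stella-base-zh-v2")

questions = ["问题1", "问题2"]           # queries
passages = ["段落1", "段落2", "段落3"]   # candidate corpus
gold = [0, 2]                            # index of the gold passage for each question

q_emb = model.encode(questions, normalize_embeddings=True)
p_emb = model.encode(passages, normalize_embeddings=True)

scores = q_emb @ p_emb.T                   # cosine similarity (embeddings are normalized)
top5 = np.argsort(-scores, axis=1)[:, :5]  # indices of the 5 highest-scoring passages per query
recall_at_5 = float(np.mean([g in row for g, row in zip(gold, top5)]))
print(f"Recall@5: {recall_at_5:.4f}")
```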
Gille/StrangeMerges_12-7B-slerp
Gille
2024-03-04T21:50:08Z
1,023
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Keynote-Technology/KAI-7B-v0.1", "Gille/StrangeMerges_11-7B-slerp", "base_model:Keynote-Technology/KAI-7B-v0.1", "base_model:Gille/StrangeMerges_11-7B-slerp", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-30T13:22:24Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - Keynote-Technology/KAI-7B-v0.1 - Gille/StrangeMerges_11-7B-slerp base_model: - Keynote-Technology/KAI-7B-v0.1 - Gille/StrangeMerges_11-7B-slerp model-index: - name: StrangeMerges_12-7B-slerp results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 66.64 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_12-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 85.89 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_12-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.94 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_12-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 52.55 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_12-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 81.69 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_12-7B-slerp name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 63.08 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_12-7B-slerp name: Open LLM Leaderboard --- # StrangeMerges_12-7B-slerp StrangeMerges_12-7B-slerp is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [Keynote-Technology/KAI-7B-v0.1](https://huggingface.co/Keynote-Technology/KAI-7B-v0.1) * [Gille/StrangeMerges_11-7B-slerp](https://huggingface.co/Gille/StrangeMerges_11-7B-slerp) ## 🧩 Configuration ```yaml slices: - sources: - model: Keynote-Technology/KAI-7B-v0.1 layer_range: [0, 32] - model: Gille/StrangeMerges_11-7B-slerp layer_range: [0, 32] merge_method: slerp base_model: Keynote-Technology/KAI-7B-v0.1 parameters: t: - filter: self_attn value: [0.1, 0.6, 0.3, 0.8, 0.5] - filter: mlp value: [0.9, 0.4, 0.7, 0.2, 0.5] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "Gille/StrangeMerges_12-7B-slerp" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = 
transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Gille__StrangeMerges_12-7B-slerp) | Metric |Value| |---------------------------------|----:| |Avg. |69.13| |AI2 Reasoning Challenge (25-Shot)|66.64| |HellaSwag (10-Shot) |85.89| |MMLU (5-Shot) |64.94| |TruthfulQA (0-shot) |52.55| |Winogrande (5-shot) |81.69| |GSM8k (5-shot) |63.08|
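The configuration above uses the `slerp` merge method with per-filter interpolation weights `t`. As a rough illustration of what spherical linear interpolation does to a pair of weight tensors, here is a small self-contained sketch; it is not mergekit's implementation, and the tensor shapes and fallback threshold are illustrative:

```python
# Toy spherical linear interpolation (slerp) between two flattened weight tensors.
# This only illustrates the idea behind the merge config above; it is not mergekit's code.
import torch

def slerp(t: float, a: torch.Tensor, b: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
    a_n = a / a.norm()
    b_n = b / b.norm()
    omega = torch.arccos(torch.clamp(a_n.dot(b_n), -1 + eps, 1 - eps))  # angle between the tensors
    so = torch.sin(omega)
    if so.abs() < eps:  # nearly parallel: fall back to plain linear interpolation
        return (1 - t) * a + t * b
    return (torch.sin((1 - t) * omega) / so) * a + (torch.sin(t * omega) / so) * b

# t = 0.5 mixes the two models equally, matching the default `value: 0.5` in the YAML above.
w_kai = torch.randn(4096)
w_strange = torch.randn(4096)
print(slerp(0.5, w_kai, w_strange)[:5])
```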
abacusai/bigyi-15b
abacusai
2024-03-07T05:25:29Z
1,023
11
transformers
[ "transformers", "safetensors", "llama", "text-generation", "mergekit", "merge", "base_model:01-ai/Yi-9B", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-06T19:30:11Z
--- base_model: - 01-ai/Yi-9B library_name: transformers tags: - mergekit - merge license: other license_name: yi-license license_link: LICENSE --- # bigyi-15b I recently made [bigstral-12b](https://huggingface.co/abacusai/bigstral-12b-32k) and then I saw this new awesome model [yi-9b](https://huggingface.co/01-ai/Yi-9B) and decided to make an embiggened version. This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). Bigyi-15b is a base / completion model, so there is no chat template. It has a 4k context. ## Example Here is a recipe for Mai Tai:\n\n1: > 3 parts rum, 2: 3 parts pineapple juice, 3: half a cup of lime juice, 4: 6 to 8 fresh or frozen pineapple chunks, 5: crushed ice. > Mix all ingredients except ice and pour into glasses with ice. Garnish with a pineapple slice. Here is an implementation of 2-sum in golang: > ```go > func twoSum(nums []int, target int) []int { > if len(nums) <= 1 { return nil } > m := map[int] bool{} > for i := range(nums) {{ > n = nums[i] > > // find the complement of current number in map > comp = target - n > if comp in m { return [m[comp], i+1 ] } > else { m[n] = true } > }} > return nil > } > ``` ## Merge Details ### Merge Method This model was merged using the passthrough merge method. ### Models Merged The following models were included in the merge: * [01-ai/Yi-9B](https://huggingface.co/01-ai/Yi-9B) ### Configuration The following YAML configuration was used to produce this model: ```yaml dtype: float16 merge_method: passthrough slices: - sources: - layer_range: [0, 12] model: 01-ai/Yi-9B - sources: - layer_range: [6, 18] model: 01-ai/Yi-9B - sources: - layer_range: [12, 24] model: 01-ai/Yi-9B - sources: - layer_range: [18, 30] model: 01-ai/Yi-9B - sources: - layer_range: [24, 36] model: 01-ai/Yi-9B - sources: - layer_range: [30, 42] model: 01-ai/Yi-9B - sources: - layer_range: [36, 48] model: 01-ai/Yi-9B ```
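The passthrough configuration above stacks overlapping 12-layer windows of Yi-9B. A quick sketch of the resulting depth, with the slice boundaries taken from the YAML (the bookkeeping is mine, not part of the card):

```python
# Count the layers produced by the passthrough merge config above.
# Each slice copies a half-open range [start, end) of Yi-9B's 48 layers.
slices = [(0, 12), (6, 18), (12, 24), (18, 30), (24, 36), (30, 42), (36, 48)]

total_layers = sum(end - start for start, end in slices)
print(f"Yi-9B source layers: 48, merged model layers: {total_layers}")  # 84
# Overlapping windows mean most source layers appear twice in the stacked model,
# which is how a ~9B checkpoint grows into the ~15B bigyi-15b.
```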
fla-hub/gla-1.3B-100B
fla-hub
2024-05-23T17:22:44Z
1,023
0
fla
[ "fla", "safetensors", "gla", "text-generation", "en", "dataset:cerebras/SlimPajama-627B", "license:mit", "region:us" ]
text-generation
2024-03-31T03:03:42Z
--- language: - en tags: - text-generation - gla license: mit datasets: - cerebras/SlimPajama-627B library_name: fla ---
patriotyk/vocos-mel-hifigan-compat-44100khz
patriotyk
2024-06-20T07:32:28Z
1,023
3
null
[ "pytorch", "tensorboard", "vocos", "hifigan", "tts", "melspectrogram", "vocoder", "mel", "text-to-speech", "license:mit", "region:us" ]
text-to-speech
2024-05-10T11:14:29Z
--- license: mit pipeline_tag: text-to-speech tags: - vocos - hifigan - tts - melspectrogram - vocoder - mel --- ### Model Description <!-- Provide a longer summary of what this model is. --> **Vocos** is a fast neural vocoder designed to synthesize audio waveforms from acoustic features. Unlike other typical GAN-based vocoders, Vocos does not model audio samples in the time domain. Instead, it generates spectral coefficients, facilitating rapid audio reconstruction through inverse Fourier transform. This version of Vocos uses 80-bin mel spectrograms as acoustic features, which have been widespread in the TTS domain since the introduction of [hifi-gan](https://github.com/jik876/hifi-gan/blob/master/meldataset.py). The goal of this model is to provide an alternative to hifi-gan that is faster and compatible with the acoustic output of several TTS models. ## Intended Uses and limitations The model is intended to serve as a vocoder, synthesizing audio waveforms from mel spectrograms. It is trained to generate speech, and if it is used in other audio domains the model may not produce high-quality samples. ### Installation To use Vocos only in inference mode, install it using: ```bash pip install git+https://github.com/langtech-bsc/vocos.git@matcha ``` ### Reconstruct audio from mel-spectrogram ```python import torch from vocos import Vocos vocos = Vocos.from_pretrained("patriotyk/vocos-mel-hifigan-compat-44100khz") mel = torch.randn(1, 80, 256) # B, C, T audio = vocos.decode(mel) ``` ### Training Data The model was trained on a private 800+ hour dataset made from Ukrainian audio books, prepared with the [narizaka](https://github.com/patriotyk/narizaka) tool. ### Training Procedure The model was trained for 2.0M steps and 210 epochs with a batch size of 20. We used a cosine scheduler with an initial learning rate of 3e-4. We used two RTX-3090 video cards for training, and it took about one month of continuous training. #### Training Hyperparameters * initial_learning_rate: 3e-4 * scheduler: cosine without warmup or restarts * mel_loss_coeff: 45 * mrd_loss_coeff: 1.0 * batch_size: 20 * num_samples: 32768 ## Evaluation Evaluation was done using the metrics from the original repo; after 210 epochs we achieve: * val_loss: 3.703 * f1_score: 0.950 * mel_loss: 0.248 * periodicity_loss: 0.127 * pesq_score: 3.399 * pitch_loss: 38.26 * utmos_score: 3.146 ## Citation If this code contributes to your research, please cite the work: ``` @article{siuzdak2023vocos, title={Vocos: Closing the gap between time-domain and Fourier-based neural vocoders for high-quality audio synthesis}, author={Siuzdak, Hubert}, journal={arXiv preprint arXiv:2306.00814}, year={2023} } ```
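As a possible follow-up to the reconstruction snippet above, the decoded waveform can be written to disk; the 44100 Hz sample rate follows the model name, and the use of torchaudio for I/O is an assumption (any audio library would do):

```python
# Save the waveform produced by `vocos.decode(mel)` above.
# torchaudio is an assumption for I/O; 44100 Hz follows the model name.
import torch
import torchaudio
from vocos import Vocos

vocos = Vocos.from_pretrained("patriotyk/vocos-mel-hifigan-compat-44100khz")
mel = torch.randn(1, 80, 256)   # B, C, T (replace with a real mel spectrogram from your TTS model)
audio = vocos.decode(mel)       # expected shape (B, T)
torchaudio.save("reconstructed.wav", audio.cpu(), 44100)
```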
nbeerbower/llama-3-stinky-v2-8B
nbeerbower
2024-05-23T09:44:30Z
1,023
4
transformers
[ "transformers", "safetensors", "llama", "text-generation", "mergekit", "merge", "conversational", "arxiv:2403.19522", "base_model:mlabonne/ChimeraLlama-3-8B-v2", "base_model:grimjim/llama-3-merge-pp-instruct-8B", "base_model:grimjim/llama-3-merge-virt-req-8B", "base_model:uygarkurt/llama-3-merged-linear", "base_model:jeiku/Orthocopter_8B", "base_model:grimjim/llama-3-nvidia-ChatQA-1.5-8B", "base_model:openlynn/Llama-3-Soliloquy-8B-v2", "base_model:VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct", "base_model:nbeerbower/llama-3-stella-8B", "base_model:cloudyu/Meta-Llama-3-8B-Instruct-DPO", "base_model:NeverSleep/Llama-3-Lumimaid-8B-v0.1-OAS", "base_model:flammenai/Mahou-1.0-llama3-8B", "base_model:flammenai/Mahou-1.1-llama3-8B", "license:other", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-05-11T20:37:00Z
--- license: other library_name: transformers tags: - mergekit - merge base_model: - mlabonne/ChimeraLlama-3-8B-v2 - grimjim/llama-3-merge-pp-instruct-8B - grimjim/llama-3-merge-virt-req-8B - uygarkurt/llama-3-merged-linear - jeiku/Orthocopter_8B - grimjim/llama-3-nvidia-ChatQA-1.5-8B - openlynn/Llama-3-Soliloquy-8B-v2 - VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct - nbeerbower/llama-3-stella-8B - cloudyu/Meta-Llama-3-8B-Instruct-DPO - NeverSleep/Llama-3-Lumimaid-8B-v0.1-OAS - flammenai/Mahou-1.0-llama3-8B - flammenai/Mahou-1.1-llama3-8B license_name: llama3 model-index: - name: llama-3-stinky-v2-8B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 66.98 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nbeerbower/llama-3-stinky-v2-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 83.2 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nbeerbower/llama-3-stinky-v2-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 68.33 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nbeerbower/llama-3-stinky-v2-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 55.83 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nbeerbower/llama-3-stinky-v2-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 77.51 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nbeerbower/llama-3-stinky-v2-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 69.75 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nbeerbower/llama-3-stinky-v2-8B name: Open LLM Leaderboard --- # llama-3-stinky-v2-8B This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [Model Stock](https://arxiv.org/abs/2403.19522) merge method using [flammenai/Mahou-1.1-llama3-8B](https://huggingface.co/flammenai/Mahou-1.1-llama3-8B) as a base. 
### Models Merged The following models were included in the merge: * [mlabonne/ChimeraLlama-3-8B-v2](https://huggingface.co/mlabonne/ChimeraLlama-3-8B-v2) * [grimjim/llama-3-merge-pp-instruct-8B](https://huggingface.co/grimjim/llama-3-merge-pp-instruct-8B) * [grimjim/llama-3-merge-virt-req-8B](https://huggingface.co/grimjim/llama-3-merge-virt-req-8B) * [uygarkurt/llama-3-merged-linear](https://huggingface.co/uygarkurt/llama-3-merged-linear) * [jeiku/Orthocopter_8B](https://huggingface.co/jeiku/Orthocopter_8B) * [grimjim/llama-3-nvidia-ChatQA-1.5-8B](https://huggingface.co/grimjim/llama-3-nvidia-ChatQA-1.5-8B) * [openlynn/Llama-3-Soliloquy-8B-v2](https://huggingface.co/openlynn/Llama-3-Soliloquy-8B-v2) * [VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct](https://huggingface.co/VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct) * [nbeerbower/llama-3-stella-8B](https://huggingface.co/nbeerbower/llama-3-stella-8B) * [cloudyu/Meta-Llama-3-8B-Instruct-DPO](https://huggingface.co/cloudyu/Meta-Llama-3-8B-Instruct-DPO) * [NeverSleep/Llama-3-Lumimaid-8B-v0.1-OAS](https://huggingface.co/NeverSleep/Llama-3-Lumimaid-8B-v0.1-OAS) * [flammenai/Mahou-1.0-llama3-8B](https://huggingface.co/flammenai/Mahou-1.0-llama3-8B) ### Configuration The following YAML configuration was used to produce this model: ```yaml models: - model: mlabonne/ChimeraLlama-3-8B-v2 - model: cloudyu/Meta-Llama-3-8B-Instruct-DPO - model: nbeerbower/llama-3-stella-8B - model: VAGOsolutions/Llama-3-SauerkrautLM-8b-Instruct - model: uygarkurt/llama-3-merged-linear - model: openlynn/Llama-3-Soliloquy-8B-v2 - model: grimjim/llama-3-merge-pp-instruct-8B - model: NeverSleep/Llama-3-Lumimaid-8B-v0.1-OAS - model: grimjim/llama-3-merge-virt-req-8B - model: jeiku/Orthocopter_8B - model: grimjim/llama-3-nvidia-ChatQA-1.5-8B - model: flammenai/Mahou-1.0-llama3-8B merge_method: model_stock base_model: flammenai/Mahou-1.1-llama3-8B dtype: bfloat16 ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_nbeerbower__llama-3-stinky-v2-8B) | Metric |Value| |---------------------------------|----:| |Avg. |70.27| |AI2 Reasoning Challenge (25-Shot)|66.98| |HellaSwag (10-Shot) |83.20| |MMLU (5-Shot) |68.33| |TruthfulQA (0-shot) |55.83| |Winogrande (5-shot) |77.51| |GSM8k (5-shot) |69.75|
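For intuition about the Model Stock merge named above, here is a toy sketch of uniform weight averaging across checkpoints; the actual Model Stock method (arXiv:2403.19522) is more involved, roughly speaking deriving a per-layer interpolation ratio between the averaged fine-tuned weights and the base model, so use mergekit's `model_stock` method (as in the YAML above) for real merges:

```python
# Toy sketch: uniform averaging of checkpoint weights, for intuition only.
# Model Stock itself derives a per-layer interpolation ratio with the base model;
# this is NOT the paper's algorithm and NOT mergekit's implementation.
import torch

def average_state_dicts(state_dicts):
    """Return the element-wise mean of a list of state dicts with identical keys."""
    keys = state_dicts[0].keys()
    return {k: torch.mean(torch.stack([sd[k].float() for sd in state_dicts]), dim=0) for k in keys}

# Illustrative tiny "models": in practice these would be full Llama-3 state dicts.
sd_a = {"layer.weight": torch.ones(2, 2)}
sd_b = {"layer.weight": torch.zeros(2, 2)}
print(average_state_dicts([sd_a, sd_b]))  # {'layer.weight': tensor of 0.5s}
```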
legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF
legraphista
2024-06-19T09:40:08Z
1,023
3
gguf
[ "gguf", "quantized", "GGUF", "quantization", "imat", "imatrix", "static", "8bit", "6bit", "5bit", "4bit", "3bit", "2bit", "1bit", "text-generation", "base_model:deepseek-ai/DeepSeek-Coder-V2-Instruct", "license:other", "region:us" ]
text-generation
2024-06-17T21:21:49Z
--- base_model: deepseek-ai/DeepSeek-Coder-V2-Instruct inference: false library_name: gguf license: other license_link: LICENSE license_name: deepseek-license pipeline_tag: text-generation quantized_by: legraphista tags: - quantized - GGUF - quantization - imat - imatrix - static - 8bit - 6bit - 5bit - 4bit - 3bit - 2bit - 1bit --- # DeepSeek-Coder-V2-Instruct-IMat-GGUF _Llama.cpp imatrix quantization of deepseek-ai/DeepSeek-Coder-V2-Instruct_ Original Model: [deepseek-ai/DeepSeek-Coder-V2-Instruct](https://huggingface.co/deepseek-ai/DeepSeek-Coder-V2-Instruct) Original dtype: `BF16` (`bfloat16`) Quantized by: llama.cpp [b3166](https://github.com/ggerganov/llama.cpp/releases/tag/b3166) IMatrix dataset: [here](https://gist.githubusercontent.com/bartowski1182/eb213dccb3571f863da82e99418f81e8/raw/b2869d80f5c16fd7082594248e80144677736635/calibration_datav3.txt) - [Files](#files) - [IMatrix](#imatrix) - [Common Quants](#common-quants) - [All Quants](#all-quants) - [Downloading using huggingface-cli](#downloading-using-huggingface-cli) - [Inference](#inference) - [Simple chat template](#simple-chat-template) - [Chat template with system prompt](#chat-template-with-system-prompt) - [Llama.cpp](#llama-cpp) - [FAQ](#faq) - [Why is the IMatrix not applied everywhere?](#why-is-the-imatrix-not-applied-everywhere) - [How do I merge a split GGUF?](#how-do-i-merge-a-split-gguf) --- ## Files ### IMatrix Status: ✅ Available Link: [here](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/blob/main/imatrix.dat) ### Common Quants | Filename | Quant type | File Size | Status | Uses IMatrix | Is Split | | -------- | ---------- | --------- | ------ | ------------ | -------- | | [DeepSeek-Coder-V2-Instruct.Q8_0/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q8_0) | Q8_0 | 250.62GB | ✅ Available | ⚪ Static | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q6_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q6_K) | Q6_K | 193.54GB | ✅ Available | ⚪ Static | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q4_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q4_K) | Q4_K | 142.45GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q3_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q3_K) | Q3_K | 112.67GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q2_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q2_K) | Q2_K | 85.95GB | ✅ Available | 🟢 IMatrix | ✂ Yes ### All Quants | Filename | Quant type | File Size | Status | Uses IMatrix | Is Split | | -------- | ---------- | --------- | ------ | ------------ | -------- | | [DeepSeek-Coder-V2-Instruct.Q8_0/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q8_0) | Q8_0 | 250.62GB | ✅ Available | ⚪ Static | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q6_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q6_K) | Q6_K | 193.54GB | ✅ Available | ⚪ Static | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q5_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q5_K) | Q5_K | 167.22GB | ✅ Available | ⚪ Static | ✂ Yes | 
[DeepSeek-Coder-V2-Instruct.Q5_K_S/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q5_K_S) | Q5_K_S | 162.31GB | ✅ Available | ⚪ Static | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q4_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q4_K) | Q4_K | 142.45GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q4_K_S/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q4_K_S) | Q4_K_S | 133.88GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ4_NL/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ4_NL) | IQ4_NL | 132.91GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ4_XS/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ4_XS) | IQ4_XS | 125.56GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q3_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q3_K) | Q3_K | 112.67GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q3_K_L/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q3_K_L) | Q3_K_L | 122.37GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q3_K_S/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q3_K_S) | Q3_K_S | 101.68GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ3_M/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ3_M) | IQ3_M | 103.37GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ3_S/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ3_S) | IQ3_S | 101.68GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ3_XS/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ3_XS) | IQ3_XS | 96.30GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ3_XXS/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ3_XXS) | IQ3_XXS | 90.85GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q2_K/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q2_K) | Q2_K | 85.95GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.Q2_K_S/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.Q2_K_S) | Q2_K_S | 79.60GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ2_M/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ2_M) | IQ2_M | 76.92GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ2_S/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ2_S) | IQ2_S | 69.87GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ2_XS/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ2_XS) | IQ2_XS | 68.71GB | ✅ Available | 🟢 IMatrix | ✂ Yes | 
[DeepSeek-Coder-V2-Instruct.IQ2_XXS/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ2_XXS) | IQ2_XXS | 61.50GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ1_M/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ1_M) | IQ1_M | 52.68GB | ✅ Available | 🟢 IMatrix | ✂ Yes | [DeepSeek-Coder-V2-Instruct.IQ1_S/*](https://huggingface.co/legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF/tree/main/DeepSeek-Coder-V2-Instruct.IQ1_S) | IQ1_S | 47.39GB | ✅ Available | 🟢 IMatrix | ✂ Yes ## Downloading using huggingface-cli If you do not have hugginface-cli installed: ``` pip install -U "huggingface_hub[cli]" ``` Download the specific file you want: ``` huggingface-cli download legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF --include "DeepSeek-Coder-V2-Instruct.Q8_0.gguf" --local-dir ./ ``` If the model file is big, it has been split into multiple files. In order to download them all to a local folder, run: ``` huggingface-cli download legraphista/DeepSeek-Coder-V2-Instruct-IMat-GGUF --include "DeepSeek-Coder-V2-Instruct.Q8_0/*" --local-dir ./ # see FAQ for merging GGUF's ``` --- ## Inference ### Simple chat template ``` <|begin▁of▁sentence|>User: {user_prompt} Assistant: {assistant_response}<|end▁of▁sentence|>User: {next_user_prompt} ``` ### Chat template with system prompt ``` <|begin▁of▁sentence|>{system_prompt} User: {user_prompt} Assistant: {assistant_response}<|end▁of▁sentence|>User: {next_user_prompt} ``` ### Llama.cpp ``` llama.cpp/main -m DeepSeek-Coder-V2-Instruct.Q8_0.gguf --color -i -p "prompt here (according to the chat template)" ``` --- ## FAQ ### Why is the IMatrix not applied everywhere? According to [this investigation](https://www.reddit.com/r/LocalLLaMA/comments/1993iro/ggufs_quants_can_punch_above_their_weights_now/), it appears that lower quantizations are the only ones that benefit from the imatrix input (as per hellaswag results). ### How do I merge a split GGUF? 1. Make sure you have `gguf-split` available - To get hold of `gguf-split`, navigate to https://github.com/ggerganov/llama.cpp/releases - Download the appropriate zip for your system from the latest release - Unzip the archive and you should be able to find `gguf-split` 2. Locate your GGUF chunks folder (ex: `DeepSeek-Coder-V2-Instruct.Q8_0`) 3. Run `gguf-split --merge DeepSeek-Coder-V2-Instruct.Q8_0/DeepSeek-Coder-V2-Instruct.Q8_0-00001-of-XXXXX.gguf DeepSeek-Coder-V2-Instruct.Q8_0.gguf` - Make sure to point `gguf-split` to the first chunk of the split. --- Got a suggestion? Ping me [@legraphista](https://x.com/legraphista)!
0xJustin/Dungeons-and-Diffusion
0xJustin
2023-02-24T18:58:30Z
1,022
240
diffusers
[ "diffusers", "safetensors", "text-to-image", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2022-11-06T18:03:42Z
--- license: creativeml-openrail-m tags: - text-to-image --- FOR THE NEW VERSION DOWNLOAD 'D&Diffusion3.0_Protogen.ckpt' The newest version is finetuned from Protogen to great effect. Also works great at resolutions greater than 512x512! Species in new version: aarakocra, aasimar, air_genasi, centaur, dragonborn, drow, dwarf, earth_genasi, elf, firbolg, fire_genasi, gith, gnome, goblin, goliath, halfling, human, illithid, kenku, kobold, lizardfolk, minotaur, orc, tabaxi, thrikreen, tiefling, tortle, warforged, water_genasi Classes in new version: Artificer, Bard, Barbarian, Cleric, Fighter, Druid, Monk, Paladin, Rogue, Ranger, Sorcerer, Warlock, Wizard, Noble, Townsperson See the training dataset here for a list of races: https://huggingface.co/datasets/0xJustin/Dungeons-and-Diffusion Model16000 is trained using `D&D character` as the class prompt, and for whatever reason it ~ seems ~ to work better for centaurs and aarakocra. Model30000 is trained using all of the images as the class images, and I think it emulates the commission DnD character style better. It works VERY well for most races, though sometimes I have to fight to get aarakocra to not be birds or centaurs to not be horses. Tieflings work great, but reining in their horns can be trouble. There is some bleed through between classes - especially for elf ears and horns. Including `elf ears` and `horns` as negative prompts seems to help. Good prompts to try things out: modelshoot style, (extremely detailed CG unity 8k wallpaper), full shot body photo of the most beautiful artwork in the world, english medieval pink (dragonborn druid) witch, black silk robe, nature magic, medieval era, painting by Ed Blinkey, Atey Ghailan, Studio Ghibli, by Jeremy Mann, Greg Manchess, Antonio Moro, trending on ArtStation, trending on CGSociety, Intricate, High Detail, Sharp focus, dramatic, painting art by midjourney and greg rutkowski, teal and gold, petals, countryside, action pose, casting a spell, green swirling magic Negative prompt: canvas frame, cartoon, 3d, photorealistic Steps: 20, Sampler: DPM++ 2M Karras, CFG scale: 10, Seed: 2603924688, Size: 512x768, Batch size: 4, Batch pos: 1, Clip skip: 2 `[natural colors], full body tiefling (knight), [watercolor digital 2D painting], (strong shading), hard shadows, blurry, elegant, wearing robes, style of (saga comic) Lois van Baarle and charlie bowater and Sui Ishida, messy, disheveled, thick brushwork, detailed face and eyes, concept art` `portrait (painting) of tabaxi, de Rivia closeup, suit, collar, formal attire, D&D, fantasy, intricate, elegant, highly detailed, artstation, concept art, matte, sharp focus, (brush strokes), (oil on canvas), hearthstone, art by Titian and Greg Rutkowski and Rembrandt van Rijn and Alphonse Mucha` (inspired by Reddit post)
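The card above gives prompt and sampler settings but no loading snippet, so here is a minimal Diffusers sketch (not part of the original card). It assumes the Diffusers-format weights in this repo load with `StableDiffusionPipeline`; the prompt, negative prompt and CFG/step settings are adapted from the examples above, and the output filename is arbitrary.

```python
# Hypothetical usage sketch for 0xJustin/Dungeons-and-Diffusion (not from the original card).
import torch
from diffusers import StableDiffusionPipeline

# Load the Diffusers weights from this repository (assumes a CUDA GPU is available).
pipe = StableDiffusionPipeline.from_pretrained(
    "0xJustin/Dungeons-and-Diffusion",
    torch_dtype=torch.float16,
).to("cuda")

# Prompt and settings adapted from the example prompts listed in the card.
prompt = (
    "full body tiefling (knight), watercolor digital 2D painting, strong shading, "
    "elegant, wearing robes, detailed face and eyes, concept art"
)
negative_prompt = "canvas frame, cartoon, 3d, photorealistic, elf ears, horns"

image = pipe(
    prompt,
    negative_prompt=negative_prompt,
    num_inference_steps=20,
    guidance_scale=10,
    width=512,
    height=768,
).images[0]
image.save("tiefling_knight.png")
```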
double7/vicuna-68m
double7
2024-01-15T06:45:16Z
1,022
1
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "dataset:anon8231489123/ShareGPT_Vicuna_unfiltered", "arxiv:2401.06706", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-11T08:27:12Z
--- license: apache-2.0 datasets: - anon8231489123/ShareGPT_Vicuna_unfiltered language: - en pipeline_tag: text-generation --- ## Model description This is a Vicuna-like model with only 68M parameters, which is fine-tuned from [LLaMA-68m](https://huggingface.co/JackFram/llama-68m) on [ShareGPT](https://huggingface.co/datasets/anon8231489123/ShareGPT_Vicuna_unfiltered) data. The training setup follows the [Vicuna suite](https://github.com/lm-sys/FastChat). The model is mainly developed as a base Small Speculative Model in the [MCSD paper](https://arxiv.org/pdf/2401.06706.pdf). As a comparison, it can be better aligned to the Vicuna models than LLaMA-68m with little loss of alignment to the LLaMA models. | Draft Model | Target Model | Alignment | | -------------- | ------------- | --------- | | LLaMA-68/160M | LLaMA-13/33B | 😃 | | LLaMA-68/160M | Vicuna-13/33B | 😟 | | Vicuna-68/160M | LLaMA-13/33B | 😃 | | Vicuna-68/160M | Vicuna-13/33B | 😃 |
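Since this checkpoint is intended as a small draft model, a minimal sketch of plugging it into Hugging Face assisted generation (standard speculative decoding) may help; it is not from the original card. The target checkpoint name below is an illustrative assumption, and the multi-candidate (MCSD) algorithm from the paper is not reproduced here; only the stock `assistant_model` path in `generate()` is shown.

```python
# Sketch: vicuna-68m as a draft/assistant model for speculative decoding (target model is an assumption).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

target_name = "lmsys/vicuna-13b-v1.3"  # assumed target; any Vicuna/LLaMA model sharing the tokenizer
draft_name = "double7/vicuna-68m"

tokenizer = AutoTokenizer.from_pretrained(target_name)
target = AutoModelForCausalLM.from_pretrained(target_name, torch_dtype=torch.float16, device_map="auto")
draft = AutoModelForCausalLM.from_pretrained(draft_name, torch_dtype=torch.float16).to(target.device)

prompt = "USER: Explain speculative decoding in one sentence. ASSISTANT:"
inputs = tokenizer(prompt, return_tensors="pt").to(target.device)

# assistant_model enables assisted generation: the draft proposes tokens, the target verifies them.
output = target.generate(**inputs, assistant_model=draft, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```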
yam-peleg/Experiment30-7B
yam-peleg
2024-03-03T22:05:08Z
1,022
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "chat", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-03T19:58:39Z
--- license: apache-2.0 language: - en library_name: transformers pipeline_tag: text-generation tags: - chat --- **Experiment30-7B** An experiment for testing and refining a specific training and evaluation pipeline research framework. This experiment aims to identify potential optimizations, focusing on data engineering, architecture efficiency, and evaluation performance. The goal is to evaluate the effectiveness of a new training / evaluation pipeline for LLMs. The experiment will explore adjustments in data preprocessing, model training algorithms, and evaluation metrics to test methods for improvement. More details in the future experiments.
bigscience-data/sgpt-bloom-1b7-nli
bigscience-data
2022-10-13T16:42:27Z
1,021
11
sentence-transformers
[ "sentence-transformers", "pytorch", "bloom", "feature-extraction", "sentence-similarity", "mteb", "arxiv:2202.08904", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
sentence-similarity
2022-05-29T11:14:58Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb model-index: - name: sgpt-bloom-1b7-nli results: - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (fr) config: fr split: test revision: c379a6705fec24a2493fa68e011692605f44e119 metrics: - type: accuracy value: 39.286 - type: f1 value: 38.87078070073539 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (zh) config: zh split: test revision: c379a6705fec24a2493fa68e011692605f44e119 metrics: - type: accuracy value: 37.634 - type: f1 value: 36.86046604093418 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (fr) config: fr split: test revision: a7e2a951126a26fc8c6a69f835f33a346ba259e3 metrics: - type: accuracy value: 83.79893517068588 - type: f1 value: 83.72326662566203 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (fr) config: fr split: test revision: 6299947a7777084cc2d4b64235bf7190381ce755 metrics: - type: accuracy value: 63.36047604134043 - type: f1 value: 44.261707019308126 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (fr) config: fr split: test revision: 072a486a144adf7f4479a4a0dddb2152e161e1ea metrics: - type: accuracy value: 64.57632817753867 - type: f1 value: 62.60453982786661 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (fr) config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.59986550100874 - type: f1 value: 69.71803697939914 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (zh) config: zh split: test revision: 2de6ce8c1921b71a755b262c6b57fef195dd7906 metrics: - type: cos_sim_pearson value: 59.71781185663265 - type: cos_sim_spearman value: 58.538648447630514 - type: euclidean_pearson value: 53.53848180206165 - type: euclidean_spearman value: 56.33730262964236 - type: manhattan_pearson value: 54.62109820575505 - type: manhattan_spearman value: 57.223846291318914 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (fr) config: fr split: test revision: 2de6ce8c1921b71a755b262c6b57fef195dd7906 metrics: - type: cos_sim_pearson value: 73.44021434651606 - type: cos_sim_spearman value: 73.13412769502769 - type: euclidean_pearson value: 68.16368597409867 - type: euclidean_spearman value: 72.44964781564485 - type: manhattan_pearson value: 69.42307032478939 - type: manhattan_spearman value: 73.3523195012387 --- # sgpt-bloom-1b7-nli ## Usage For usage instructions, refer to: https://github.com/Muennighoff/sgpt#symmetric-semantic-search The model was trained with the command ```bash CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 accelerate launch examples/training/nli/training_nli_v2.py --model_name bigscience/bloom-1b3 --freezenonbias --train_batch_size 128 --lr 32e-5 --pooling weightedmean --wandb --wandbwatchlog gradients --gradcache --chunksize 4 ``` ## Evaluation Results `{'askubuntu': 57.44, 'cqadupstack': 14.18, 'twitterpara': 73.99, 'scidocs': 74.74, 'avg': 55.087500000000006}` ## Training The model was trained with the parameters: **DataLoader**: `sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 4403 with parameters: ``` {'batch_size': 128} ``` The model uses BitFit, weighted-mean pooling 
& GradCache, for details see: https://arxiv.org/abs/2202.08904 **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MNRLGradCache` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 440, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 0.00032 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 441, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 75, 'do_lower_case': False}) with Transformer model: BloomModel (1): Pooling({'word_embedding_dimension': 2048, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': True, 'pooling_mode_lasttoken': False}) ) ``` ## Citing & Authors ```bibtex @article{muennighoff2022sgpt, title={SGPT: GPT Sentence Embeddings for Semantic Search}, author={Muennighoff, Niklas}, journal={arXiv preprint arXiv:2202.08904}, year={2022} } ```
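The card above points to the SGPT repository for usage instructions; as a quick reference, here is a minimal sentence-transformers sketch (not part of the original card) for symmetric semantic similarity with this checkpoint. The example sentences are illustrative.

```python
# Minimal sketch: encode sentences with the weighted-mean-pooling BLOOM encoder and compare them.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("bigscience-data/sgpt-bloom-1b7-nli")

sentences = [
    "The cat sits outside.",
    "A feline is resting in the garden.",
    "The new movie is awesome.",
]
embeddings = model.encode(sentences, convert_to_tensor=True)

# Cosine similarity of the first sentence against the other two.
scores = util.cos_sim(embeddings[0], embeddings[1:])
print(scores)
```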
timm/tf_mobilenetv3_small_075.in1k
timm
2023-04-27T22:49:51Z
1,021
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "arxiv:1905.02244", "license:apache-2.0", "region:us" ]
image-classification
2022-12-16T05:39:15Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k --- # Model card for tf_mobilenetv3_small_075.in1k A MobileNet-v3 image classification model. Trained on ImageNet-1k in Tensorflow by paper authors, ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 2.0 - GMACs: 0.0 - Activations (M): 1.3 - Image size: 224 x 224 - **Papers:** - Searching for MobileNetV3: https://arxiv.org/abs/1905.02244 - **Dataset:** ImageNet-1k - **Original:** https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('tf_mobilenetv3_small_075.in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'tf_mobilenetv3_small_075.in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 16, 112, 112]) # torch.Size([1, 16, 56, 56]) # torch.Size([1, 24, 28, 28]) # torch.Size([1, 40, 14, 14]) # torch.Size([1, 432, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'tf_mobilenetv3_small_075.in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 432, 7, 7) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
## Citation ```bibtex @inproceedings{howard2019searching, title={Searching for mobilenetv3}, author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and others}, booktitle={Proceedings of the IEEE/CVF international conference on computer vision}, pages={1314--1324}, year={2019} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
rubentito/layoutlmv3-base-mpdocvqa
rubentito
2024-06-06T08:35:37Z
1,021
5
transformers
[ "transformers", "pytorch", "layoutlmv3", "document-question-answering", "DocVQA", "Document Question Answering", "Document Visual Question Answering", "en", "dataset:rubentito/mp-docvqa", "arxiv:2212.05935", "arxiv:1905.13648", "base_model:microsoft/layoutlmv3-base", "license:cc-by-nc-sa-4.0", "endpoints_compatible", "region:us" ]
document-question-answering
2023-02-21T07:56:11Z
--- base_model: microsoft/layoutlmv3-base license: cc-by-nc-sa-4.0 tags: - DocVQA - Document Question Answering - Document Visual Question Answering datasets: - rubentito/mp-docvqa language: - en --- # LayoutLMv3 base fine-tuned on MP-DocVQA This is the pretrained LayoutLMv3 from the [Microsoft hub](https://huggingface.co/microsoft/layoutlmv3-base), fine-tuned on the Multipage DocVQA (MP-DocVQA) dataset. This model was used as a baseline in [Hierarchical multimodal transformers for Multi-Page DocVQA](https://arxiv.org/pdf/2212.05935.pdf). - Results on the MP-DocVQA dataset are reported in Table 2. - Training hyperparameters can be found in Table 8 of Appendix D. ## How to use Here is how to use this model to answer a question about a document image in PyTorch: ```python import torch from PIL import Image from transformers import LayoutLMv3Processor, LayoutLMv3ForQuestionAnswering processor = LayoutLMv3Processor.from_pretrained("rubentito/layoutlmv3-base-mpdocvqa", apply_ocr=False) model = LayoutLMv3ForQuestionAnswering.from_pretrained("rubentito/layoutlmv3-base-mpdocvqa") image = Image.open("example.jpg").convert("RGB") question = "Is this a question?" context = ["Example"] boxes = [[0, 0, 1000, 1000]] # One example bounding box per word; this one covers the whole image. document_encoding = processor(image, question, context, boxes=boxes, return_tensors="pt") outputs = model(**document_encoding) # Get the answer span and decode it back to text start_idx = torch.argmax(outputs.start_logits, axis=1).item() end_idx = torch.argmax(outputs.end_logits, axis=1).item() answers = processor.tokenizer.decode(document_encoding["input_ids"][0][start_idx: end_idx + 1]).strip() ``` ## Metrics **Average Normalized Levenshtein Similarity (ANLS)** The standard metric for text-based VQA tasks (ST-VQA and DocVQA). It evaluates the method's reasoning capabilities while smoothly penalizing OCR recognition errors. Check [Scene Text Visual Question Answering](https://arxiv.org/abs/1905.13648) for detailed information. **Answer Page Prediction Accuracy (APPA)** In the MP-DocVQA task, the models can provide the index of the page where the information required to answer the question is located. For this subtask, accuracy is used to evaluate the predictions: i.e. whether the predicted page is correct or not. Check [Hierarchical multimodal transformers for Multi-Page DocVQA](https://arxiv.org/abs/2212.05935) for detailed information. ## Model results Extended experimentation can be found in Table 2 of [Hierarchical multimodal transformers for Multi-Page DocVQA](https://arxiv.org/pdf/2212.05935.pdf). You can also check the live leaderboard at the [RRC Portal](https://rrc.cvc.uab.es/?ch=17&com=evaluation&task=4).
| Model | HF name | Parameters | ANLS | APPA | |-----------------------------------------------------------------------------------|:--------------------------------------|:-------------:|:-------------:|:---------:| | [Bert large](https://huggingface.co/rubentito/bert-large-mpdocvqa) | rubentito/bert-large-mpdocvqa | 334M | 0.4183 | 51.6177 | | [Longformer base](https://huggingface.co/rubentito/longformer-base-mpdocvqa) | rubentito/longformer-base-mpdocvqa | 148M | 0.5287 | 71.1696 | | [BigBird ITC base](https://huggingface.co/rubentito/bigbird-base-itc-mpdocvqa) | rubentito/bigbird-base-itc-mpdocvqa | 131M | 0.4929 | 67.5433 | | [**LayoutLMv3 base**](https://huggingface.co/rubentito/layoutlmv3-base-mpdocvqa) | rubentito/layoutlmv3-base-mpdocvqa | 125M | 0.4538 | 51.9426 | | [T5 base](https://huggingface.co/rubentito/t5-base-mpdocvqa) | rubentito/t5-base-mpdocvqa | 223M | 0.5050 | 0.0000 | | [Hi-VT5](https://huggingface.co/rubentito/hivt5-base-mpdocvqa) | rubentito/hivt5-base-mpdocvqa | 316M | 0.6201 | 79.23 | ## Citation Information ```tex @article{tito2022hierarchical, title={Hierarchical multimodal transformers for Multi-Page DocVQA}, author={Tito, Rub{\`e}n and Karatzas, Dimosthenis and Valveny, Ernest}, journal={arXiv preprint arXiv:2212.05935}, year={2022} } ```
abhishek/llama-2-7b-hf-small-shards
abhishek
2024-02-07T08:37:11Z
1,021
56
transformers
[ "transformers", "pytorch", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-07-19T09:55:34Z
Entry not found
McGill-NLP/Sheared-LLaMA-2.7B-weblinx
McGill-NLP
2024-03-25T22:40:51Z
1,021
2
transformers
[ "transformers", "safetensors", "weblinx", "text-generation-inference", "web-agents", "agents", "text-generation", "en", "dataset:McGill-NLP/WebLINX", "dataset:McGill-NLP/WebLINX-full", "arxiv:2402.05930", "license:llama2", "endpoints_compatible", "region:us" ]
text-generation
2024-02-07T20:06:06Z
--- datasets: - McGill-NLP/WebLINX - McGill-NLP/WebLINX-full language: - en metrics: - f1 - iou - chrf library_name: transformers pipeline_tag: text-generation tags: - weblinx - text-generation-inference - web-agents - agents license: llama2 --- <div align="center"> <h1 style="margin-bottom: 0.5em;">WebLINX: Real-World Website Navigation with Multi-Turn Dialogue</h1> <em>Xing Han Lù*, Zdeněk Kasner*, Siva Reddy</em> </div> <div style="margin-bottom: 2em"></div> <div style="display: flex; justify-content: space-around; align-items: center; font-size: 120%;"> <div><a href="https://arxiv.org/abs/2402.05930">📄Paper</a></div> <div><a href="https://mcgill-nlp.github.io/weblinx">🌐Website</a></div> <div><a href="https://colab.research.google.com/github/McGill-NLP/weblinx/blob/main/examples/WebLINX_Colab_Notebook.ipynb">📓Colab</a></div> <div><a href="https://huggingface.co/datasets/McGill-NLP/WebLINX">🤗Dataset</a></div> <div><a href="https://github.com/McGill-NLP/weblinx">💾Code</a></div> </div> ## Quickstart ```python from datasets import load_dataset from huggingface_hub import snapshot_download from transformers import pipeline # Load validation split valid = load_dataset("McGill-NLP/weblinx", split="validation") # Download and load the templates snapshot_download( "McGill-NLP/WebLINX", repo_type="dataset", allow_patterns="templates/*.txt", local_dir="./" ) with open('templates/llama.txt') as f: template = f.read() turn = valid[0] turn_text = template.format(**turn) # Load action model and input the text to get prediction action_model = pipeline( model="McGill-NLP/Sheared-LLaMA-2.7B-weblinx", device=0, torch_dtype='auto' ) out = action_model(turn_text, return_full_text=False, max_new_tokens=64, truncation=True) pred = out[0]['generated_text'] print("Ref:", turn["action"]) print("Pred:", pred) ``` ## Original Model This model is finetuned on WebLINX using checkpoints previously published on Huggingface Hub.\ [Click here to access the original model.](https://huggingface.co/princeton-nlp/Sheared-LLaMA-2.7B) ## License This model is derived from LLaMA-2, which can only be used with the [LLaMA 2 Community License Agreement](https://github.com/facebookresearch/llama/blob/main/LICENSE). By using or distributing any portion or element of this model, you agree to be bound by this Agreement.
tyson0420/stack_llama_fil_ai
tyson0420
2024-02-14T07:16:36Z
1,021
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "code", "en", "dataset:tyson0420/stackexchange-4dpo-filby-ai-keywords", "arxiv:1910.09700", "license:bigscience-openrail-m", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-10T22:57:49Z
--- library_name: transformers tags: - code license: bigscience-openrail-m datasets: - tyson0420/stackexchange-4dpo-filby-ai-keywords language: - en metrics: - code_eval pipeline_tag: text-generation --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
yam-peleg/Experiment31-7B
yam-peleg
2024-03-03T22:05:16Z
1,021
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "chat", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-03T18:37:17Z
--- license: apache-2.0 language: - en library_name: transformers pipeline_tag: text-generation tags: - chat --- **Experiment31-7B** An experiment for testing and refining a specific training and evaluation pipeline research framework. This experiment aims to identify potential optimizations, focusing on data engineering, architecture efficiency, and evaluation performance. The goal is to evaluate the effectiveness of a new training / evaluation pipeline for LLMs. The experiment will explore adjustments in data preprocessing, model training algorithms, and evaluation metrics to test methods for improvement. More details in the future experiments.
Tencent-Hunyuan/Distillation-v1.1
Tencent-Hunyuan
2024-06-19T03:46:36Z
1,021
5
hunyuan-dit
[ "hunyuan-dit", "en", "zh", "license:other", "region:us" ]
null
2024-06-12T09:56:46Z
--- library_name: hunyuan-dit license: other license_name: tencent-hunyuan-community license_link: https://huggingface.co/Tencent-Hunyuan/HunyuanDiT/blob/main/LICENSE.txt language: - en - zh --- # HunyuanDiT Distillation Acceleration Language: **English** | [**中文**](https://huggingface.co/Tencent-Hunyuan/Distillation/blob/main/README_zh.md) We provide a distilled version of HunyuanDiT to accelerate inference. Based on a progressive distillation method, we accelerate HunyuanDiT by 2x without any performance drop, so the distilled model roughly halves inference time in any inference mode. The following table shows the requirements for running the distilled model and its acceleration performance (batch size = 1). We evaluate the acceleration on various GPUs (H800, A100, 3090, 4090) as well as different inference modes. | GPU | CUDA version | model | inference mode | inference steps | GPU Peak Memory | inference time | | --- | --- | --- | --- | --- | --- | --- | | H800 | 12.1 | HunyuanDiT | PyTorch | 100 | 13G | 28s | | H800 | 12.1 | HunyuanDiT | TensorRT | 100 | 12G | 10s | | H800 | 12.1 | HunyuanDiT | Distill+PyTorch | 50 | 13G | 14s | | H800 | 12.1 | HunyuanDiT | Distill+TensorRT | 50 | 12G | 5s | | A100 | 11.7 | HunyuanDiT | PyTorch | 100 | 13GB | 54s | | A100 | 11.7 | HunyuanDiT | TensorRT | 100 | 11GB | 20s | | A100 | 11.7 | HunyuanDiT | Distill+PyTorch | 50 | 13GB | 25s | | A100 | 11.7 | HunyuanDiT | Distill+TensorRT | 50 | 11GB | 10s | | 3090 | 11.8 | HunyuanDiT | PyTorch | 100 | 14G | 98s | | 3090 | 11.8 | HunyuanDiT | TensorRT | 100 | 14G | 40s | | 3090 | 11.8 | HunyuanDiT | Distill+PyTorch | 50 | 14G | 49s | | 3090 | 11.8 | HunyuanDiT | Distill+TensorRT | 50 | 14G | 20s | | 4090 | 11.8 | HunyuanDiT | PyTorch | 100 | 14G | 54s | | 4090 | 11.8 | HunyuanDiT | TensorRT | 100 | 14G | 20s | | 4090 | 11.8 | HunyuanDiT | Distill+PyTorch | 50 | 14G | 27s | | 4090 | 11.8 | HunyuanDiT | Distill+TensorRT | 50 | 14G | 10s | Basically, the requirements for running the models are the same as for the original model. ## Instructions The dependencies and installation are basically the same as the [**original model**](https://huggingface.co/Tencent-Hunyuan/HunyuanDiT). Then download the model using the following commands: ```bash cd HunyuanDiT # Use the huggingface-cli tool to download the model. huggingface-cli download Tencent-Hunyuan/Distillation-v1.1 ./pytorch_model_distill.pt --local-dir ./ckpts/t2i/model ``` ## Inference ### Using Gradio Make sure you have activated the conda environment before running the following command. ```shell # By default, we start a Chinese UI. python app/hydit_app.py --load-key distill # Using Flash Attention for acceleration. python app/hydit_app.py --infer-mode fa --load-key distill # You can disable the enhancement model if the GPU memory is insufficient. # The enhancement will be unavailable until you restart the app without the `--no-enhance` flag. python app/hydit_app.py --no-enhance --load-key distill # Start with English UI python app/hydit_app.py --lang en --load-key distill ``` ### Using Command Line We provide several commands for a quick start: ```shell # Prompt Enhancement + Text-to-Image. Torch mode python sample_t2i.py --prompt "渔舟唱晚" --load-key distill --infer-steps 50 # Only Text-to-Image. Torch mode python sample_t2i.py --prompt "渔舟唱晚" --no-enhance --load-key distill --infer-steps 50 # Only Text-to-Image.
Flash Attention mode python sample_t2i.py --infer-mode fa --prompt "渔舟唱晚" --load-key distill --infer-steps 50 # Generate an image with other image sizes. python sample_t2i.py --prompt "渔舟唱晚" --image-size 1280 768 --load-key distill --infer-steps 50 ``` More example prompts can be found in [example_prompts.txt](example_prompts.txt)
reach-vb/Mistral-7B-Instruct-v0.3-Q6_K-GGUF
reach-vb
2024-06-21T13:02:45Z
1,021
0
null
[ "gguf", "llama-cpp", "gguf-my-repo", "base_model:mistralai/Mistral-7B-Instruct-v0.3", "license:apache-2.0", "region:us" ]
null
2024-06-21T13:02:21Z
--- base_model: mistralai/Mistral-7B-Instruct-v0.3 license: apache-2.0 tags: - llama-cpp - gguf-my-repo --- # reach-vb/Mistral-7B-Instruct-v0.3-Q6_K-GGUF This model was converted to GGUF format from [`mistralai/Mistral-7B-Instruct-v0.3`](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo reach-vb/Mistral-7B-Instruct-v0.3-Q6_K-GGUF --hf-file mistral-7b-instruct-v0.3-q6_k.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo reach-vb/Mistral-7B-Instruct-v0.3-Q6_K-GGUF --hf-file mistral-7b-instruct-v0.3-q6_k.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo reach-vb/Mistral-7B-Instruct-v0.3-Q6_K-GGUF --hf-file mistral-7b-instruct-v0.3-q6_k.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo reach-vb/Mistral-7B-Instruct-v0.3-Q6_K-GGUF --hf-file mistral-7b-instruct-v0.3-q6_k.gguf -c 2048 ```
timm/swin_base_patch4_window12_384.ms_in22k
timm
2024-02-10T23:31:24Z
1,020
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-22k", "arxiv:2103.14030", "license:mit", "region:us" ]
image-classification
2023-03-18T04:05:37Z
--- license: mit library_name: timm tags: - image-classification - timm datasets: - imagenet-22k --- # Model card for swin_base_patch4_window12_384.ms_in22k A Swin Transformer image classification model. Pretrained on ImageNet-22k by paper authors. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 109.3 - GMACs: 47.2 - Activations (M): 134.8 - Image size: 384 x 384 - **Papers:** - Swin Transformer: Hierarchical Vision Transformer using Shifted Windows: https://arxiv.org/abs/2103.14030 - **Original:** https://github.com/microsoft/Swin-Transformer - **Dataset:** ImageNet-22k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('swin_base_patch4_window12_384.ms_in22k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'swin_base_patch4_window12_384.ms_in22k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g. for swin_base_patch4_window7_224 (NHWC output) # torch.Size([1, 56, 56, 128]) # torch.Size([1, 28, 28, 256]) # torch.Size([1, 14, 14, 512]) # torch.Size([1, 7, 7, 1024]) # e.g. 
for swinv2_cr_small_ns_224 (NCHW output) # torch.Size([1, 96, 56, 56]) # torch.Size([1, 192, 28, 28]) # torch.Size([1, 384, 14, 14]) # torch.Size([1, 768, 7, 7]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'swin_base_patch4_window12_384.ms_in22k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled (ie.e a (batch_size, H, W, num_features) tensor for swin / swinv2 # or (batch_size, num_features, H, W) for swinv2_cr output = model.forward_head(output, pre_logits=True) # output is (batch_size, num_features) tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @inproceedings{liu2021Swin, title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining}, booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, year={2021} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
BlueNipples/DaringLotus-SnowLotus-10.7b-IQ-GGUF
BlueNipples
2024-05-04T12:31:29Z
1,020
11
null
[ "gguf", "Solar", "Mistral", "Roleplay", "license:apache-2.0", "region:us" ]
null
2024-01-17T04:36:02Z
--- license: apache-2.0 tags: - Solar - Mistral - Roleplay --- ![SnowLotus Logo](https://cdn-uploads.huggingface.co/production/uploads/64bb1109aaccfd28b023bcec/gTQtPK46laLIFg0RTAv73.png) ## Important Note The most recent version of llama.cpp has broken historical GGUFs, so I am uploading a few requants to preserve these two models' compatibility. These will be called v3 in the file naming even though they are the same model. ## Summary 3-4x Importance Matrix GGUFs and 3-4x regular GGUFs for https://huggingface.co/BlueNipples/SnowLotus-v2-10.7B and https://huggingface.co/BlueNipples/DaringLotus-v2-10.7b. I added a few more quants. I'm super happy with these merges, they turned out great. Basically Daring is the slightly more creative/prose oriented one, but also slightly less coherent. Daring basically necessitates regens/swipes. They both have excellent prose for their size that is largely not very gpt-ish and are able to often take story context, lore entries and character card info into account. You can probably use these as your mainstay - which is especially helpful if your GPU struggles with 13b, and honestly I think these models are _probably_ equal to or better than any 13b anyway. I might be wrong, but I do think they are very good compared to anything I've personally run. See the individual model cards for merge recipe details. Thanks to lucyknada for helping me get the imatrix quants done quicker! ## Importance Matrix Note Imatrix currently does not run with Koboldcpp, although it is bound to be supported in the future as it is supported by Llamacpp (and I'm guessing therefore ooba). Those quants should provide a perplexity boost, especially for the smaller quants. The dat files are also there, so if you make a fp16 gguf from the main model cards you might be able to save yourself some time producing your own imatrix quants. ### Format Notes Solar is designed for 4k context, but Nyx reports that his merge works to 8k. Given this has a slerp gradient back into that, I'm not sure which applies here. Alpaca instruct formatting. ### Ayumi Index http://ayumi.m8geil.de/erp4_chatlogs/?S=rma_0#!/index In the Ayumi ERPv4 Chat Log Index, SnowLotus scores a 94.10 in Flesch, which means it produces more complex sentences than Daring (quite complex), while DaringLotus scores higher in Var and Ad[jv], which means it makes heavier use of adjectives and adverbs (is more descriptive). Notably, Daring is in the top 8 for adjectives in a sentence, highest in its weight class if you discount the Chinese model, and in general both models did very well on this metric (SnowLotus ranks higher here than anything above it in IQ4), showcasing their descriptive ability. SnowLotus beats DaringLotus on IQ4 with a score of 70.94, only beaten by SOLAR Instruct and Fimbulvetr in its weight class (although also notably Kunoichi 7b by a slim margin), while DaringLotus is a bit lower at 65.37 - not as smart. Interestingly, the benchmarking here showed repetition for both models (which I haven't seen), but more with SnowLotus - so it's possible Daring repeats less than SnowLotus? These roughly confirm my impressions of the differences, although they potentially reveal some new details too. I've had a great experience RPing with these models, and seen no repetition myself, but be sure to use MinP or DynaTemp rather than the older samplers and be prepared to regen anything they get stuck on!
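As a practical illustration of the Alpaca formatting and MinP advice above, here is a hypothetical Python sketch using the llama-cpp-python bindings (not part of the original card). The quant filename is a placeholder; check the repository file list for the actual GGUF you downloaded, and the sketch assumes a recent llama-cpp-python build that exposes `min_p`.

```python
# Hypothetical sketch: run a SnowLotus/DaringLotus GGUF with Alpaca-style prompting and MinP sampling.
from llama_cpp import Llama

llm = Llama(
    model_path="SnowLotus-v2-10.7B.IQ4_XS.gguf",  # placeholder filename
    n_ctx=4096,  # Solar is designed for 4k context
)

prompt = (
    "### Instruction:\n"
    "Write the opening paragraph of a cozy fantasy tavern scene.\n\n"
    "### Response:\n"
)

out = llm(prompt, max_tokens=256, temperature=0.9, min_p=0.05)
print(out["choices"][0]["text"])
```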
Tanvir1337/quantized-models
Tanvir1337
2024-06-08T06:36:01Z
1,020
0
null
[ "gguf", "region:us" ]
null
2024-01-17T19:01:59Z
--- tags: - gguf --- # About Quantized models collected from various sources for ease of use. Mostly from: - GGUF: [TheBloke](https://huggingface.co/TheBloke), [ollama.ai/library](https://ollama.ai/library), [Undi95](https://huggingface.co/Undi95), [s3nh](https://huggingface.co/s3nh), [athirdpath](https://huggingface.co/athirdpath), [Sao10K](https://huggingface.co/Sao10K), [maddes8cht](https://huggingface.co/maddes8cht), [Kquant03](https://huggingface.co/Kquant03) [ikawrakow](https://huggingface.co/ikawrakow), [nakodanei](https://huggingface.co/nakodanei). ## Disclaimer These models are provided "as-is" without any warranty. The respective licenses apply to each model, and it is the user's responsibility to comply with the terms of these licenses.
Qwen/Qwen1.5-7B-Chat-AWQ
Qwen
2024-04-30T07:44:57Z
1,020
12
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "chat", "conversational", "en", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "awq", "region:us" ]
text-generation
2024-02-03T06:59:23Z
--- license: other license_name: tongyi-qianwen license_link: >- https://huggingface.co/Qwen/Qwen1.5-7B-Chat-AWQ/blob/main/LICENSE language: - en pipeline_tag: text-generation tags: - chat --- # Qwen1.5-7B-Chat-AWQ ## Introduction Qwen1.5 is the beta version of Qwen2, a transformer-based decoder-only language model pretrained on a large amount of data. In comparison with the previous released Qwen, the improvements include: * 8 model sizes, including 0.5B, 1.8B, 4B, 7B, 14B, 32B and 72B dense models, and an MoE model of 14B with 2.7B activated; * Significant performance improvement in human preference for chat models; * Multilingual support of both base and chat models; * Stable support of 32K context length for models of all sizes * No need of `trust_remote_code`. For more details, please refer to our [blog post](https://qwenlm.github.io/blog/qwen1.5/) and [GitHub repo](https://github.com/QwenLM/Qwen1.5). <br> ## Model Details Qwen1.5 is a language model series including decoder language models of different model sizes. For each size, we release the base language model and the aligned chat model. It is based on the Transformer architecture with SwiGLU activation, attention QKV bias, group query attention, mixture of sliding window attention and full attention, etc. Additionally, we have an improved tokenizer adaptive to multiple natural languages and codes. For the beta version, temporarily we did not include GQA (except for 32B) and the mixture of SWA and full attention. ## Training details We pretrained the models with a large amount of data, and we post-trained the models with both supervised finetuning and direct preference optimization. ## Requirements The code of Qwen1.5 has been in the latest Hugging face transformers and we advise you to install `transformers>=4.37.0`, or you might encounter the following error: ``` KeyError: 'qwen2' ``` ## Quickstart Here provides a code snippet with `apply_chat_template` to show you how to load the tokenizer and model and how to generate contents. ```python from transformers import AutoModelForCausalLM, AutoTokenizer device = "cuda" # the device to load the model onto model = AutoModelForCausalLM.from_pretrained( "Qwen/Qwen1.5-7B-Chat-AWQ", torch_dtype="auto", device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-7B-Chat-AWQ") prompt = "Give me a short introduction to large language model." messages = [ {"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": prompt} ] text = tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) model_inputs = tokenizer([text], return_tensors="pt").to(device) generated_ids = model.generate( model_inputs.input_ids, max_new_tokens=512 ) generated_ids = [ output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids) ] response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] ``` ## Tips * If you encounter code switching or other bad cases, we advise you to use our provided hyper-parameters in `generation_config.json`. ## Citation If you find our work helpful, feel free to give us a cite. 
``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ```
uukuguy/speechless-mistral-hermes-code-7b
uukuguy
2024-02-06T12:28:07Z
1,020
2
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "code", "en", "dataset:teknium/OpenHermes-2.5", "dataset:TokenBender/python_eval_instruct_51k", "dataset:codefuse-ai/Evol-instruction-66k", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-06T12:15:24Z
--- language: - en library_name: transformers pipeline_tag: text-generation datasets: - teknium/OpenHermes-2.5 - TokenBender/python_eval_instruct_51k - codefuse-ai/Evol-instruction-66k tags: - code license: apache-2.0 model-index: - name: SpeechlessCoder results: - task: type: text-generation dataset: type: openai_humaneval name: HumanEval metrics: - name: pass@1 type: pass@1 value: 0.0 verified: false --- <p><h1> speechless-mistral-hermes-code-7b </h1></p> Code: https://github.com/uukuguy/speechless Use the following dataset to fine-tune mistralai/Mistral-7B-v0.1 in order to improve the model's reasoning and planning abilities. Total 986k samples. - teknium/OpenHermes-2.5 - TokenBender/python_eval_instruct_51k - Spider - codefuse-ai/Evol-instruction-66k ## How to Prompt the Model This model accepts the Alpaca instruction format. For example: ``` You are an intelligent programming assistant. ### Instruction: Implement a linked list in C++ ### Response: ``` ## HumanEval | Metric | Value | | --- | --- | | humaneval-python | | ## lm-evaluation-harness ```json {'ARC (acc_norm)': , 'HellaSwag (acc_norm)': , 'MMLU (acc)': , 'TruthfulQA (mc2)': , 'Winoground (acc)': , 'GSM8K (acc)': , 'DROP (f1)': , 'Open LLM Score': } ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_uukuguy__speechless-code-mistral-7b-v1.0) | Metric | Value | |-----------------------|---------------------------| | Avg. | | | ARC (25-shot) | | | HellaSwag (10-shot) | | | MMLU (5-shot) | | | TruthfulQA (0-shot) | | | Winogrande (5-shot) | | | GSM8K (5-shot) | | | DROP (3-shot) | |
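Since the card documents the Alpaca prompt format but no loading code, a minimal transformers sketch (not from the original card) is shown below; the generation settings are illustrative.

```python
# Minimal sketch: prompt speechless-mistral-hermes-code-7b with the Alpaca format described above.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "uukuguy/speechless-mistral-hermes-code-7b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

prompt = (
    "You are an intelligent programming assistant.\n\n"
    "### Instruction:\n"
    "Implement a linked list in C++\n\n"
    "### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=512, do_sample=True, temperature=0.2, top_p=0.95)

# Print only the newly generated tokens.
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```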
mradermacher/quill-72b-i1-GGUF
mradermacher
2024-06-04T05:51:36Z
1,020
0
transformers
[ "transformers", "gguf", "en", "base_model:billyjoe/quill-72b", "endpoints_compatible", "region:us" ]
null
2024-06-01T17:31:35Z
--- base_model: billyjoe/quill-72b language: - en library_name: transformers quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/billyjoe/quill-72b <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/quill-72b-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ1_S.gguf) | i1-IQ1_S | 22.8 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ1_M.gguf) | i1-IQ1_M | 23.8 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 25.6 | | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ2_XS.gguf) | i1-IQ2_XS | 27.2 | | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ2_S.gguf) | i1-IQ2_S | 28.0 | | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ2_M.gguf) | i1-IQ2_M | 29.4 | | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q2_K.gguf) | i1-Q2_K | 29.9 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 31.9 | lower quality | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ3_XS.gguf) | i1-IQ3_XS | 32.9 | | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ3_S.gguf) | i1-IQ3_S | 34.6 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q3_K_S.gguf) | i1-Q3_K_S | 34.6 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ3_M.gguf) | i1-IQ3_M | 35.6 | | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q3_K_M.gguf) | i1-Q3_K_M | 37.8 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q3_K_L.gguf) | i1-Q3_K_L | 39.6 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-IQ4_XS.gguf) | i1-IQ4_XS | 39.8 | | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q4_0.gguf) | i1-Q4_0 | 41.5 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q4_K_S.gguf) | i1-Q4_K_S | 44.0 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q4_K_M.gguf) | i1-Q4_K_M | 47.5 | fast, recommended | | [PART 1](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q5_K_S.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q5_K_S.gguf.part2of2) | i1-Q5_K_S | 51.5 | | 
| [PART 1](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q5_K_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q5_K_M.gguf.part2of2) | i1-Q5_K_M | 54.5 | | | [PART 1](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q6_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/quill-72b-i1-GGUF/resolve/main/quill-72b.i1-Q6_K.gguf.part2of2) | i1-Q6_K | 64.4 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
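For the larger quants above that ship as `.partXofY` files, the card defers to TheBloke's READMEs for how to concatenate multi-part files. As a quick sketch (an assumption based on that guidance, not text from this card), the parts are plain byte splits and can be rejoined as shown below; the filenames match the Q5_K_S row in the table.

```python
# Sketch: rejoin a two-part imatrix quant into a single GGUF by byte concatenation.
import shutil

parts = [
    "quill-72b.i1-Q5_K_S.gguf.part1of2",
    "quill-72b.i1-Q5_K_S.gguf.part2of2",
]
with open("quill-72b.i1-Q5_K_S.gguf", "wb") as merged:
    for part in parts:
        with open(part, "rb") as chunk:
            shutil.copyfileobj(chunk, merged)
```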
RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf
RichardErkhov
2024-06-07T04:38:05Z
1,020
0
null
[ "gguf", "region:us" ]
null
2024-06-07T03:19:44Z
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Mistral-ko-7B-v0.1 - GGUF - Model creator: https://huggingface.co/maywell/ - Original model: https://huggingface.co/maywell/Mistral-ko-7B-v0.1/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Mistral-ko-7B-v0.1.Q2_K.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q2_K.gguf) | Q2_K | 2.59GB | | [Mistral-ko-7B-v0.1.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.IQ3_XS.gguf) | IQ3_XS | 2.88GB | | [Mistral-ko-7B-v0.1.IQ3_S.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.IQ3_S.gguf) | IQ3_S | 0.63GB | | [Mistral-ko-7B-v0.1.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q3_K_S.gguf) | Q3_K_S | 0.53GB | | [Mistral-ko-7B-v0.1.IQ3_M.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.IQ3_M.gguf) | IQ3_M | 0.16GB | | [Mistral-ko-7B-v0.1.Q3_K.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q3_K.gguf) | Q3_K | 0.09GB | | [Mistral-ko-7B-v0.1.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q3_K_M.gguf) | Q3_K_M | 0.0GB | | [Mistral-ko-7B-v0.1.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q3_K_L.gguf) | Q3_K_L | 0.0GB | | [Mistral-ko-7B-v0.1.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.IQ4_XS.gguf) | IQ4_XS | 0.0GB | | [Mistral-ko-7B-v0.1.Q4_0.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q4_0.gguf) | Q4_0 | 0.0GB | | [Mistral-ko-7B-v0.1.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.IQ4_NL.gguf) | IQ4_NL | 0.0GB | | [Mistral-ko-7B-v0.1.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q4_K_S.gguf) | Q4_K_S | 0.0GB | | [Mistral-ko-7B-v0.1.Q4_K.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q4_K.gguf) | Q4_K | 0.0GB | | [Mistral-ko-7B-v0.1.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q4_K_M.gguf) | Q4_K_M | 0.0GB | | [Mistral-ko-7B-v0.1.Q4_1.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q4_1.gguf) | Q4_1 | 0.0GB | | [Mistral-ko-7B-v0.1.Q5_0.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q5_0.gguf) | Q5_0 | 0.0GB | | [Mistral-ko-7B-v0.1.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q5_K_S.gguf) | Q5_K_S | 0.0GB | | [Mistral-ko-7B-v0.1.Q5_K.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q5_K.gguf) | Q5_K | 0.0GB | | [Mistral-ko-7B-v0.1.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q5_K_M.gguf) | Q5_K_M | 0.0GB | | 
[Mistral-ko-7B-v0.1.Q5_1.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q5_1.gguf) | Q5_1 | 0.0GB |
| [Mistral-ko-7B-v0.1.Q6_K.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q6_K.gguf) | Q6_K | 0.0GB |
| [Mistral-ko-7B-v0.1.Q8_0.gguf](https://huggingface.co/RichardErkhov/maywell_-_Mistral-ko-7B-v0.1-gguf/blob/main/Mistral-ko-7B-v0.1.Q8_0.gguf) | Q8_0 | 0.0GB |

Original model description:
---
license: cc-by-nc-4.0
language:
- ko
pipeline_tag: text-generation
---

# This model is an old experimental build. It is not recommended for real use.

# Mistral-ko-7B-v0.1

# **Model Details**

### Description
Mistral-ko-7B-v0.1 is a Mistral model with a tokenizer optimized for Korean.

A model first shaped to some extent on raw data was then trained for 2 epochs on the dataset used for Synatra.

-- Further Description After Evaluation --

## Comment

The tokenizer was built on top of @beomi's Korean version of Llama 2.

Thanks to @jin05102518 for providing the base model.

Follow me on twitter: https://twitter.com/stablefluffy

Consider supporting me as I build these models on my own: https://www.buymeacoffee.com/mwell or with a Runpod Credit Gift 💕

Contact me on Telegram: https://t.me/AlzarTakkarsen
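For completeness, a minimal local-inference sketch for the quants listed above: it assumes `llama-cpp-python` is installed and that the Q4_K_M file has been downloaded; both the runtime and the sampling settings are choices of this example.

```python
# Sketch: run a downloaded GGUF quant of Mistral-ko-7B-v0.1 with llama-cpp-python.
from llama_cpp import Llama

llm = Llama(
    model_path="Mistral-ko-7B-v0.1.Q4_K_M.gguf",  # any quant from the table above
    n_ctx=2048,
)

# Korean prompt: "The capital of South Korea is"
out = llm("대한민국의 수도는", max_tokens=32, temperature=0.7)
print(out["choices"][0]["text"])
```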
vdo/videocrafterv2_diffusers
vdo
2024-06-25T04:37:19Z
1,020
1
diffusers
[ "diffusers", "safetensors", "region:us" ]
null
2024-06-25T04:34:31Z
Entry not found
mrm8488/bert-medium-finetuned-squadv2
mrm8488
2021-05-20T00:25:00Z
1,019
1
transformers
[ "transformers", "pytorch", "jax", "bert", "question-answering", "en", "arxiv:1908.08962", "endpoints_compatible", "region:us" ]
question-answering
2022-03-02T23:29:05Z
--- language: en thumbnail: --- # BERT-Medium fine-tuned on SQuAD v2 [BERT-Medium](https://github.com/google-research/bert/) created by [Google Research](https://github.com/google-research) and fine-tuned on [SQuAD 2.0](https://rajpurkar.github.io/SQuAD-explorer/) for **Q&A** downstream task. **Mode size** (after training): **157.46 MB** ## Details of BERT-Small and its 'family' (from their documentation) Released on March 11th, 2020 This is model is a part of 24 smaller BERT models (English only, uncased, trained with WordPiece masking) referenced in [Well-Read Students Learn Better: On the Importance of Pre-training Compact Models](https://arxiv.org/abs/1908.08962). The smaller BERT models are intended for environments with restricted computational resources. They can be fine-tuned in the same manner as the original BERT models. However, they are most effective in the context of knowledge distillation, where the fine-tuning labels are produced by a larger and more accurate teacher. ## Details of the downstream task (Q&A) - Dataset [SQuAD2.0](https://rajpurkar.github.io/SQuAD-explorer/) combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. | Dataset | Split | # samples | | -------- | ----- | --------- | | SQuAD2.0 | train | 130k | | SQuAD2.0 | eval | 12.3k | ## Model training The model was trained on a Tesla P100 GPU and 25GB of RAM. The script for fine tuning can be found [here](https://github.com/huggingface/transformers/blob/master/examples/question-answering/run_squad.py) ## Results: | Metric | # Value | | ------ | --------- | | **EM** | **65.95** | | **F1** | **70.11** | ### Raw metrics from benchmark included in training script: ```json { "exact": 65.95637159942727, "f1": 70.11632254245896, "total": 11873, "HasAns_exact": 67.79689608636977, "HasAns_f1": 76.12872765631123, "HasAns_total": 5928, "NoAns_exact": 64.12111017661901, "NoAns_f1": 64.12111017661901, "NoAns_total": 5945, "best_exact": 65.96479407058031, "best_exact_thresh": 0.0, "best_f1": 70.12474501361196, "best_f1_thresh": 0.0 } ``` ## Comparison: | Model | EM | F1 score | SIZE (MB) | | --------------------------------------------------------------------------------------------- | --------- | --------- | --------- | | [bert-tiny-finetuned-squadv2](https://huggingface.co/mrm8488/bert-tiny-finetuned-squadv2) | 48.60 | 49.73 | **16.74** | | [bert-tiny-5-finetuned-squadv2](https://huggingface.co/mrm8488/bert-tiny-5-finetuned-squadv2) | 57.12 | 60.86 | 24.34 | | [bert-mini-finetuned-squadv2](https://huggingface.co/mrm8488/bert-mini-finetuned-squadv2) | 56.31 | 59.65 | 42.63 | | [bert-mini-5-finetuned-squadv2](https://huggingface.co/mrm8488/bert-mini-5-finetuned-squadv2) | 63.51 | 66.78 | 66.76 | | [bert-small-finetuned-squadv2](https://huggingface.co/mrm8488/bert-small-finetuned-squadv2) | 60.49 | 64.21 | 109.74 | | [bert-medium-finetuned-squadv2](https://huggingface.co/mrm8488/bert-medium-finetuned-squadv2) | **65.95** | **70.11** | 157.46 | ## Model in action Fast usage with **pipelines**: ```python from transformers import pipeline qa_pipeline = pipeline( "question-answering", model="mrm8488/bert-small-finetuned-squadv2", tokenizer="mrm8488/bert-small-finetuned-squadv2" ) qa_pipeline({ 'context': "Manuel Romero has been working hardly in the repository 
hugginface/transformers lately", 'question': "Who has been working hard for hugginface/transformers lately?" }) # Output: ``` ```json { "answer": "Manuel Romero", "end": 13, "score": 0.9939319924374637, "start": 0 } ``` ### Yes! That was easy 🎉 Let's try with another example ```python qa_pipeline({ 'context': "Manuel Romero has been working remotely in the repository hugginface/transformers lately", 'question': "How has been working Manuel Romero?" }) # Output: ``` ```json { "answer": "remotely", "end": 39, "score": 0.3612058272768017, "start": 31 } ``` ### It works!! 🎉 🎉 🎉 > Created by [Manuel Romero/@mrm8488](https://twitter.com/mrm8488) | [LinkedIn](https://www.linkedin.com/in/manuel-romero-cs/) > Made with <span style="color: #e25555;">&hearts;</span> in Spain
sdasd112132/Norquinal_Mistral-7B-claude-chat-nsfw-lora-test
sdasd112132
2023-12-13T04:48:02Z
1,019
12
null
[ "gguf", "not-for-all-audiences", "en", "zh", "license:llama2", "region:us" ]
null
2023-12-13T04:18:40Z
---
license: llama2
language:
- en
- zh
tags:
- not-for-all-audiences
---

A test merge of Norquinal_Mistral-7B-claude-chat with NSFW ("gentleman") novels.

Mistral-7B already has most of the needed Chinese ability, so this effectively just fine-tunes the writing style.
fhai50032/SamChat
fhai50032
2024-03-04T12:48:29Z
1,019
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "Intel/neural-chat-7b-v3-3", "cognitivecomputations/samantha-mistral-7b", "base_model:Intel/neural-chat-7b-v3-3", "base_model:cognitivecomputations/samantha-mistral-7b", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-10T23:23:15Z
--- license: apache-2.0 tags: - merge - mergekit - mistral - Intel/neural-chat-7b-v3-3 - cognitivecomputations/samantha-mistral-7b base_model: - Intel/neural-chat-7b-v3-3 - cognitivecomputations/samantha-mistral-7b model-index: - name: SamChat results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 62.2 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=fhai50032/SamChat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 81.88 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=fhai50032/SamChat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 59.7 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=fhai50032/SamChat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 52.89 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=fhai50032/SamChat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 72.14 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=fhai50032/SamChat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 41.24 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=fhai50032/SamChat name: Open LLM Leaderboard --- # SamChat SamChat is a merge of the following models * [Intel/neural-chat-7b-v3-3](https://huggingface.co/Intel/neural-chat-7b-v3-3) * [cognitivecomputations/samantha-mistral-7b](https://huggingface.co/cognitivecomputations/samantha-mistral-7b) ## 🧩 Configuration ```yaml models: - model: macadeliccc/WestLake-7B-v2-laser-truthy-dpo # no params for base model - model: Intel/neural-chat-7b-v3-3 parameters: weight: 0.55 density: 0.46 - model: cognitivecomputations/samantha-mistral-7b parameters: weight: 0.64 density: 0.55 merge_method: dare_ties base_model: macadeliccc/WestLake-7B-v2-laser-truthy-dpo parameters: normalize: true int8_mask: true dtype: float16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "fhai50032/SamChat" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) 
print(outputs[0]["generated_text"]) ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_fhai50032__SamChat) | Metric |Value| |---------------------------------|----:| |Avg. |61.68| |AI2 Reasoning Challenge (25-Shot)|62.20| |HellaSwag (10-Shot) |81.88| |MMLU (5-Shot) |59.70| |TruthfulQA (0-shot) |52.89| |Winogrande (5-shot) |72.14| |GSM8k (5-shot) |41.24|
Niggendar/MatrixHentaiPony_v143
Niggendar
2024-04-17T09:04:20Z
1,019
3
diffusers
[ "diffusers", "safetensors", "arxiv:1910.09700", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2024-04-17T08:57:05Z
--- library_name: diffusers --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🧨 diffusers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf
RichardErkhov
2024-06-05T12:16:36Z
1,019
0
null
[ "gguf", "region:us" ]
null
2024-06-05T11:52:33Z
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) gpt2-medium-emailgen - GGUF - Model creator: https://huggingface.co/postbot/ - Original model: https://huggingface.co/postbot/gpt2-medium-emailgen/ | Name | Quant method | Size | | ---- | ---- | ---- | | [gpt2-medium-emailgen.Q2_K.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q2_K.gguf) | Q2_K | 0.17GB | | [gpt2-medium-emailgen.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.IQ3_XS.gguf) | IQ3_XS | 0.18GB | | [gpt2-medium-emailgen.IQ3_S.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.IQ3_S.gguf) | IQ3_S | 0.19GB | | [gpt2-medium-emailgen.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q3_K_S.gguf) | Q3_K_S | 0.19GB | | [gpt2-medium-emailgen.IQ3_M.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.IQ3_M.gguf) | IQ3_M | 0.2GB | | [gpt2-medium-emailgen.Q3_K.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q3_K.gguf) | Q3_K | 0.21GB | | [gpt2-medium-emailgen.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q3_K_M.gguf) | Q3_K_M | 0.21GB | | [gpt2-medium-emailgen.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q3_K_L.gguf) | Q3_K_L | 0.23GB | | [gpt2-medium-emailgen.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.IQ4_XS.gguf) | IQ4_XS | 0.22GB | | [gpt2-medium-emailgen.Q4_0.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q4_0.gguf) | Q4_0 | 0.23GB | | [gpt2-medium-emailgen.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.IQ4_NL.gguf) | IQ4_NL | 0.23GB | | [gpt2-medium-emailgen.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q4_K_S.gguf) | Q4_K_S | 0.23GB | | [gpt2-medium-emailgen.Q4_K.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q4_K.gguf) | Q4_K | 0.25GB | | [gpt2-medium-emailgen.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q4_K_M.gguf) | Q4_K_M | 0.25GB | | [gpt2-medium-emailgen.Q4_1.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q4_1.gguf) | Q4_1 | 0.25GB | | [gpt2-medium-emailgen.Q5_0.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q5_0.gguf) | Q5_0 | 0.27GB | | [gpt2-medium-emailgen.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q5_K_S.gguf) | Q5_K_S | 0.27GB | | [gpt2-medium-emailgen.Q5_K.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q5_K.gguf) | Q5_K | 0.29GB | | 
[gpt2-medium-emailgen.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q5_K_M.gguf) | Q5_K_M | 0.29GB | | [gpt2-medium-emailgen.Q5_1.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q5_1.gguf) | Q5_1 | 0.29GB | | [gpt2-medium-emailgen.Q6_K.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q6_K.gguf) | Q6_K | 0.32GB | | [gpt2-medium-emailgen.Q8_0.gguf](https://huggingface.co/RichardErkhov/postbot_-_gpt2-medium-emailgen-gguf/blob/main/gpt2-medium-emailgen.Q8_0.gguf) | Q8_0 | 0.41GB | Original model description: --- license: - apache-2.0 tags: - text generation - emailgen - email generation - email datasets: - aeslc - postbot/multi-emails-100k widget: - text: "Good Morning Professor Beans, Hope you are doing well. I just wanted to reach out and ask if differential calculus will be on the exam" example_title: "email to prof" - text: "Hey <NAME>,\n\nThank you for signing up for my weekly newsletter. Before we get started, you'll have to confirm your email address." example_title: "newsletter" - text: "Hi <NAME>,\n\nI hope this email finds you well. I wanted to reach out and ask about office hours" example_title: "office hours" - text: "Greetings <NAME>,\n\nI hope you had a splendid evening at the Company sausage eating festival. I am reaching out because" example_title: "festival" - text: "Good Morning Harold,\n\nI was wondering when the next" example_title: "event" - text: "URGENT - I need the TPS reports" example_title: "URGENT" - text: "Hi Archibald,\n\nI hope this email finds you extremely well." example_title: "emails that find you" - text: "Hello there.\n\nI just wanted to reach out and check in to" example_title: "checking in" - text: "Hello <NAME>,\n\nI hope this email finds you well. I wanted to reach out and see if you've enjoyed your time with us" example_title: "work well" - text: "Hi <NAME>,\n\nI hope this email finds you well. I wanted to reach out and see if we could catch up" example_title: "catch up" - text: "I'm <NAME> and I just moved into the area and wanted to reach out and get some details on where I could get groceries and" example_title: "grocery" parameters: min_length: 32 max_length: 128 no_repeat_ngram_size: 2 do_sample: True temperature: 0.3 top_k: 20 top_p: 0.95 repetition_penalty: 3.5 length_penalty: 0.9 --- # gpt2-medium-emailgen [![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/gist/pszemraj/70058788c6d4b430398c12ee8ba10602/minimal-demo-for-postbot-gpt2-medium-emailgen.ipynb ) Why write the entire email when you can generate (most of) it? ```python from transformers import pipeline model_tag = "postbot/gpt2-medium-emailgen" generator = pipeline( 'text-generation', model=model_tag, ) prompt = """ Hello, Following up on the bubblegum shipment.""" result = generator( prompt, max_length=64, do_sample=False, early_stopping=True, ) # generate print(result[0]['generated_text']) ``` ## about This model is a fine-tuned version of [gpt2-medium](https://huggingface.co/gpt2-medium) on the postbot/multi-emails-100k dataset. It achieves the following results on the evaluation set: - Loss: 1.5840 ## Model description More information needed ## Intended uses & limitations - this is intended as a tool to save time writing predictable emails and not to write emails without a human-in-the-loop. 
validate that your email is factually correct before sending it to others. ## Training and evaluation data - the dataset is essentially a hand-curated/augmented expansion to the classic `aeslc` dataset ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - distributed_type: multi-GPU - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.02 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 1.8701 | 1.0 | 789 | 1.8378 | | 1.5065 | 2.0 | 1578 | 1.6176 | | 1.1873 | 3.0 | 2367 | 1.5840 | ### Framework versions - Transformers 4.22.2 - Pytorch 1.10.0+cu113 - Datasets 2.5.1 - Tokenizers 0.12.1 # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_postbot__gpt2-medium-emailgen) | Metric | Value | |-----------------------|---------------------------| | Avg. | 25.97 | | ARC (25-shot) | 26.45 | | HellaSwag (10-shot) | 34.31 | | MMLU (5-shot) | 24.1 | | TruthfulQA (0-shot) | 43.96 | | Winogrande (5-shot) | 50.43 | | GSM8K (5-shot) | 0.0 | | DROP (3-shot) | 2.53 |
bongsoo/kpf-sbert-128d-v1
bongsoo
2023-03-16T01:55:00Z
1,018
1
sentence-transformers
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
2023-03-16T01:49:35Z
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # kpf-sbert-128d-v1 - kpf bert 모델 출력을 128 차원으로 줄이고, nli(3)+sts(10)+nli(3)+sts(10) 훈련시킴 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 128 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('bongsoo/kpf-sbert-128d-v1') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name={MODEL_NAME}) ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 326 with parameters: ``` {'batch_size': 128, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.CosineSimilarityLoss.CosineSimilarityLoss` Parameters of the fit()-Method: ``` { "epochs": 10, "evaluation_steps": 651, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "eps": 1e-06, "lr": 0.0001 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 326, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) (2): Dense({'in_features': 768, 'out_features': 128, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors bongsoo
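To illustrate the semantic-search use mentioned above, the snippet below ranks two candidate sentences against a query by cosine similarity over the 128-dimensional embeddings; the example sentences are arbitrary and only meant as a sketch.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('bongsoo/kpf-sbert-128d-v1')

query = "오늘 서울 날씨가 어떤가요?"          # "How is the weather in Seoul today?"
docs = [
    "서울은 오늘 맑고 따뜻합니다.",           # "Seoul is clear and warm today."
    "주식 시장이 크게 하락했습니다.",         # "The stock market fell sharply."
]

# Embeddings are 128-dimensional (see the Dense layer in the architecture above).
query_emb = model.encode(query, convert_to_tensor=True)
doc_embs = model.encode(docs, convert_to_tensor=True)

scores = util.cos_sim(query_emb, doc_embs)   # higher = more semantically similar
print(scores)
```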
artificialguybr/LogoRedmond-LogoLoraForSDXL
artificialguybr
2023-10-07T02:40:59Z
1,018
36
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2023-08-07T19:22:33Z
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
- lora
- diffusers
base_model: stabilityai/stable-diffusion-xl-base-1.0
instance_prompt: LogoRedAF
widget:
- text: LogoRedAF
---

# Logo.Redmond

![row01](00120-4278840802.png)

DOWNLOAD V2 HERE: https://huggingface.co/artificialguybr/LogoRedmond-LogoLoraForSDXL-V2

Test all my Loras here: https://huggingface.co/spaces/artificialguybr/artificialguybr-demo-lora

Logo.Redmond is here!

I'm grateful for the GPU time from Redmond.AI that allowed me to finish this LORA!

This is a LOGO LORA fine-tuned on SD XL 1.0.

The LORA has a high capacity to generate logo images in a wide variety of themes. It's a versatile LORA.

I recommend generating at 1024x1024.

You can use detailed, minimalist, colorful, or black and white as tags to control the results.

The trigger tag for the model: LogoRedAF

The LORA is not perfect and sometimes needs more than one generation to create good images. I recommend simple prompts.

I really hope you like the LORA and use it.

If you like the model and think it's worth it, you can make a donation to my Patreon or Ko-fi.

Follow me on Twitter to be the first to know about new models:

https://twitter.com/artificialguybr/
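A minimal 🧨 diffusers sketch is given below. The exact LoRA weight filename inside this repository is not stated on the card, so the call relies on diffusers resolving it automatically (an assumption of this example); the prompt and 1024x1024 resolution simply follow the recommendations above.

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")

# Load this logo LoRA on top of the SDXL base model.
pipe.load_lora_weights("artificialguybr/LogoRedmond-LogoLoraForSDXL")

# Use the trigger tag LogoRedAF plus a simple style tag, at the recommended 1024x1024.
prompt = "minimalist logo of a coffee shop, flat colors, LogoRedAF"
image = pipe(prompt, width=1024, height=1024).images[0]
image.save("logo.png")
```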
jbilcke-hf/sdxl-zelda64
jbilcke-hf
2023-09-01T09:19:44Z
1,018
8
diffusers
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "license:creativeml-openrail-m", "region:us" ]
text-to-image
2023-08-31T16:58:10Z
---
license: creativeml-openrail-m
tags:
- text-to-image
- stable-diffusion
- lora
- diffusers
base_model: stabilityai/stable-diffusion-xl-base-1.0
instance_prompt: <s0><s1>
inference: false
---

# sdxl-zelda64 LoRA by Julian BILCKE (HF: [jbilcke-hf](https://huggingface.co/jbilcke-hf), Replicate: [jbilcke](https://replicate.com/jbilcke))

### An SDXL LoRA inspired by Zelda games on Nintendo 64

![lora_image](https://tjzk.replicate.delivery/models_models_cover_image/c8b21524-342a-4dd2-bb01-3e65349ed982/image_12.jpeg)

## Inference with Replicate API

Grab your replicate token [here](https://replicate.com/account)

```bash
pip install replicate
export REPLICATE_API_TOKEN=r8_*************************************
```

```py
import replicate

output = replicate.run(
    "sdxl-zelda64@sha256:435913219645a80ee6743ca500940ab8708889172ca5c4c71bbb701309bb4a60",
    input={"prompt": "Link working as a pizza delivery driver, on a scooter, in new york, in the style of TOK"}
)
print(output)
```

You may also do inference via the API with Node.js or curl, and locally with COG and Docker, [check out the Replicate API page for this model](https://replicate.com/jbilcke/sdxl-zelda64/api)

## Inference with 🧨 diffusers

Replicate SDXL LoRAs are trained with Pivotal Tuning, which combines training a concept via Dreambooth LoRA with training a new token with Textual Inversion.
As `diffusers` doesn't yet support textual inversion for SDXL, we will use the cog-sdxl `TokenEmbeddingsHandler` class.

The trigger tokens for your prompt will be `<s0><s1>`

```shell
pip install diffusers transformers accelerate safetensors huggingface_hub
git clone https://github.com/replicate/cog-sdxl cog_sdxl
```

```py
import torch
from huggingface_hub import hf_hub_download
from diffusers import DiffusionPipeline
from cog_sdxl.dataset_and_utils import TokenEmbeddingsHandler
from diffusers.models import AutoencoderKL

pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16,
        variant="fp16",
).to("cuda")

# Load the LoRA weights onto the pipeline (the call belongs on `pipe`).
pipe.load_lora_weights("jbilcke-hf/sdxl-zelda64", weight_name="lora.safetensors")

text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
tokenizers = [pipe.tokenizer, pipe.tokenizer_2]

embedding_path = hf_hub_download(repo_id="jbilcke-hf/sdxl-zelda64", filename="embeddings.pti", repo_type="model")

embhandler = TokenEmbeddingsHandler(text_encoders, tokenizers)
embhandler.load_embeddings(embedding_path)

prompt="Link working as a pizza delivery driver, on a scooter, in new york, in the style of <s0><s1>"
images = pipe(
    prompt,
    cross_attention_kwargs={"scale": 0.8},
).images

#your output image
images[0]
```
alnrg2arg/blockchainlabs_joe_bez_seminar
alnrg2arg
2024-02-05T02:16:34Z
1,018
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "flemmingmiguel/MBX-7B-v3", "vanillaOVO/supermario_v4", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-05T02:11:52Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - flemmingmiguel/MBX-7B-v3 - vanillaOVO/supermario_v4 --- # blockchainlabs_joe_bez_seminar blockchainlabs_joe_bez_seminar is a merge of the following models using [mergekit](https://github.com/cg123/mergekit): * [flemmingmiguel/MBX-7B-v3](https://huggingface.co/flemmingmiguel/MBX-7B-v3) * [vanillaOVO/supermario_v4](https://huggingface.co/vanillaOVO/supermario_v4) ## 🧩 Configuration ```yaml slices: - sources: - model: flemmingmiguel/MBX-7B-v3 layer_range: [0, 32] - model: vanillaOVO/supermario_v4 layer_range: [0, 32] merge_method: slerp base_model: flemmingmiguel/MBX-7B-v3 parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ```
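## 💻 Usage

The card ends at the merge configuration; a usage sketch in the same style as other LazyMergekit merges is shown below. It assumes the merged checkpoint ships a usable Mistral-style chat template and that `transformers` and `accelerate` are installed; the sampling parameters are illustrative only.

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

model = "alnrg2arg/blockchainlabs_joe_bez_seminar"
messages = [{"role": "user", "content": "What is a large language model?"}]

tokenizer = AutoTokenizer.from_pretrained(model)
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```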
ziniuli/Mistral-7B-ReMax-v0.1
ziniuli
2024-02-29T14:35:13Z
1,018
3
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "en", "arxiv:2310.10505", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-29T09:28:54Z
--- language: - en license: apache-2.0 library_name: transformers --- # Model Card for Mistral-7B-ReMax-v0.1 The Mistral-7B-ReMax-v0.1 Large Language Model (LLM) is a Reinforcement Learning from Human Preference (RLHF) fine-tuned version of [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2). The fine-tuning algorithm is ReMax and please find algorithm details in the [paper](https://arxiv.org/abs/2310.10505). ## Model Details - Developed by: Ziniu Li - License: apache-2.0 - Finetuned from [Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) ## Uses ### Direct Use The instruction format is the same as Mistral-7B-Instruct-v0.2. Specifically, the prompt should be surrounded by \[INST\] and \[/INST\] tokens. ``` text = "<s>[INST] What is your favourite condiment? [/INST]" "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!</s> " "[INST] Do you have mayonnaise recipes? [/INST]" ``` ## Training Details ### Training Data 10k prompts from the [lmsys-chat-1m](https://huggingface.co/datasets/lmsys/lmsys-chat-1m) dataset. Note that no responses from this dataset are used in training. ### Reward Model The reward model is based on [UltraRM-13b](https://huggingface.co/openbmb/UltraRM-13b). **Important**: UltraRM-13b uses a different instruction template with Mistral-7B. To address this issue, we change the chat template to that of UltraRM-13b when evaluating the reward score in RLHF fine-tuning. ### Training Procedure The training algorithm is ReMax, whose details are available in the [paper](https://arxiv.org/abs/2310.10505), and implementations are available in the [repository](https://github.com/liziniu/ReMax). ### Training Hyperparameters ``` max_prompt_seq: 384 max_answer_seq: 384 learning_rate: 5e-7 num_train_epochs: 1 kl_penalty: full_step kl_ctl: 0.1 reward_clip_value: 1.0 temperature for sampling: 0.7 top_p for sampling: 0.9 ``` ## Evaluation ### Chat1M Since we fine-tune Mistral-7B-Instruct-v0.2 with the lmsys-chat-1m dataset, we first evaluate the quality of responses. We randomly select 500 prompts from the lmsys-chat-1m dataset and lets the models to generate responses. Then, we let GPT-4 to evaluate the binary win rate. The average win-rate of Mistral-7B-ReMax-v0.1 is 57.8%. | | Mistral-7B-ReMax-v0.1 Wins | Mistral-7B-ReMax-v0.1 Wins | | ------------------------ | -------------------------- | -------------------------- | | Mistral-7B-Instruct-v0.2 | 289 | 211 | Samples of responses are provided below. ``` ==================Instruction================ 你好 ==================Mistral-7B-Instruct-v0.2================ 你好!我是一位帮助您解答问题的机器人。如果您有任何问题,请告诉我,我会尽力回答。 你好(Hello)是中文语言中的一个常用词语,常用于问候或表示亲和,意思是“你好,你很好”。 在我帮助您之前,我想先问一下,您是否知道什么是机器学习?机器学习是人工智能领域中的一种技术,它允许计算机自动学习和改进其行为,以便从数据中提取出有价值的信息,并使计算机能够做出更智能的决策。 如果您已经熟悉机器学习,那么我可以帮助您深入探讨相关主题,如神经网络、支持向量机、决策树等。但是,如果您对机器学习不太熟悉,我可以简单地解释它的基本原理,以及它在现实生活中的应用。 请告诉我您的需求,我会尽力满足您的期望。 再次问候您:你好! ==================Mistral-7B-ReMax-v0.1================ 好的,欢迎使用我的帮助!(Hello, welcome to use my help!) 是的,我可以以中文回答你的问题,或者帮你完成一些简单的任务。(Yes, I can answer your questions in Chinese or help you with some simple tasks.) 请问有什么特定的问题需要我帮忙呢?(What specific question do you need my help with?) 或者,你可以告诉我什么方面我可以提供帮助。(Or you can tell me which area I can provide help in.) 希望我能帮到你!(I hope I can help you!) 
``` ### AlpacaEval [AlpacaEval](https://github.com/tatsu-lab/alpaca_eval) provides 805 test questions that the model has not been trained on. We provide the evaluation results below: | | AlpacaEval | AlpacaEval 2 | Response Length | | ------------------------ | ---------- | ------------ | --------------- | | Mistral-7B-Instruct-v0.2 | 92.78 | 14.72 | 1676 | | Mistral-7B-ReMax-v0.1 | 94.40 | 16.00 | 1478 | | LLaMa2 Chat 7B | 71.37 | 4.96 | 1479 | | LLaMa2 Chat 70B | 92.66 | 13.87 | 1790 | | GPT-4 | 95.28 | 23.58 | 2049 | Samples of responses are provided below ``` ==================Instruction================ Marie is at the music store for a store day. She bought two $50 combos. Then, for $25 she bought two t-shirts for $5 each and five hoodies for $2 each. Then, she bought sneakers for $8 each. How much did she spend total? ==================Mistral-7B-Instruct-v0.2================ Marie bought 2 combos at $50 each, so that's $100. She bought 2 t-shirts at $5 each, which is $10. She bought 5 hoodies at $2 each, which is $10. The sneakers cost $8. So Marie spent a total of $118. ($100 + $10 + $8 = $118) ==================Mistral-7B-ReMax-v0.1================ Marie bought two combos at $50 each, so that's $100. She bought two t-shirts for $5 each, which totals $10. She bought five hoodies for $2 each, so that's $10. The sneakers cost $8. So Marie spent $100 + $10 + $10 + $8 = $128 in total. ``` ## Limitations The Mistral-7B-ReMax-v0.1 model demonstrates that the optimization algorithm ReMax is effective. This model shares the same limitations as the Mistral-7B-Instruct-v0.2 model. In particular, it does not have any moderation mechanisms. ## Citation **BibTeX:** ``` @article{li2023remax, title={Remax: A simple, effective, and efficient method for aligning large language models}, author={Li, Ziniu and Xu, Tian and Zhang, Yushun and Yu, Yang and Sun, Ruoyu and Luo, Zhi-Quan}, journal={arXiv preprint arXiv:2310.10505}, year={2023} } ```
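For convenience, a minimal loading sketch matching the instruction format described above: it assumes the tokenizer inherits the Mistral-7B-Instruct-v0.2 chat template, and the sampling settings mirror those listed under the training hyperparameters (temperature 0.7, top_p 0.9) purely as an example.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "ziniuli/Mistral-7B-ReMax-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

# Build the [INST] ... [/INST] prompt via the chat template.
messages = [{"role": "user", "content": "What is your favourite condiment?"}]
input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)

outputs = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.7, top_p=0.9)
print(tokenizer.decode(outputs[0][input_ids.shape[-1]:], skip_special_tokens=True))
```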
HuggingFaceH4/zephyr-7b-gemma-v0.1
HuggingFaceH4
2024-03-03T18:07:47Z
1,018
120
transformers
[ "transformers", "tensorboard", "safetensors", "gemma", "text-generation", "alignment-handbook", "trl", "dpo", "generated_from_trainer", "conversational", "dataset:argilla/dpo-mix-7k", "arxiv:2310.16944", "base_model:HuggingFaceH4/zephyr-7b-gemma-sft-v0.1", "license:other", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-01T10:22:03Z
--- license: other tags: - alignment-handbook - trl - dpo - generated_from_trainer base_model: HuggingFaceH4/zephyr-7b-gemma-sft-v0.1 datasets: - argilla/dpo-mix-7k license_name: gemma-terms-of-use license_link: https://ai.google.dev/gemma/terms pipeline_tag: text-generation model-index: - name: zephyr-7b-gemma results: - task: type: text-generation name: Text Generation dataset: name: MT-Bench type: unknown metrics: - type: unknown value: 7.81 name: score source: url: https://huggingface.co/spaces/lmsys/mt-bench - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 58.45 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=HuggingFaceH4/zephyr-7b-gemma-v0.1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 83.48 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=HuggingFaceH4/zephyr-7b-gemma-v0.1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 60.68 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=HuggingFaceH4/zephyr-7b-gemma-v0.1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 52.07 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=HuggingFaceH4/zephyr-7b-gemma-v0.1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 74.19 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=HuggingFaceH4/zephyr-7b-gemma-v0.1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 45.56 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=HuggingFaceH4/zephyr-7b-gemma-v0.1 name: Open LLM Leaderboard --- <img src="https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1/resolve/main/thumbnail.png" alt="Zephyr 7B Gemma Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Model Card for Zephyr 7B Gemma Zephyr is a series of language models that are trained to act as helpful assistants. Zephyr 7B Gemma is the third model in the series, and is a fine-tuned version of [`google/gemma-7b`](https://huggingface.co/google/gemma-7b) that was trained on on a mix of publicly available, synthetic datasets using Direct Preference Optimization (DPO). You can reproduce the training of this model via the recipe provided in the [Alignment Handbook](https://github.com/huggingface/alignment-handbook). ## Model description - **Model type:** A 7B parameter GPT-like model fine-tuned on a mix of publicly available, synthetic datasets. 
- **Language(s) (NLP):** Primarily English - **License:** Gemma Terms of Use - **Finetuned from model:** [google/gemma-7b](https://huggingface.co/google/gemma-7b) ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** https://github.com/huggingface/alignment-handbook - **Demo:** https://huggingface.co/spaces/HuggingFaceH4/zephyr-7b-gemma-chat ## Performance | Model |MT Bench⬇️|IFEval| |-----------------------------------------------------------------------|------:|------:| |[zephyr-7b-gemma-v0.1](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1)| 7.81 | 28.76| |[zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) | 7.34 | 43.81| |[google/gemma-7b-it](https://huggingface.co/google/gemma-7b-it) | 6.38 | 38.01| | Model |AGIEval|GPT4All|TruthfulQA|BigBench|Average ⬇️| |-----------------------------------------------------------------------|------:|------:|---------:|-------:|------:| |[zephyr-7b-beta](https://huggingface.co/HuggingFaceH4/zephyr-7b-beta) | 37.52| 71.77| 55.26| 39.77| 51.08| |[zephyr-7b-gemma-v0.1](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1)| 34.22| 66.37| 52.19| 37.10| 47.47| |[mlabonne/Gemmalpaca-7B](https://huggingface.co/mlabonne/Gemmalpaca-7B)| 21.6 | 40.87| 44.85 | 30.49| 34.45| |[google/gemma-7b-it](https://huggingface.co/google/gemma-7b-it) | 21.33| 40.84| 41.70| 30.25| 33.53| <details><summary>Details of AGIEval, GPT4All, TruthfulQA, BigBench </summary> ### AGIEval | Task |Version| Metric |Value| |Stderr| |------------------------------|------:|--------|----:|---|-----:| |agieval_aqua_rat | 0|acc |21.65|± | 2.59| | | |acc_norm|25.20|± | 2.73| |agieval_logiqa_en | 0|acc |34.72|± | 1.87| | | |acc_norm|35.94|± | 1.88| |agieval_lsat_ar | 0|acc |19.57|± | 2.62| | | |acc_norm|21.74|± | 2.73| |agieval_lsat_lr | 0|acc |30.59|± | 2.04| | | |acc_norm|32.55|± | 2.08| |agieval_lsat_rc | 0|acc |49.07|± | 3.05| | | |acc_norm|42.75|± | 3.02| |agieval_sat_en | 0|acc |54.85|± | 3.48| | | |acc_norm|53.40|± | 3.48| |agieval_sat_en_without_passage| 0|acc |37.38|± | 3.38| | | |acc_norm|33.98|± | 3.31| |agieval_sat_math | 0|acc |30.91|± | 3.12| | | |acc_norm|28.18|± | 3.04| Average: 34.22% ### GPT4All | Task |Version| Metric |Value| |Stderr| |-------------|------:|--------|----:|---|-----:| |arc_challenge| 0|acc |49.15|± | 1.46| | | |acc_norm|52.47|± | 1.46| |arc_easy | 0|acc |77.44|± | 0.86| | | |acc_norm|74.75|± | 0.89| |boolq | 1|acc |79.69|± | 0.70| |hellaswag | 0|acc |60.59|± | 0.49| | | |acc_norm|78.00|± | 0.41| |openbookqa | 0|acc |29.20|± | 2.04| | | |acc_norm|37.80|± | 2.17| |piqa | 0|acc |76.82|± | 0.98| | | |acc_norm|77.80|± | 0.97| |winogrande | 0|acc |64.09|± | 1.35| Average: 66.37% ### TruthfulQA | Task |Version|Metric|Value| |Stderr| |-------------|------:|------|----:|---|-----:| |truthfulqa_mc| 1|mc1 |35.74|± | 1.68| | | |mc2 |52.19|± | 1.59| Average: 52.19% ### Bigbench | Task |Version| Metric |Value| |Stderr| |------------------------------------------------|------:|---------------------|----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|53.68|± | 3.63| |bigbench_date_understanding | 0|multiple_choice_grade|59.89|± | 2.55| |bigbench_disambiguation_qa | 0|multiple_choice_grade|30.23|± | 2.86| |bigbench_geometric_shapes | 0|multiple_choice_grade|11.42|± | 1.68| | | |exact_str_match | 0.00|± | 0.00| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|28.40|± | 2.02| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|19.14|± | 1.49| 
|bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|44.67|± | 2.88| |bigbench_movie_recommendation | 0|multiple_choice_grade|26.80|± | 1.98| |bigbench_navigate | 0|multiple_choice_grade|50.00|± | 1.58| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|52.75|± | 1.12| |bigbench_ruin_names | 0|multiple_choice_grade|33.04|± | 2.22| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|33.37|± | 1.49| |bigbench_snarks | 0|multiple_choice_grade|48.62|± | 3.73| |bigbench_sports_understanding | 0|multiple_choice_grade|58.11|± | 1.57| |bigbench_temporal_sequences | 0|multiple_choice_grade|37.20|± | 1.53| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|20.08|± | 1.13| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|15.77|± | 0.87| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|44.67|± | 2.88| Average: 37.1% </details> ## Intended uses & limitations The model was initially fine-tuned on the [DEITA 10K](https://huggingface.co/datasets/HuggingFaceH4/deita-10k-v0-sft) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT. We then further aligned the model with [🤗 TRL's](https://github.com/huggingface/trl) `DPOTrainer` on the [argilla/dpo-mix-7k](https://huggingface.co/datasets/argilla/dpo-mix-7k) dataset, which contains 7k prompts and model completions that are ranked by GPT-4. As a result, the model can be used for chat and you can check out our [demo](https://huggingface.co/spaces/HuggingFaceH4/zephyr-chat) to test its capabilities. Here's how you can run the model using the `pipeline()` function from 🤗 Transformers: ```python # pip install transformers>=4.38.2 # pip install accelerate import torch from transformers import pipeline pipe = pipeline( "text-generation", model="HuggingFaceH4/zephyr-7b-gemma-v0.1", device_map="auto", torch_dtype=torch.bfloat16, ) messages = [ { "role": "system", "content": "", # Model not yet trained for follow this }, {"role": "user", "content": "How many helicopters can a human eat in one sitting?"}, ] outputs = pipe( messages, max_new_tokens=128, do_sample=True, temperature=0.7, top_k=50, top_p=0.95, stop_sequence="<|im_end|>", ) print(outputs[0]["generated_text"][-1]["content"]) # It is not possible for a human to eat a helicopter in one sitting, as a # helicopter is a large and inedible machine. Helicopters are made of metal, # plastic, and other materials that are not meant to be consumed by humans. # Eating a helicopter would be extremely dangerous and would likely cause # serious health problems, including choking, suffocation, and poisoning. It is # important to only eat food that is safe and intended for human consumption. ``` ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> Zephyr 7B Gemma has not been aligned to human preferences for safety within the RLHF phase or deployed with in-the-loop filtering of responses like ChatGPT, so the model can produce problematic outputs (especially when prompted to do so). It is also unknown what the size and composition of the corpus was used to train the base model (`google/gemma-7b`), however it is likely to have included a mix of Web data and technical sources like books and code. See the [StarCoder2 model card](https://huggingface.co/bigcode/starcoder2-15b) for an example of this. 
## Training and evaluation data This model is a fine-tuned version of [HuggingFaceH4/zephyr-7b-gemma-sft-v0.1](https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-sft-v0.1) on the argilla/dpo-mix-7k dataset. It achieves the following results on the evaluation set: - Loss: 0.4695 - Rewards/chosen: -3.3746 - Rewards/rejected: -4.9715 - Rewards/accuracies: 0.7188 - Rewards/margins: 1.5970 - Logps/rejected: -459.4853 - Logps/chosen: -429.9115 - Logits/rejected: 86.4684 - Logits/chosen: 92.8200 ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 2 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:| | 0.1923 | 1.9 | 100 | 0.4736 | -3.4575 | -4.9556 | 0.75 | 1.4980 | -459.1662 | -431.5707 | 86.3863 | 92.7360 | ### Framework versions - Transformers 4.39.0.dev0 - Pytorch 2.1.2+cu121 - Datasets 2.14.6 - Tokenizers 0.15.1 ## Citation Information If you find this model useful in your work, please consider citing the Zephyr technical report: ``` @misc{tunstall2023zephyr, title={Zephyr: Direct Distillation of LM Alignment}, author={Lewis Tunstall and Edward Beeching and Nathan Lambert and Nazneen Rajani and Kashif Rasul and Younes Belkada and Shengyi Huang and Leandro von Werra and Clémentine Fourrier and Nathan Habib and Nathan Sarrazin and Omar Sanseviero and Alexander M. Rush and Thomas Wolf}, year={2023}, eprint={2310.16944}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` You may also wish to cite the creators of this model as well: ``` @misc{zephyr_7b_gemma, author = {Lewis Tunstall and Philipp Schmid}, title = {Zephyr 7B Gemma}, year = {2024}, publisher = {Hugging Face}, journal = {Hugging Face repository}, howpublished = {\url{https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1}} } ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_HuggingFaceH4__zephyr-7b-gemma-v0.1) | Metric |Value| |---------------------------------|----:| |Avg. |62.41| |AI2 Reasoning Challenge (25-Shot)|58.45| |HellaSwag (10-Shot) |83.48| |MMLU (5-Shot) |60.68| |TruthfulQA (0-shot) |52.07| |Winogrande (5-shot) |74.19| |GSM8k (5-shot) |45.56|
abhishek/autotrain-8kfjk-b3gva
abhishek
2024-03-19T12:26:49Z
1,018
0
transformers
[ "transformers", "tensorboard", "safetensors", "llama", "text-generation", "autotrain", "conversational", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-19T11:14:40Z
--- tags: - autotrain - text-generation widget: - text: "I love AutoTrain because " license: other --- # Model Trained Using AutoTrain This model was trained using AutoTrain. For more information, please visit [AutoTrain](https://hf.co/docs/autotrain). # Usage ```python from transformers import AutoModelForCausalLM, AutoTokenizer model_path = "PATH_TO_THIS_REPO" tokenizer = AutoTokenizer.from_pretrained(model_path) model = AutoModelForCausalLM.from_pretrained( model_path, device_map="auto", torch_dtype='auto' ).eval() # Prompt content: "hi" messages = [ {"role": "user", "content": "hi"} ] input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, add_generation_prompt=True, return_tensors='pt') output_ids = model.generate(input_ids.to('cuda')) response = tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True) # Model response: "Hello! How can I assist you today?" print(response) ```
dozzke/hermorca
dozzke
2024-03-26T06:17:39Z
1,018
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "conversational", "base_model:NousResearch/Hermes-2-Pro-Mistral-7B", "base_model:Open-Orca/Mistral-7B-OpenOrca", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-21T07:02:22Z
--- base_model: - NousResearch/Hermes-2-Pro-Mistral-7B - Open-Orca/Mistral-7B-OpenOrca library_name: transformers license: apache-2.0 tags: - mergekit - merge --- # model_hermorca This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the SLERP merge method. ### Models Merged The following models were included in the merge: * [NousResearch/Hermes-2-Pro-Mistral-7B](https://huggingface.co/NousResearch/Hermes-2-Pro-Mistral-7B) * [Open-Orca/Mistral-7B-OpenOrca](https://huggingface.co/Open-Orca/Mistral-7B-OpenOrca) ### OpenLLM Leaderboards 1 - Hermes / 2 - OpenOrca ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65f28d01d83aee7a648a0403/W9ikLtYiFzsz9-t81KfTH.png) ### Configuration The following YAML configuration was used to produce this model: ```yaml slices: - sources: - model: NousResearch/Hermes-2-Pro-Mistral-7B layer_range: [0, 32] - model: Open-Orca/Mistral-7B-OpenOrca layer_range: [0, 32] merge_method: slerp base_model: NousResearch/Hermes-2-Pro-Mistral-7B parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ```
ichigoberry/MonarchPipe-7B-slerp
ichigoberry
2024-04-02T12:23:10Z
1,018
2
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "OpenPipe/mistral-ft-optimized-1227", "mlabonne/AlphaMonarch-7B", "base_model:OpenPipe/mistral-ft-optimized-1227", "base_model:mlabonne/AlphaMonarch-7B", "license:cc-by-nc-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-04-01T14:55:31Z
--- tags: - merge - mergekit - lazymergekit - OpenPipe/mistral-ft-optimized-1227 - mlabonne/AlphaMonarch-7B base_model: - OpenPipe/mistral-ft-optimized-1227 - mlabonne/AlphaMonarch-7B license: cc-by-nc-2.0 --- <img src="https://cdn-uploads.huggingface.co/production/uploads/6389d3c61e8755d777902366/7dTpG4vJWkB4YwBJFCqoE.jpeg" style="border-radius:2%; width: 66%"> # MonarchPipe-7B-slerp MonarchPipe-7B-slerp is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [OpenPipe/mistral-ft-optimized-1227](https://huggingface.co/OpenPipe/mistral-ft-optimized-1227) * [mlabonne/AlphaMonarch-7B](https://huggingface.co/mlabonne/AlphaMonarch-7B) ## 🏆 Eval ### Nous Eval results from the Nous benchmark suite (performed using LLM AutoEval). | Model | Average | AGIEval | GPT4All | TruthfulQA | Bigbench | |---|---:|---:|---:|---:|---:| | [**MonarchPipe-7B-slerp**](https://huggingface.co/ichigoberry/MonarchPipe-7B-slerp) [📄](https://gist.github.com/tosh/3d93f4e3d2c65935bf2f4f9a46791352)| 58.77| **46.12**| 74.89| 66.59| 47.49| | [AlphaMonarch-7B](https://huggingface.co/mlabonne/AlphaMonarch-7B) [📄](https://gist.github.com/mlabonne/1d33c86824b3a11d2308e36db1ba41c1) | **62.74** | 45.37 | **77.01** | **78.39** | **50.2** | | [Monarch-7B](https://huggingface.co/mlabonne/Monarch-7B) [📄](https://gist.github.com/mlabonne/0b8d057c5ece41e0290580a108c7a093) | 62.68 | 45.48 | 77.07 | 78.04 | 50.14 | | [OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B) [📄](https://gist.github.com/mlabonne/88b21dd9698ffed75d6163ebdc2f6cc8) | 52.42 | 42.75 | 72.99 | 52.99 | 40.94 | | [NeuralHermes-2.5-Mistral-7B](https://huggingface.co/mlabonne/NeuralHermes-2.5-Mistral-7B) [📄](https://gist.github.com/mlabonne/14687f1eb3425b166db511f31f8e66f6) | 53.51 | 43.67 | 73.24 | 55.37 | 41.76 | ## 🧩 Configuration ```yaml slices: - sources: - model: OpenPipe/mistral-ft-optimized-1227 layer_range: [0, 32] - model: mlabonne/AlphaMonarch-7B layer_range: [0, 32] merge_method: slerp base_model: OpenPipe/mistral-ft-optimized-1227 parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "ichigoberry/MonarchPipe-7B-slerp" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
ichigoberry/pandafish-7b
ichigoberry
2024-04-02T18:38:58Z
1,018
3
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "arxiv:2403.19522", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-04-02T14:26:54Z
--- tags: - merge - mergekit - lazymergekit license: apache-2.0 --- <img src="https://cdn-uploads.huggingface.co/production/uploads/6389d3c61e8755d777902366/-_AiKUEsY3x-N7oY52fdE.jpeg" style="border-radius:2%; width: 66%"> # pandafish-7b pandafish-7b is an instruct model based on a [Model Stock](https://arxiv.org/abs/2403.19522) merge of the following models (via [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing)): ## 🧩 Configuration ```yaml models: - model: mistralai/Mistral-7B-v0.1 - model: mistralai/Mistral-7B-Instruct-v0.2 - model: CultriX/NeuralTrix-bf16 - model: OpenPipe/mistral-ft-optimized-1227 merge_method: model_stock base_model: mistralai/Mistral-7B-v0.1 dtype: bfloat16 ``` ## 🏆 Evals | Model |Average|AGIEval|GPT4All|TruthfulQA|Bigbench| |---------------------------------------------------------------|------:|------:|---------:|-------:|------:| |[pandafish-7b](https://huggingface.co/ichigoberry/pandafish-7b) [📄](https://gist.github.com/tosh/dda6a21e568d17a410ca618265f64a28)| 51.99 | **40** | **74.23** | 53.22 | 40.51 | |[mistralai/Mistral-7B-Instruct-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2) [📄](https://gist.github.com/mlabonne/05d358e17dffdf9eee7c2322380c9da6) | 54.81 | 38.5 | 71.64 | **66.82** | **42.29** | ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "ichigoberry/pandafish-7b" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF
mradermacher
2024-05-31T09:07:56Z
1,018
3
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:nbeerbower/llama-3-spicy-abliterated-stella-8B", "license:llama3", "endpoints_compatible", "region:us" ]
null
2024-05-14T08:46:59Z
--- base_model: nbeerbower/llama-3-spicy-abliterated-stella-8B language: - en library_name: transformers license: llama3 quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hfhfix --> <!-- ### vocab_type: --> static quants of https://huggingface.co/nbeerbower/llama-3-spicy-abliterated-stella-8B <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q2_K.gguf) | Q2_K | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.IQ3_XS.gguf) | IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q3_K_S.gguf) | Q3_K_S | 3.8 | | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.IQ3_S.gguf) | IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.IQ3_M.gguf) | IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q3_K_L.gguf) | Q3_K_L | 4.4 | | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.IQ4_XS.gguf) | IQ4_XS | 4.6 | | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q5_K_S.gguf) | Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q5_K_M.gguf) | Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q6_K.gguf) | Q6_K | 6.7 | very good quality | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/llama-3-spicy-abliterated-stella-8B-GGUF/resolve/main/llama-3-spicy-abliterated-stella-8B.f16.gguf) | f16 | 16.2 | 16 bpw, 
overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
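For completeness, here is a minimal sketch of running one of the quants listed above with `llama-cpp-python`. It assumes the package is installed (`pip install llama-cpp-python`), that the `Q4_K_M` file from the table has already been downloaded locally (for example with `huggingface-cli download`), and that a 4096-token context is enough for the use case.

```python
# Minimal llama-cpp-python sketch (assumes a locally downloaded Q4_K_M file).
from llama_cpp import Llama

llm = Llama(
    model_path="llama-3-spicy-abliterated-stella-8B.Q4_K_M.gguf",  # file name from the table above
    n_ctx=4096,       # context window to allocate
    n_gpu_layers=-1,  # offload all layers to the GPU if one is available; set to 0 for CPU-only
)

out = llm("Q: What is a GGUF file?\nA:", max_tokens=128, stop=["Q:"])
print(out["choices"][0]["text"])
```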
numfa/open_llama_3b_thai
numfa
2024-06-13T15:42:41Z
1,018
0
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "th", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-06-13T15:09:43Z
---
license: apache-2.0
language:
- en
- th
library_name: transformers
pipeline_tag: text-generation
---

# OpenLLaMA 3B Thai

We extend the Thai vocabulary of [OpenLLaMA 3B v2](https://huggingface.co/openlm-research/open_llama_3b_v2) and continue pretraining on English and Thai datasets.

## Dataset

[WIP]

## Acknowledgements

Research supported with Cloud TPUs from Google's [TPU Research Cloud](https://sites.research.google/trc/about/) (TRC). We used a TPUv4-64 to train the model for about 4 days (1 epoch).

Thank you [TPU Research Cloud](https://sites.research.google/trc/about/) and the [EasyLM project](https://github.com/young-geng/EasyLM)! We use EasyLM to pretrain the model.
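A minimal generation sketch with `transformers` is shown below, assuming the Hub repo id `numfa/open_llama_3b_thai` and a standard causal-LM setup; the Thai prompt and sampling settings are only illustrative.

```python
# Minimal generation sketch (assumes transformers, accelerate, and a CUDA GPU).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "numfa/open_llama_3b_thai"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

prompt = "ประเทศไทยมีจังหวัดทั้งหมด"  # illustrative Thai prompt, since the model was continued on Thai data
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=64, do_sample=True, temperature=0.8)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```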
KnutJaegersberg/RWKV-4-PilePlus-169M-20230520-done-ctx4096
KnutJaegersberg
2023-11-18T07:42:04Z
1,017
0
transformers
[ "transformers", "pytorch", "rwkv", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
2023-09-05T08:46:20Z
--- license: apache-2.0 --- This is just a standard conversion to hf transformers format of models from here: https://huggingface.co/BlinkDL/rwkv-4-pileplus According to the documentation I found, this model should have seen roundabout 2 trillion tokens! # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_KnutJaegersberg__RWKV-4-PilePlus-169M-20230520-done-ctx4096) | Metric | Value | |-----------------------|---------------------------| | Avg. | 25.0 | | ARC (25-shot) | 23.98 | | HellaSwag (10-shot) | 32.25 | | MMLU (5-shot) | 23.37 | | TruthfulQA (0-shot) | 42.29 | | Winogrande (5-shot) | 49.17 | | GSM8K (5-shot) | 0.38 | | DROP (3-shot) | 3.53 |
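Since the checkpoint is already in the Hugging Face Transformers format, a minimal loading sketch follows, assuming `transformers` >= 4.29 (which added RWKV support) and that this repo ships a tokenizer alongside the converted weights; if it does not, the GPT-NeoX-20B tokenizer used by the Pile models would be the usual substitute.

```python
# Minimal loading sketch (assumes transformers >= 4.29 and that the repo includes a tokenizer).
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "KnutJaegersberg/RWKV-4-PilePlus-169M-20230520-done-ctx4096"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo)  # resolves to the RWKV architecture via the config

inputs = tokenizer("The Pile is a dataset that", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=40)
print(tokenizer.decode(output_ids[0]))
```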
akshayvkt/detect-ai-text
akshayvkt
2023-11-04T06:27:07Z
1,017
5
transformers
[ "transformers", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "base_model:distilbert-base-uncased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
2023-11-04T05:07:16Z
--- license: apache-2.0 base_model: distilbert-base-uncased tags: - generated_from_trainer metrics: - accuracy model-index: - name: detect-ai-text results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # detect-ai-text This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.0067 - Accuracy: 0.9964 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | No log | 1.0 | 280 | 0.0059 | 0.9991 | | 0.0319 | 2.0 | 560 | 0.0067 | 0.9964 | ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0 - Datasets 2.14.5 - Tokenizers 0.14.1
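A minimal classification sketch is given below, assuming the Hub repo id `akshayvkt/detect-ai-text`; note that the meaning of the returned labels (which one denotes AI-generated text) is not documented above, so inspect the output before relying on it.

```python
# Minimal classification sketch (assumes transformers is installed).
from transformers import pipeline

clf = pipeline("text-classification", model="akshayvkt/detect-ai-text")
print(clf("This essay was produced by carefully weighing every argument on both sides."))
# Output has the shape [{'label': ..., 'score': ...}]; check which label maps to AI-generated text.
```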
AIGym/deepseek-coder-6.7b-chat
AIGym
2024-03-04T17:17:14Z
1,017
1
transformers
[ "transformers", "safetensors", "llama", "text-generation", "finetuned", "conversational", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-05T21:08:40Z
--- license: apache-2.0 tags: - finetuned pipeline_tag: text-generation model-index: - name: deepseek-coder-6.7b-chat results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 36.01 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/deepseek-coder-6.7b-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 53.74 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/deepseek-coder-6.7b-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 38.22 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/deepseek-coder-6.7b-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 42.94 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/deepseek-coder-6.7b-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 57.54 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/deepseek-coder-6.7b-chat name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 16.98 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=AIGym/deepseek-coder-6.7b-chat name: Open LLM Leaderboard --- # deepseek-coder-6.7B-chat It was created by starting with the deepseek-coder-6.7B and training it on the open assistant dataset. We have attached the wandb report in pdf form to view the training run at a glance. # Reson This model was fine tned to allow it to follow direction and is a steeping stone to further training, but still would be good for asking qestions about code. # How to use You will need the transformers>=4.31 ```python from transformers import AutoTokenizer import transformers import torch model = "AIGym/deepseek-coder-6.7b-chat" tokenizer = AutoTokenizer.from_pretrained(model) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) prompt = "What are the values in open source projects?" formatted_prompt = ( f"### Human: {prompt}### Assistant:" ) sequences = pipeline( formatted_prompt, do_sample=True, top_k=50, top_p = 0.7, num_return_sequences=1, repetition_penalty=1.1, max_new_tokens=500, ) for seq in sequences: print(f"Result: {seq['generated_text']}") ``` # Referrals Run Pod - This is who I use to train th emodels on huggingface. If you use it we both get free crdits. 
- <a href="https://runpod.io?ref=kilq83n1" target="_blank" style="color: #3498db; text-decoration: none; font-weight: bold;">Visit Runpod's Website!</a> Paypal - If you want to leave a tip, it is appecaheted. - <a href="https://paypal.me/OpenSourceTraining" target="_blank" style="color: #3498db; text-decoration: none; font-weight: bold;">Visit My Paypal!</a> # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_AIGym__deepseek-coder-6.7b-chat) | Metric |Value| |---------------------------------|----:| |Avg. |40.90| |AI2 Reasoning Challenge (25-Shot)|36.01| |HellaSwag (10-Shot) |53.74| |MMLU (5-Shot) |38.22| |TruthfulQA (0-shot) |42.94| |Winogrande (5-shot) |57.54| |GSM8k (5-shot) |16.98|
PotatoB/MergeM-7B
PotatoB
2024-04-07T10:09:59Z
1,017
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "OpenPipe/mistral-ft-optimized-1218", "mlabonne/NeuralHermes-2.5-Mistral-7B", "base_model:OpenPipe/mistral-ft-optimized-1218", "base_model:mlabonne/NeuralHermes-2.5-Mistral-7B", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-04-07T07:22:07Z
--- tags: - merge - mergekit - lazymergekit - OpenPipe/mistral-ft-optimized-1218 - mlabonne/NeuralHermes-2.5-Mistral-7B base_model: - OpenPipe/mistral-ft-optimized-1218 - mlabonne/NeuralHermes-2.5-Mistral-7B license: apache-2.0 --- # MergeM-7B MergeM-7B is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [OpenPipe/mistral-ft-optimized-1218](https://huggingface.co/OpenPipe/mistral-ft-optimized-1218) * [mlabonne/NeuralHermes-2.5-Mistral-7B](https://huggingface.co/mlabonne/NeuralHermes-2.5-Mistral-7B) ## 🧩 Configuration ```yaml models: - model: mistralai/Mistral-7B-v0.1 # no parameters necessary for base model - model: OpenPipe/mistral-ft-optimized-1218 parameters: density: 0.5 weight: 0.5 - model: mlabonne/NeuralHermes-2.5-Mistral-7B parameters: density: 0.5 weight: 0.3 merge_method: ties base_model: mistralai/Mistral-7B-v0.1 parameters: normalize: true dtype: float16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "PotatoB/MergeM-7B" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ```
liam168/trans-opus-mt-zh-en
liam168
2021-07-16T03:34:38Z
1,016
17
transformers
[ "transformers", "pytorch", "marian", "text2text-generation", "translation", "en", "zh", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
2022-03-02T23:29:05Z
--- language: - en - zh tags: - translation widget: - text: "我喜欢学习数据科学和机器学习。" --- # liam168/trans-opus-mt-zh-en ## Model description * source group: English * target group: Chinese * model: transformer * source language(s): eng ## How to use ```python >>> from transformers import AutoModelWithLMHead,AutoTokenizer,pipeline >>> mode_name = 'liam168/trans-opus-mt-zh-en' >>> model = AutoModelWithLMHead.from_pretrained(mode_name) >>> tokenizer = AutoTokenizer.from_pretrained(mode_name) >>> translation = pipeline("translation_zh_to_en", model=model, tokenizer=tokenizer) >>> translation('我喜欢学习数据科学和机器学习。', max_length=400) [{'translation_text': 'I like to study data science and machine learning.'}] ``` ## Contact [email protected]
bsp-albz/llama2-13b-platypus-ckpt-1000
bsp-albz
2023-09-12T18:49:49Z
1,016
0
transformers
[ "transformers", "pytorch", "llama", "feature-extraction", "endpoints_compatible", "text-generation-inference", "region:us" ]
feature-extraction
2023-09-12T17:55:36Z
Entry not found
PixArt-alpha/PixArt-LCM-XL-2-1024-MS
PixArt-alpha
2023-12-07T11:47:30Z
1,016
55
diffusers
[ "diffusers", "safetensors", "text-to-image", "Pixart-α", "LCM", "arxiv:2310.00426", "arxiv:2310.04378", "arxiv:2112.10752", "license:openrail++", "diffusers:PixArtAlphaPipeline", "region:us" ]
text-to-image
2023-11-29T17:34:29Z
--- license: openrail++ tags: - text-to-image - Pixart-α - LCM --- <p align="center"> <img src="asset/pixart-lcm2.png" height=120> </p> <div style="display:flex;justify-content: center"> <a href="https://pixart-alpha.github.io/"><img src="https://img.shields.io/static/v1?label=Project%20Page&message=Github&color=blue&logo=github-pages"></a> &ensp; <a href="https://huggingface.co/spaces/PixArt-alpha/PixArt-alpha"><img src="https://img.shields.io/static/v1?label=Demo PixArt&message=HuggingFace&color=yellow"></a> &ensp; <a href="https://huggingface.co/spaces/PixArt-alpha/PixArt-LCM"><img src="https://img.shields.io/static/v1?label=Demo PixArt-LCM&message=HuggingFace&color=yellow"></a> &ensp; <a href="https://arxiv.org/abs/2310.00426"><img src="https://img.shields.io/static/v1?label=PixArt&message=Arxiv&color=red&logo=arxiv"></a> &ensp; <a href="https://arxiv.org/abs/2310.04378"><img src="https://img.shields.io/static/v1?label=LCM&message=Arxiv&color=red&logo=arxiv"></a> &ensp; <a href="https://github.com/orgs/PixArt-alpha/discussions"><img src="https://img.shields.io/static/v1?label=Discussion&message=Github&color=green&logo=github"></a> &ensp; </div> # 🐱 Pixart-LCM Model Card ## 🔥 Why Need PixArt-LCM Following [LCM LoRA](https://huggingface.co/blog/lcm_lora), we illustrative of the generation speed we achieve on various computers. Let us stress again how liberating it is to explore image generation so easily with PixArt-LCM. | Hardware | PixArt-LCM (4 steps) | SDXL LoRA LCM (4 steps) | PixArt standard (14 steps) | SDXL standard (25 steps) | |-----------------------------|----------------------|-------------------------|----------------------------|---------------------------| | T4 (Google Colab Free Tier) | 3.3s | 8.4s | 16.0s | 26.5s | | A100 (80 GB) | 0.51s | 1.2s | 2.2s | 3.8s | | V100 (32 GB) | 0.8s | 1.2s | 5.5s | 7.7s | These tests were run with a batch size of 1 in all cases. For cards with a lot of capacity, such as A100, performance increases significantly when generating multiple images at once, which is usually the case for production workloads. ## Model ![pipeline](asset/model.png) [Pixart-α](https://arxiv.org/abs/2310.00426) consists of pure transformer blocks for latent diffusion: It can directly generate 1024px images from text prompts within a single sampling process. [LCMs](https://arxiv.org/abs/2310.04378) is a diffusion distillation method which predict PF-ODE's solution directly in latent space, achieving super fast inference with few steps. Source code of PixArt-LCM is available at https://github.com/PixArt-alpha/PixArt-alpha. ### Model Description - **Developed by:** Pixart & LCM teams - **Model type:** Diffusion-Transformer-based text-to-image generative model - **License:** [CreativeML Open RAIL++-M License](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/LICENSE.md) - **Model Description:** This is a model that can be used to generate and modify images based on text prompts. It is a [Transformer Latent Diffusion Model](https://arxiv.org/abs/2310.00426) that uses one fixed, pretrained text encoders ([T5]( https://huggingface.co/DeepFloyd/t5-v1_1-xxl)) and one latent feature encoder ([VAE](https://arxiv.org/abs/2112.10752)). - **Resources for more information:** Check out our [PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha), [LCM](https://github.com/luosiallen/latent-consistency-model) GitHub Repository and the [Pixart-α](https://arxiv.org/abs/2310.00426), [LCM](https://arxiv.org/abs/2310.04378) reports on arXiv. 
### Model Sources For research purposes, we recommend our `generative-models` Github repository (https://github.com/PixArt-alpha/PixArt-alpha), which is more suitable for developing both training and inference designs. [Hugging Face](https://huggingface.co/spaces/PixArt-alpha/PixArt-LCM) provides free Pixart-LCM inference. - **Repository:** https://github.com/PixArt-alpha/PixArt-alpha - **Demo:** https://huggingface.co/spaces/PixArt-alpha/PixArt-LCM ### 🧨 Diffusers Make sure to upgrade diffusers to >= 0.23.0: ``` pip install -U diffusers --upgrade ``` In addition make sure to install `transformers`, `safetensors`, `sentencepiece`, and `accelerate`: ``` pip install transformers accelerate safetensors sentencepiece ``` To just use the base model, you can run: ```python import torch from diffusers import PixArtAlphaPipeline # only 1024-MS version is supported for now pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-LCM-XL-2-1024-MS", torch_dtype=torch.float16, use_safetensors=True) # Enable memory optimizations. pipe.enable_model_cpu_offload() prompt = "A small cactus with a happy face in the Sahara desert." image = pipe(prompt, guidance_scale=0., num_inference_steps=4).images[0] ``` When using `torch >= 2.0`, you can improve the inference speed by 20-30% with torch.compile. Simple wrap the unet with torch compile before running the pipeline: ```py pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True) ``` If you are limited by GPU VRAM, you can enable *cpu offloading* by calling `pipe.enable_model_cpu_offload` instead of `.to("cuda")`: ```diff - pipe.to("cuda") + pipe.enable_model_cpu_offload() ``` The diffusers use here is totally the same as the base-model PixArt-α. For more information on how to use Pixart-α with `diffusers`, please have a look at [the Pixart-α Docs](https://huggingface.co/docs/diffusers/main/en/api/pipelines/pixart). ## Uses ### Direct Use The model is intended for research purposes only. Possible research areas and tasks include - Generation of artworks and use in design and other artistic processes. - Applications in educational or creative tools. - Research on generative models. - Safe deployment of models which have the potential to generate harmful content. - Probing and understanding the limitations and biases of generative models. Excluded uses are described below. ### Out-of-Scope Use The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. ## Limitations and Bias ### Limitations - The model does not achieve perfect photorealism - The model cannot render legible text - The model struggles with more difficult tasks which involve compositionality, such as rendering an image corresponding to “A red cube on top of a blue sphere” - fingers, .etc in general may not be generated properly. - The autoencoding part of the model is lossy. ### Bias While the capabilities of image generation models are impressive, they can also reinforce or exacerbate social biases.
nvidia/parakeet-ctc-0.6b
nvidia
2024-01-03T01:10:09Z
1,016
7
nemo
[ "nemo", "automatic-speech-recognition", "speech", "audio", "FastConformer", "Conformer", "pytorch", "NeMo", "hf-asr-leaderboard", "ctc", "en", "dataset:librispeech_asr", "dataset:fisher_corpus", "dataset:Switchboard-1", "dataset:WSJ-0", "dataset:WSJ-1", "dataset:National-Singapore-Corpus-Part-1", "dataset:National-Singapore-Corpus-Part-6", "dataset:vctk", "dataset:voxpopuli", "dataset:europarl", "dataset:multilingual_librispeech", "dataset:mozilla-foundation/common_voice_8_0", "dataset:MLCommons/peoples_speech", "arxiv:2305.05084", "license:cc-by-4.0", "model-index", "region:us" ]
automatic-speech-recognition
2023-12-28T15:43:23Z
--- language: - en library_name: nemo datasets: - librispeech_asr - fisher_corpus - Switchboard-1 - WSJ-0 - WSJ-1 - National-Singapore-Corpus-Part-1 - National-Singapore-Corpus-Part-6 - vctk - voxpopuli - europarl - multilingual_librispeech - mozilla-foundation/common_voice_8_0 - MLCommons/peoples_speech thumbnail: null tags: - automatic-speech-recognition - speech - audio - FastConformer - Conformer - pytorch - NeMo - hf-asr-leaderboard - ctc license: cc-by-4.0 widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac model-index: - name: parakeet-ctc-0.6b results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: AMI (Meetings test) type: edinburghcstr/ami config: ihm split: test args: language: en metrics: - name: Test WER type: wer value: 16.3 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Earnings-22 type: revdotcom/earnings22 split: test args: language: en metrics: - name: Test WER type: wer value: 14.14 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: GigaSpeech type: speechcolab/gigaspeech split: test args: language: en metrics: - name: Test WER type: wer value: 10.35 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (clean) type: librispeech_asr config: other split: test args: language: en metrics: - name: Test WER type: wer value: 1.87 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (other) type: librispeech_asr config: other split: test args: language: en metrics: - name: Test WER type: wer value: 3.76 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: SPGI Speech type: kensho/spgispeech config: test split: test args: language: en metrics: - name: Test WER type: wer value: 4.11 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: tedlium-v3 type: LIUM/tedlium config: release1 split: test args: language: en metrics: - name: Test WER type: wer value: 3.78 - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: Vox Populi type: facebook/voxpopuli config: en split: test args: language: en metrics: - name: Test WER type: wer value: 7 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: Mozilla Common Voice 9.0 type: mozilla-foundation/common_voice_9_0 config: en split: test args: language: en metrics: - name: Test WER type: wer value: 10.57 metrics: - wer pipeline_tag: automatic-speech-recognition --- # Parakeet CTC 0.6B (en) <style> img { display: inline; } </style> [![Model architecture](https://img.shields.io/badge/Model_Arch-FastConformer--CTC-lightgrey#model-badge)](#model-architecture) | [![Model size](https://img.shields.io/badge/Params-0.6B-lightgrey#model-badge)](#model-architecture) | [![Language](https://img.shields.io/badge/Language-en-lightgrey#model-badge)](#datasets) `parakeet-ctc-0.6b` is an ASR model that transcribes speech in lower case English alphabet. This model is jointly developed by [NVIDIA NeMo](https://github.com/NVIDIA/NeMo) and [Suno.ai](https://www.suno.ai/) teams. It is an XL version of FastConformer CTC [1] (around 600M parameters) model. 
See the [model architecture](#model-architecture) section and [NeMo documentation](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#fast-conformer) for complete architecture details. ## NVIDIA NeMo: Training To train, fine-tune or play with the model you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). We recommend you install it after you've installed latest PyTorch version. ``` pip install nemo_toolkit['all'] ``` ## How to Use this Model The model is available for use in the NeMo toolkit [3], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset. ### Automatically instantiate the model ```python import nemo.collections.asr as nemo_asr asr_model = nemo_asr.models.EncDecCTCBPEModel.from_pretrained(model_name="nvidia/parakeet-ctc-0.6b") ``` ### Transcribing using Python First, let's get a sample ``` wget https://dldata-public.s3.us-east-2.amazonaws.com/2086-149220-0033.wav ``` Then simply do: ``` asr_model.transcribe(['2086-149220-0033.wav']) ``` ### Transcribing many audio files ```shell python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py pretrained_name="nvidia/parakeet-ctc-0.6b" audio_dir="<DIRECTORY CONTAINING AUDIO FILES>" ``` ### Input This model accepts 16000 Hz mono-channel audio (wav files) as input. ### Output This model provides transcribed speech as a string for a given audio sample. ## Model Architecture FastConformer [1] is an optimized version of the Conformer model with 8x depthwise-separable convolutional downsampling. The model is trained using CTC loss. You may find more information on the details of FastConformer here: [Fast-Conformer Model](https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/models.html#fast-conformer). ## Training The NeMo toolkit [3] was used for training the models for over several hundred epochs. These model are trained with this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/asr_ctc/speech_to_text_ctc_bpe.py) and this [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/fastconformer/fast-conformer_ctc_bpe.yaml). The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py). ### Datasets The model was trained on 64K hours of English speech collected and prepared by NVIDIA NeMo and Suno teams. The training dataset consists of private subset with 40K hours of English speech plus 24K hours from the following public datasets: - Librispeech 960 hours of English speech - Fisher Corpus - Switchboard-1 Dataset - WSJ-0 and WSJ-1 - National Speech Corpus (Part 1, Part 6) - VCTK - VoxPopuli (EN) - Europarl-ASR (EN) - Multilingual Librispeech (MLS EN) - 2,000 hour subset - Mozilla Common Voice (v7.0) - People's Speech - 12,000 hour subset ## Performance The performance of Automatic Speech Recognition models is measuring using Word Error Rate. Since this dataset is trained on multiple domains and a much larger corpus, it will generally perform better at transcribing audio in general. The following tables summarizes the performance of the available models in this collection with the CTC decoder. Performances of the ASR models are reported in terms of Word Error Rate (WER%) with greedy decoding. 
|**Version**|**Tokenizer**|**Vocabulary Size**|**AMI**|**Earnings-22**|**Giga Speech**|**LS test-clean**|**SPGI Speech**|**TEDLIUM-v3**|**Vox Populi**|**Common Voice**| |---------|-----------------------|-----------------|---------------|---------------|------------|-----------|-----|-------|------|------| | 1.22.0 | SentencePiece Unigram | 1024 | 16.30 | 14.14 | 10.35 | 1.87 | 3.76 | 4.11 | 3.78 | 7.00 | 10.57 | These are greedy WER numbers without external LM. More details on evaluation can be found at [HuggingFace ASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard) ## NVIDIA Riva: Deployment [NVIDIA Riva](https://developer.nvidia.com/riva), is an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, on edge, and embedded. Additionally, Riva provides: * World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours * Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization * Streaming speech recognition, Kubernetes compatible scaling, and enterprise-grade support Although this model isn’t supported yet by Riva, the [list of supported models is here](https://huggingface.co/models?other=Riva). Check out [Riva live demo](https://developer.nvidia.com/riva#demos). ## References [1] [Fast Conformer with Linearly Scalable Attention for Efficient Speech Recognition](https://arxiv.org/abs/2305.05084) [2] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece) [3] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo) [4] [Suno.ai](https://suno.ai/) [5] [HuggingFace ASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard) ## Licence License to use this model is covered by the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/). By downloading the public and release version of the model, you accept the terms and conditions of the [CC-BY-4.0](https://creativecommons.org/licenses/by/4.0/) license.
aloobun/Synch-Qwen1.5-1.8B
aloobun
2024-03-22T20:01:47Z
1,016
1
transformers
[ "transformers", "safetensors", "qwen2", "text-generation", "mergekit", "merge", "conversational", "arxiv:2306.01708", "base_model:aloobun/Reyna-Mini-1.8B-v0.2", "base_model:aloobun/Reyna-Mini-1.8B-v0.1", "base_model:Qwen/Qwen1.5-1.8B", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-22T18:52:33Z
--- base_model: - aloobun/Reyna-Mini-1.8B-v0.2 - aloobun/Reyna-Mini-1.8B-v0.1 - Qwen/Qwen1.5-1.8B license: other license_name: tongyi-qianwen-research license_link: https://huggingface.co/Qwen/Qwen1.5-1.8B/blob/main/LICENSE library_name: transformers tags: - mergekit - merge --- ## Merge Details This experimental model is the result of [TIES](https://arxiv.org/abs/2306.01708) merging **only on the embedding weights**, with [Qwen/Qwen1.5-1.8B](https://huggingface.co/Qwen/Qwen1.5-1.8B) as a base. ### Models Merged The following models were included in the merge: * [aloobun/Reyna-Mini-1.8B-v0.2](https://huggingface.co/aloobun/Reyna-Mini-1.8B-v0.2) * [aloobun/Reyna-Mini-1.8B-v0.1](https://huggingface.co/aloobun/Reyna-Mini-1.8B-v0.1) ### Configuration The following YAML configuration was used to produce this model: ```yaml merge_method: ties base_model: Qwen/Qwen1.5-1.8B models: - model: aloobun/Reyna-Mini-1.8B-v0.2 parameters: weight: 0.7 density: - filter: embed_tokens value: 0.66 - value: 1.0 - model: aloobun/Reyna-Mini-1.8B-v0.1 parameters: weight: - filter: embed_tokens value: 0.3 - value: 0.0 density: 0.66 ```
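A minimal sketch of running the merged checkpoint with `transformers` follows, assuming the repo id `aloobun/Synch-Qwen1.5-1.8B` and `transformers` >= 4.37 (Qwen2 support); it uses plain completion, since it is not stated above which chat template, if any, the merged tokenizer keeps.

```python
# Minimal completion sketch (assumes transformers >= 4.37 for Qwen2 support, accelerate, and a GPU).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "aloobun/Synch-Qwen1.5-1.8B"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(repo, torch_dtype=torch.float16, device_map="auto")

text = "Merging only the embedding weights of two fine-tunes means"
inputs = tokenizer(text, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=60)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```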
crusoeai/dolphin-2.9-llama3-70b-GGUF
crusoeai
2024-04-25T15:45:28Z
1,016
11
null
[ "gguf", "autoquant", "dolphin", "llama3", "region:us" ]
null
2024-04-24T07:57:14Z
--- tags: - autoquant - gguf - dolphin - llama3 ---
timm/eva02_large_patch14_448.mim_m38m_ft_in1k
timm
2024-02-10T23:37:44Z
1,015
10
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "dataset:imagenet-22k", "arxiv:2303.11331", "arxiv:2303.15389", "license:mit", "region:us" ]
image-classification
2023-03-31T04:41:55Z
--- license: mit library_name: timm tags: - image-classification - timm datasets: - imagenet-1k - imagenet-22k --- # Model card for eva02_large_patch14_448.mim_m38m_ft_in1k An EVA02 image classification model. Pretrained on Merged-38M (IN-22K, CC12M, CC3M, COCO (train), ADE20K (train), Object365, and OpenImages) with masked image modeling (using EVA-CLIP as a MIM teacher) and fine-tuned on ImageNet-1k by paper authors. EVA-02 models are vision transformers with mean pooling, SwiGLU, Rotary Position Embeddings (ROPE), and extra LN in MLP (for Base & Large). NOTE: `timm` checkpoints are float32 for consistency with other models. Original checkpoints are float16 or bfloat16 in some cases, see originals if that's preferred. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 305.1 - GMACs: 362.3 - Activations (M): 689.9 - Image size: 448 x 448 - **Papers:** - EVA-02: A Visual Representation for Neon Genesis: https://arxiv.org/abs/2303.11331 - EVA-CLIP: Improved Training Techniques for CLIP at Scale: https://arxiv.org/abs/2303.15389 - **Original:** - https://github.com/baaivision/EVA - https://huggingface.co/Yuxin-CV/EVA-02 - **Pretrain Dataset:** ImageNet-22k - **Dataset:** ImageNet-1k ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('eva02_large_patch14_448.mim_m38m_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'eva02_large_patch14_448.mim_m38m_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1025, 1024) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
|model |top1 |top5 |param_count|img_size| |-----------------------------------------------|------|------|-----------|--------| |eva02_large_patch14_448.mim_m38m_ft_in22k_in1k |90.054|99.042|305.08 |448 | |eva02_large_patch14_448.mim_in22k_ft_in22k_in1k|89.946|99.01 |305.08 |448 | |eva_giant_patch14_560.m30m_ft_in22k_in1k |89.792|98.992|1014.45 |560 | |eva02_large_patch14_448.mim_in22k_ft_in1k |89.626|98.954|305.08 |448 | |eva02_large_patch14_448.mim_m38m_ft_in1k |89.57 |98.918|305.08 |448 | |eva_giant_patch14_336.m30m_ft_in22k_in1k |89.56 |98.956|1013.01 |336 | |eva_giant_patch14_336.clip_ft_in1k |89.466|98.82 |1013.01 |336 | |eva_large_patch14_336.in22k_ft_in22k_in1k |89.214|98.854|304.53 |336 | |eva_giant_patch14_224.clip_ft_in1k |88.882|98.678|1012.56 |224 | |eva02_base_patch14_448.mim_in22k_ft_in22k_in1k |88.692|98.722|87.12 |448 | |eva_large_patch14_336.in22k_ft_in1k |88.652|98.722|304.53 |336 | |eva_large_patch14_196.in22k_ft_in22k_in1k |88.592|98.656|304.14 |196 | |eva02_base_patch14_448.mim_in22k_ft_in1k |88.23 |98.564|87.12 |448 | |eva_large_patch14_196.in22k_ft_in1k |87.934|98.504|304.14 |196 | |eva02_small_patch14_336.mim_in22k_ft_in1k |85.74 |97.614|22.13 |336 | |eva02_tiny_patch14_336.mim_in22k_ft_in1k |80.658|95.524|5.76 |336 | ## Citation ```bibtex @article{EVA02, title={EVA-02: A Visual Representation for Neon Genesis}, author={Fang, Yuxin and Sun, Quan and Wang, Xinggang and Huang, Tiejun and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2303.11331}, year={2023} } ``` ```bibtex @article{EVA-CLIP, title={EVA-02: A Visual Representation for Neon Genesis}, author={Sun, Quan and Fang, Yuxin and Wu, Ledell and Wang, Xinlong and Cao, Yue}, journal={arXiv preprint arXiv:2303.15389}, year={2023} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
Yntec/yabalMixTrue25D_v2_VAE
Yntec
2023-07-23T00:38:38Z
1,015
5
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "YabaL", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
2023-07-23T00:20:05Z
---
license: creativeml-openrail-m
library_name: diffusers
pipeline_tag: text-to-image
tags:
- stable-diffusion
- stable-diffusion-diffusers
- diffusers
- text-to-image
- YabaL
---

# YabaL_Mix True2.5D

This is the YabaL_Mix True2.5D model with the Waifu 1.4 VAE baked in.

Original page: https://civitai.com/models/60093/yabalmix-true25d

Original Author's DEMO image :

![](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/f58fa764-5cfd-4e7f-a143-b372a8796b2b/width=1080/4x-UltraSharp%20(1).jpeg)
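A minimal text-to-image sketch with `diffusers` is shown below, assuming the repo id `Yntec/yabalMixTrue25D_v2_VAE`; because the VAE is baked in, no separate VAE needs to be loaded. The prompt and step count are illustrative.

```python
# Minimal text-to-image sketch (assumes diffusers, transformers, and a CUDA GPU).
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "Yntec/yabalMixTrue25D_v2_VAE", torch_dtype=torch.float16
).to("cuda")

# Illustrative prompt; the baked-in VAE is used automatically.
image = pipe("a cozy cafe on a rainy evening, detailed, soft lighting", num_inference_steps=25).images[0]
image.save("yabalmix_sample.png")
```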
facebook/mms-tts-uzb-script_cyrillic
facebook
2023-09-01T10:33:18Z
1,015
3
transformers
[ "transformers", "pytorch", "safetensors", "vits", "text-to-audio", "mms", "text-to-speech", "arxiv:2305.13516", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
text-to-speech
2023-09-01T10:32:50Z
--- license: cc-by-nc-4.0 tags: - mms - vits pipeline_tag: text-to-speech --- # Massively Multilingual Speech (MMS): Uzbek Text-to-Speech This repository contains the **Uzbek (uzb-script_cyrillic)** language text-to-speech (TTS) model checkpoint. This model is part of Facebook's [Massively Multilingual Speech](https://arxiv.org/abs/2305.13516) project, aiming to provide speech technology across a diverse range of languages. You can find more details about the supported languages and their ISO 639-3 codes in the [MMS Language Coverage Overview](https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html), and see all MMS-TTS checkpoints on the Hugging Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts). MMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. ## Model Details VITS (**V**ariational **I**nference with adversarial learning for end-to-end **T**ext-to-**S**peech) is an end-to-end speech synthesis model that predicts a speech waveform conditional on an input text sequence. It is a conditional variational autoencoder (VAE) comprised of a posterior encoder, decoder, and conditional prior. A set of spectrogram-based acoustic features are predicted by the flow-based module, which is formed of a Transformer-based text encoder and multiple coupling layers. The spectrogram is decoded using a stack of transposed convolutional layers, much in the same style as the HiFi-GAN vocoder. Motivated by the one-to-many nature of the TTS problem, where the same text input can be spoken in multiple ways, the model also includes a stochastic duration predictor, which allows the model to synthesise speech with different rhythms from the same input text. The model is trained end-to-end with a combination of losses derived from variational lower bound and adversarial training. To improve the expressiveness of the model, normalizing flows are applied to the conditional prior distribution. During inference, the text encodings are up-sampled based on the duration prediction module, and then mapped into the waveform using a cascade of the flow module and HiFi-GAN decoder. Due to the stochastic nature of the duration predictor, the model is non-deterministic, and thus requires a fixed seed to generate the same speech waveform. For the MMS project, a separate VITS checkpoint is trained on each langauge. ## Usage MMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. To use this checkpoint, first install the latest version of the library: ``` pip install --upgrade transformers accelerate ``` Then, run inference with the following code-snippet: ```python from transformers import VitsModel, AutoTokenizer import torch model = VitsModel.from_pretrained("facebook/mms-tts-uzb-script_cyrillic") tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-uzb-script_cyrillic") text = "some example text in the Uzbek language" inputs = tokenizer(text, return_tensors="pt") with torch.no_grad(): output = model(**inputs).waveform ``` The resulting waveform can be saved as a `.wav` file: ```python import scipy scipy.io.wavfile.write("techno.wav", rate=model.config.sampling_rate, data=output) ``` Or displayed in a Jupyter Notebook / Google Colab: ```python from IPython.display import Audio Audio(output, rate=model.config.sampling_rate) ``` ## BibTex citation This model was developed by Vineel Pratap et al. from Meta AI. 
If you use the model, consider citing the MMS paper: ``` @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ``` ## License The model is licensed as **CC-BY-NC 4.0**.
Gille/StrangeMerges_17-7B-dare_ties
Gille
2024-04-03T09:15:09Z
1,015
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "Gille/StrangeMerges_16-7B-slerp", "Gille/StrangeMerges_12-7B-slerp", "base_model:Gille/StrangeMerges_16-7B-slerp", "base_model:Gille/StrangeMerges_12-7B-slerp", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-31T23:54:19Z
--- license: apache-2.0 tags: - merge - mergekit - lazymergekit - Gille/StrangeMerges_16-7B-slerp - Gille/StrangeMerges_12-7B-slerp base_model: - Gille/StrangeMerges_16-7B-slerp - Gille/StrangeMerges_12-7B-slerp model-index: - name: StrangeMerges_17-7B-dare_ties results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 66.64 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_17-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 86.04 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_17-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 65.07 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_17-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 53.18 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_17-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 81.93 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_17-7B-dare_ties name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 64.37 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Gille/StrangeMerges_17-7B-dare_ties name: Open LLM Leaderboard --- # StrangeMerges_17-7B-dare_ties StrangeMerges_17-7B-dare_ties is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [Gille/StrangeMerges_16-7B-slerp](https://huggingface.co/Gille/StrangeMerges_16-7B-slerp) * [Gille/StrangeMerges_12-7B-slerp](https://huggingface.co/Gille/StrangeMerges_12-7B-slerp) ## 🧩 Configuration ```yaml models: - model: Gille/StrangeMerges_16-7B-slerp # no parameters necessary for base model - model: Gille/StrangeMerges_16-7B-slerp parameters: density: 0.5 weight: 0.4 - model: Gille/StrangeMerges_12-7B-slerp parameters: density: 0.5 weight: 0.6 merge_method: dare_ties base_model: Gille/StrangeMerges_16-7B-slerp parameters: normalize: true dtype: float16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "Gille/StrangeMerges_17-7B-dare_ties" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, 
add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Gille__StrangeMerges_17-7B-dare_ties) | Metric |Value| |---------------------------------|----:| |Avg. |69.54| |AI2 Reasoning Challenge (25-Shot)|66.64| |HellaSwag (10-Shot) |86.04| |MMLU (5-Shot) |65.07| |TruthfulQA (0-shot) |53.18| |Winogrande (5-shot) |81.93| |GSM8k (5-shot) |64.37|
ooooz/midnight-miqu-70b-v1.0-GGUF
ooooz
2024-03-02T04:50:21Z
1,015
4
transformers
[ "transformers", "gguf", "llama", "text-generation", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-02T02:34:54Z
GGUF format model files for sophosympatheia/Midnight-Miqu-70B-v1.0 https://huggingface.co/sophosympatheia/Midnight-Miqu-70B-v1.0
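As a quick, hedged sketch (not part of the original card): one common way to run these GGUF files locally is with llama-cpp-python. The filename below is a placeholder — substitute whichever quantization you actually downloaded from this repo.

```python
# Hypothetical usage sketch with llama-cpp-python; the GGUF filename is a placeholder.
from llama_cpp import Llama

llm = Llama(
    model_path="midnight-miqu-70b-v1.0.Q4_K_M.gguf",  # placeholder: use your downloaded quant
    n_ctx=4096,        # context window to allocate
    n_gpu_layers=-1,   # offload all layers to GPU if VRAM allows; use 0 for CPU-only
)

out = llm("Write a short scene set in a rain-soaked, neon-lit city.", max_tokens=200)
print(out["choices"][0]["text"])
```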
OwenArli/Meta-Llama-3-8B-Instruct-Dolfin-v0.1
OwenArli
2024-04-24T06:45:12Z
1,015
3
transformers
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-04-22T05:32:02Z
--- license: apache-2.0 --- Based on Meta-Llama-3-8B-Instruct and governed by the Meta Llama 3 License agreement: https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct We don't know exactly how good this model is in benchmarks since we have not benchmarked it yet, but we think real prompts and usage are more telling anyway. From our testing, this model: - Refuses less - Is more uncensored - Follows requests better - Can reply in requested formats better without adding unnecessary information We are happy for anyone to try it out and give some feedback. You can also try this model on our API at https://www.awanllm.com/ Training: - 2048 sequence length, while the base model is 8192 sequence length. From testing, it still performs on the full 8192 context just fine. - Trained on a modified and improved version of Cognitive Computations Eric Hartford's Dolphin dataset. https://huggingface.co/datasets/cognitivecomputations/dolphin - Training duration was around 2 days on 2x RTX 3090 on our own machine, using 4-bit loading and QLoRA with rank 64 and alpha 128, resulting in ~2% trainable weights. The goal for this model is to be less censored and great at general tasks, like the previous Dolphin-based models by Eric Hartford. We started training this BEFORE they launched their own full-weight-trained Llama-3-8B-Dolphin-2.9 with their own curated datasets and the newer "Dolphin 2.9" dataset, but we think this model is still a unique take on Llama 3 8B Instruct and the Dolphin dataset. https://huggingface.co/cognitivecomputations/dolphin-2.9-llama3-8b The difference from their Dolphin 2.9 model is that we trained this using Meta's new Llama 3 instruct format rather than the regular ChatML format that Dolphin models are usually trained on. This is because we think it performs better using the format it was originally trained on.
Instruct format: ``` <|begin_of_text|><|start_header_id|>system<|end_header_id|> {{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|> {{ user_message_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|> {{ model_answer_1 }}<|eot_id|><|start_header_id|>user<|end_header_id|> {{ user_message_2 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|> ``` Quants: AWQ: https://huggingface.co/AwanLLM/Meta-Llama-3-8B-Instruct-Dolfin-AWQ GGUF: https://huggingface.co/AwanLLM/Meta-Llama-3-8B-Instruct-Dolfin-v0.1-GGUF FP16: https://huggingface.co/AwanLLM/Meta-Llama-3-8B-Instruct-Dolfin Exllamav2: 4bpw: https://huggingface.co/AwanLLM/Meta-Llama-3-8B-Instruct-Dolfin-v0.1-exl2-h8-4bpw-exl2 8bpw: https://huggingface.co/AwanLLM/Meta-Llama-3-8B-Instruct-Dolfin-v0.1-exl2-h8-8bpw-exl2 [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) Axolotl Config: ``` base_model: Meta-Llama-3-8B-Instruct model_type: LlamaForCausalLM tokenizer_type: AutoTokenizer train_on_inputs: false group_by_length: false load_in_8bit: false load_in_4bit: true strict: false sequence_len: 2048 bf16: true fp16: false tf32: false flash_attention: true # Data datasets: - path: flan1m-universal-uncensored-system-2048.jsonl type: system_prompt: "" system_format: "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{system}<|eot_id|><|start_header_id|>user<|end_header_id|>\n\n" field_system: system field_instruction: input field_output: output format: "{instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" no_input_format: "{instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" warmup_steps: 10 dataset_prepared_path: ./last_run_prepared # Iterations num_epochs: 1 saves_per_epoch: 4 # Evaluation val_set_size: 0.01 eval_table_size: eval_table_max_new_tokens: eval_sample_packing: false evals_per_epoch: 4 # LoRA output_dir: ./qlora-out adapter: qlora lora_model_dir: lora_r: 64 lora_alpha: 128 lora_dropout: 0.05 lora_target_linear: true lora_fan_in_fan_out: lora_target_modules: save_safetensors: true # Sampling sample_packing: true pad_to_sequence_len: true # Batching gradient_accumulation_steps: 32 micro_batch_size: 4 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: true # Optimizer optimizer: paged_adamw_8bit lr_scheduler: cosine learning_rate: 0.0002 # Misc early_stopping_patience: resume_from_checkpoint: logging_steps: 1 debug: deepspeed: zero3_bf16.json weight_decay: 0.1 special_tokens: pad_token: <|end_of_text|> ```
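A minimal usage sketch (not part of the original card), assuming the tokenizer ships with the Llama 3 instruct chat template shown above:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "OwenArli/Meta-Llama-3-8B-Instruct-Dolfin-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the plot of Hamlet in two sentences."},
]
# apply_chat_template renders the Llama 3 instruct format described above
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=200, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```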
mradermacher/IceMochaccinoRP-7b-i1-GGUF
mradermacher
2024-06-01T16:28:42Z
1,015
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "alpaca", "mistral", "not-for-all-audiences", "nsfw", "en", "base_model:icefog72/IceMochaccinoRP-7b", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
null
2024-05-31T10:48:07Z
--- base_model: icefog72/IceMochaccinoRP-7b language: - en library_name: transformers license: cc-by-nc-4.0 quantized_by: mradermacher tags: - mergekit - merge - alpaca - mistral - not-for-all-audiences - nsfw --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/icefog72/IceMochaccinoRP-7b <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/IceMochaccinoRP-7b-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ1_S.gguf) | i1-IQ1_S | 1.7 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ1_M.gguf) | i1-IQ1_M | 1.9 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.1 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.3 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ2_S.gguf) | i1-IQ2_S | 2.4 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ2_M.gguf) | i1-IQ2_M | 2.6 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q2_K.gguf) | i1-Q2_K | 2.8 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 2.9 | lower quality | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.1 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.3 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ3_S.gguf) | i1-IQ3_S | 3.3 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ3_M.gguf) | i1-IQ3_M | 3.4 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q3_K_M.gguf) | i1-Q3_K_M | 3.6 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q3_K_L.gguf) | i1-Q3_K_L | 3.9 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.0 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q4_0.gguf) | i1-Q4_0 | 4.2 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.2 | 
optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q4_K_M.gguf) | i1-Q4_K_M | 4.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.1 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/IceMochaccinoRP-7b-i1-GGUF/resolve/main/IceMochaccinoRP-7b.i1-Q6_K.gguf) | i1-Q6_K | 6.0 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
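As a small sketch (not from the original card), a single quant from the table above can be fetched with `huggingface_hub` and then loaded by llama.cpp or any compatible GGUF runtime:

```python
from huggingface_hub import hf_hub_download

# Downloads the "fast, recommended" i1-Q4_K_M quant listed in the table above
gguf_path = hf_hub_download(
    repo_id="mradermacher/IceMochaccinoRP-7b-i1-GGUF",
    filename="IceMochaccinoRP-7b.i1-Q4_K_M.gguf",
)
print(gguf_path)  # local path to pass to llama.cpp / llama-cpp-python
```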
appvoid/palmer-004-2406
appvoid
2024-07-01T17:06:56Z
1,015
0
transformers
[ "transformers", "safetensors", "gguf", "llama", "text-generation", "mergekit", "merge", "base_model:appvoid/palmer-002-32k", "base_model:raidhon/coven_tiny_1.1b_32k_orpo_alpha", "base_model:appvoid/palmer-003", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-06-02T04:37:59Z
--- base_model: - appvoid/palmer-002-32k - raidhon/coven_tiny_1.1b_32k_orpo_alpha - appvoid/palmer-003 library_name: transformers tags: - mergekit - merge license: apache-2.0 --- <style> @import url('https://fonts.googleapis.com/css2?family=Vollkorn:ital,wght@0,400..900;1,400..900&display=swap'); </style> <div style="background-color: #101010; border-radius: .5rem; padding: 2rem; font-family: monospace; font-size: .85rem; text-align: justify;"> ![palmer-004](https://huggingface.co/appvoid/palmer-004/resolve/main/palmer-004.jpeg) #### june update This model has improved overall performance at the expense of a small degradation on Winogrande. As with all palmer models, it is tuned to respond without requiring any specific prompt format; feel free to further fine-tune it for your specific use case. | Model | MMLU | ARC-C | HellaSwag | PIQA | Winogrande | Average | |--------------------------------|-------|-------|-----------|--------|------------|---------| | tinyllama-3t | 0.2577| 0.3029| 0.5935 | 0.7329 | 0.5959 | 0.4966 | | palmer-004 | 0.2601| 0.3456| 0.6138 | 0.7443 | **0.6511** | 0.5229 | | palmer-004-2406 | 0.2661| 0.3490| **0.6173** | **0.7481** | 0.6417 | **0.5244** | </div>
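A quick usage sketch (an assumption, not from the card): since palmer models are tuned to answer plain prompts without a special template, a bare text-generation call should be enough.

```python
from transformers import pipeline

generator = pipeline("text-generation", model="appvoid/palmer-004-2406")
out = generator("The three most common uses of a language model are", max_new_tokens=64)
print(out[0]["generated_text"])
```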
facebook/convnextv2-base-22k-224
facebook
2023-02-20T13:13:23Z
1,014
2
transformers
[ "transformers", "pytorch", "convnextv2", "image-classification", "vision", "dataset:imagenet-22k", "arxiv:2301.00808", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
2023-02-19T07:08:46Z
--- license: apache-2.0 tags: - vision - image-classification datasets: - imagenet-22k widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace --- # ConvNeXt V2 (base-sized model) ConvNeXt V2 model pretrained using the FCMAE framework and fine-tuned on the ImageNet-22K dataset at resolution 224x224. It was introduced in the paper [ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders](https://arxiv.org/abs/2301.00808) by Woo et al. and first released in [this repository](https://github.com/facebookresearch/ConvNeXt-V2). Disclaimer: The team releasing ConvNeXt V2 did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description ConvNeXt V2 is a pure convolutional model (ConvNet) that introduces a fully convolutional masked autoencoder framework (FCMAE) and a new Global Response Normalization (GRN) layer to ConvNeXt. ConvNeXt V2 significantly improves the performance of pure ConvNets on various recognition benchmarks. ![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/convnextv2_architecture.png) ## Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=convnextv2) to look for fine-tuned versions on a task that interests you. ### How to use Here is how to use this model to classify an image of the COCO 2017 dataset into one of the 21,841 ImageNet-22k classes: ```python from transformers import AutoImageProcessor, ConvNextV2ForImageClassification import torch from datasets import load_dataset dataset = load_dataset("huggingface/cats-image") image = dataset["test"]["image"][0] preprocessor = AutoImageProcessor.from_pretrained("facebook/convnextv2-base-22k-224") model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-base-22k-224") inputs = preprocessor(image, return_tensors="pt") with torch.no_grad(): logits = model(**inputs).logits # model predicts one of the 21,841 ImageNet-22k classes predicted_label = logits.argmax(-1).item() print(model.config.id2label[predicted_label]) ``` For more code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/master/en/model_doc/convnextv2). ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2301-00808, author = {Sanghyun Woo and Shoubhik Debnath and Ronghang Hu and Xinlei Chen and Zhuang Liu and In So Kweon and Saining Xie}, title = {ConvNeXt {V2:} Co-designing and Scaling ConvNets with Masked Autoencoders}, journal = {CoRR}, volume = {abs/2301.00808}, year = {2023}, url = {https://doi.org/10.48550/arXiv.2301.00808}, doi = {10.48550/arXiv.2301.00808}, eprinttype = {arXiv}, eprint = {2301.00808}, timestamp = {Tue, 10 Jan 2023 15:10:12 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2301-00808.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
wkcn/TinyCLIP-ViT-61M-32-Text-29M-LAION400M
wkcn
2024-05-08T03:06:34Z
1,014
1
transformers
[ "transformers", "pytorch", "safetensors", "clip", "zero-shot-image-classification", "tinyclip", "dataset:laion/laion400m", "license:mit", "endpoints_compatible", "region:us" ]
zero-shot-image-classification
2023-12-19T14:14:18Z
--- license: mit datasets: - laion/laion400m pipeline_tag: zero-shot-image-classification tags: - tinyclip --- # TinyCLIP: CLIP Distillation via Affinity Mimicking and Weight Inheritance **[ICCV 2023]** - [TinyCLIP: CLIP Distillation via Affinity Mimicking and Weight Inheritance](https://openaccess.thecvf.com/content/ICCV2023/html/Wu_TinyCLIP_CLIP_Distillation_via_Affinity_Mimicking_and_Weight_Inheritance_ICCV_2023_paper.html) **TinyCLIP** is a novel **cross-modal distillation** method for large-scale language-image pre-trained models. The method introduces two core techniques: **affinity mimicking** and **weight inheritance**. This work unleashes the capacity of small CLIP models, fully leveraging large-scale models as well as pre-training data and striking the best trade-off between speed and accuracy. <p align="center"> <img src="./figure/TinyCLIP.jpg" width="1000"> </p> ## Use with Transformers ```python from PIL import Image import requests from transformers import CLIPProcessor, CLIPModel model = CLIPModel.from_pretrained("wkcn/TinyCLIP-ViT-61M-32-Text-29M-LAION400M") processor = CLIPProcessor.from_pretrained("wkcn/TinyCLIP-ViT-61M-32-Text-29M-LAION400M") url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) outputs = model(**inputs) logits_per_image = outputs.logits_per_image # this is the image-text similarity score probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` ## Highlights <p align="center"> <img src="./figure/fig1.jpg" width="500"> </p> * TinyCLIP ViT-45M/32 uses only **half parameters** of ViT-B/32 to achieves **comparable zero-shot performance**. * TinyCLIP ResNet-19M reduces the parameters by **50\%** while getting **2x** inference speedup, and obtains **56.4\%** accuracy on ImageNet. 
## Model Zoo | Model | Weight inheritance | Pretrain | IN-1K Acc@1(%) | MACs(G) | Throughput(pairs/s) | Link | |--------------------|--------------------|---------------|----------------|---------|---------------------|------| [TinyCLIP ViT-39M/16 Text-19M](./src/open_clip/model_configs/TinyCLIP-ViT-39M-16-Text-19M.json) | manual | YFCC-15M | 63.5 | 9.5 | 1,469 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ViT-39M-16-Text-19M-YFCC15M.pt) [TinyCLIP ViT-8M/16 Text-3M](./src/open_clip/model_configs/TinyCLIP-ViT-8M-16-Text-3M.json) | manual | YFCC-15M | 41.1 | 2.0 | 4,150 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ViT-8M-16-Text-3M-YFCC15M.pt) [TinyCLIP ResNet-30M Text-29M](./src/open_clip/model_configs/TinyCLIP-ResNet-30M-Text-29M.json) | manual | LAION-400M | 59.1 | 6.9 | 1,811 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ResNet-30M-Text-29M-LAION400M.pt) [TinyCLIP ResNet-19M Text-19M](./src/open_clip/model_configs/TinyCLIP-ResNet-19M-Text-19M.json) | manual | LAION-400M | 56.4 | 4.4 | 3,024| [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ResNet-19M-Text-19M-LAION400M.pt) [TinyCLIP ViT-61M/32 Text-29M](./src/open_clip/model_configs/TinyCLIP-ViT-61M-32-Text-29M.json) | manual | LAION-400M | 62.4 | 5.3 | 3,191|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ViT-61M-32-Text-29M-LAION400M.pt) [TinyCLIP ViT-40M/32 Text-19M](./src/open_clip/model_configs/TinyCLIP-ViT-40M-32-Text-19M.json) | manual | LAION-400M | 59.8 | 3.5 | 4,641|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-ViT-40M-32-Text-19M-LAION400M.pt) TinyCLIP ViT-63M/32 Text-31M | auto | LAION-400M | 63.9 | 5.6 | 2,905|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-63M-32-Text-31M-LAION400M.pt) TinyCLIP ViT-45M/32 Text-18M | auto | LAION-400M | 61.4 | 3.7 | 3,682|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-45M-32-Text-18M-LAION400M.pt) TinyCLIP ViT-22M/32 Text-10M | auto | LAION-400M | 53.7 | 1.9 | 5,504|[Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-22M-32-Text-10M-LAION400M.pt) TinyCLIP ViT-63M/32 Text-31M | auto | LAION+YFCC-400M | 64.5 | 5.6| 2,909 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-63M-32-Text-31M-LAIONYFCC400M.pt) TinyCLIP ViT-45M/32 Text-18M | auto | LAION+YFCC-400M | 62.7 | 1.9 | 3,685 | [Model](https://github.com/wkcn/TinyCLIP-model-zoo/releases/download/checkpoints/TinyCLIP-auto-ViT-45M-32-Text-18M-LAIONYFCC400M.pt) Note: The configs of models with auto inheritance are generated automatically. ## Official PyTorch Implementation https://github.com/microsoft/Cream/tree/main/TinyCLIP ## Citation If this repo is helpful for you, please consider to cite it. :mega: Thank you! 
:) ```bibtex @InProceedings{tinyclip, title = {TinyCLIP: CLIP Distillation via Affinity Mimicking and Weight Inheritance}, author = {Wu, Kan and Peng, Houwen and Zhou, Zhenghong and Xiao, Bin and Liu, Mengchen and Yuan, Lu and Xuan, Hong and Valenzuela, Michael and Chen, Xi (Stephen) and Wang, Xinggang and Chao, Hongyang and Hu, Han}, booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)}, month = {October}, year = {2023}, pages = {21970-21980} } ``` ## Acknowledge Our code is based on [CLIP](https://github.com/openai/CLIP), [OpenCLIP](https://github.com/mlfoundations/open_clip), [CoFi](https://github.com/princeton-nlp/CoFiPruning) and [PyTorch](https://github.com/pytorch/pytorch). Thank contributors for their awesome contribution! ## License - [License](https://github.com/microsoft/Cream/blob/main/TinyCLIP/LICENSE)
nlpguy/Hermes-low-tune-2
nlpguy
2024-03-04T13:48:13Z
1,014
2
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "conversational", "arxiv:2212.04089", "base_model:simonveitner/Math-OpenHermes-2.5-Mistral-7B", "base_model:mlabonne/NeuralHermes-2.5-Mistral-7B-laser", "base_model:openaccess-ai-collective/dpopenhermes-alpha-v0", "base_model:teknium/OpenHermes-2.5-Mistral-7B", "base_model:mlabonne/NeuralHermes-2.5-Mistral-7B", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-05T11:59:53Z
--- license: apache-2.0 tags: - mergekit - merge base_model: - simonveitner/Math-OpenHermes-2.5-Mistral-7B - mlabonne/NeuralHermes-2.5-Mistral-7B-laser - openaccess-ai-collective/dpopenhermes-alpha-v0 - teknium/OpenHermes-2.5-Mistral-7B - mlabonne/NeuralHermes-2.5-Mistral-7B model-index: - name: Hermes-low-tune-2 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 65.61 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Hermes-low-tune-2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 84.47 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Hermes-low-tune-2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 63.69 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Hermes-low-tune-2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 53.18 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Hermes-low-tune-2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 77.74 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Hermes-low-tune-2 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 63.53 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=nlpguy/Hermes-low-tune-2 name: Open LLM Leaderboard --- # merged This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [task arithmetic](https://arxiv.org/abs/2212.04089) merge method using [teknium/OpenHermes-2.5-Mistral-7B](https://huggingface.co/teknium/OpenHermes-2.5-Mistral-7B) as a base. 
### Models Merged The following models were included in the merge: * [simonveitner/Math-OpenHermes-2.5-Mistral-7B](https://huggingface.co/simonveitner/Math-OpenHermes-2.5-Mistral-7B) * [mlabonne/NeuralHermes-2.5-Mistral-7B-laser](https://huggingface.co/mlabonne/NeuralHermes-2.5-Mistral-7B-laser) * [openaccess-ai-collective/dpopenhermes-alpha-v0](https://huggingface.co/openaccess-ai-collective/dpopenhermes-alpha-v0) * [mlabonne/NeuralHermes-2.5-Mistral-7B](https://huggingface.co/mlabonne/NeuralHermes-2.5-Mistral-7B) ### Configuration The following YAML configuration was used to produce this model: ```yaml base_model: teknium/OpenHermes-2.5-Mistral-7B dtype: bfloat16 merge_method: task_arithmetic slices: - sources: - layer_range: [0, 32] model: teknium/OpenHermes-2.5-Mistral-7B - layer_range: [0, 32] model: simonveitner/Math-OpenHermes-2.5-Mistral-7B parameters: weight: 0.25 - layer_range: [0, 32] model: openaccess-ai-collective/dpopenhermes-alpha-v0 parameters: weight: 0.25 - layer_range: [0, 32] model: mlabonne/NeuralHermes-2.5-Mistral-7B parameters: weight: 0.25 - layer_range: [0, 32] model: mlabonne/NeuralHermes-2.5-Mistral-7B-laser parameters: weight: 0.25 ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_nlpguy__Hermes-low-tune-2) | Metric |Value| |---------------------------------|----:| |Avg. |68.04| |AI2 Reasoning Challenge (25-Shot)|65.61| |HellaSwag (10-Shot) |84.47| |MMLU (5-Shot) |63.69| |TruthfulQA (0-shot) |53.18| |Winogrande (5-shot) |77.74| |GSM8k (5-shot) |63.53|
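A usage sketch (not part of the original card): the OpenHermes-2.5 parents use the ChatML format, so the assumption below is that the merged tokenizer carries over a ChatML chat template; if it doesn't, format the prompt manually.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "nlpguy/Hermes-low-tune-2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Explain task arithmetic merging in one paragraph."}]
# Assumes the tokenizer exposes a (ChatML-style) chat template inherited from OpenHermes-2.5
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(inputs, max_new_tokens=200)
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```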
chanwit/flux-base-optimized
chanwit
2024-02-06T08:53:35Z
1,014
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "en", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-13T14:12:40Z
--- license: apache-2.0 language: - en --- # Flux-Base-Optimized `flux-base-optimized` is the base model for finetuning the series of `flux-7b` models. It is a hierarchical SLERP merge of the following models: * mistralai/Mistral-7B-v0.1 (Apache 2.0) * teknium/OpenHermes-2.5-Mistral-7B (Apache 2.0) * Intel/neural-chat-7b-v3-3 (Apache 2.0) * meta-math/MetaMath-Mistral-7B (Apache 2.0) * openchat/openchat-3.5-0106, formerly openchat/openchat-3.5-1210 (Apache 2.0) Here's how we did the hierarchical SLERP merge: ``` [flux-base-optimized] ↑ | [stage-1]-+-[openchat] ↑ | [stage-0]-+-[meta-math] ↑ | [openhermes]-+-[neural-chat] ```
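For intuition, here is an illustrative sketch of the SLERP operation applied at each stage of the diagram above (a simplification, not mergekit's exact implementation, which works layer by layer with its own interpolation schedule):

```python
import torch

def slerp(w_a: torch.Tensor, w_b: torch.Tensor, t: float = 0.5, eps: float = 1e-8) -> torch.Tensor:
    """Spherical linear interpolation between two same-shaped weight tensors."""
    a, b = w_a.flatten().float(), w_b.flatten().float()
    # Angle between the two weight vectors (computed on normalised copies)
    cos_omega = torch.clamp(torch.dot(a / (a.norm() + eps), b / (b.norm() + eps)), -1.0, 1.0)
    omega = torch.acos(cos_omega)
    if omega < eps:  # nearly parallel: fall back to plain linear interpolation
        merged = (1 - t) * a + t * b
    else:
        merged = (torch.sin((1 - t) * omega) * a + torch.sin(t * omega) * b) / torch.sin(omega)
    return merged.reshape(w_a.shape).to(w_a.dtype)

# e.g. stage-0 would blend corresponding tensors: slerp(openhermes_w, neural_chat_w, t=0.5)
```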
ragavsachdeva/magi
ragavsachdeva
2024-05-17T12:56:19Z
1,014
18
transformers
[ "transformers", "pytorch", "magi", "feature-extraction", "Manga", "Object Detection", "OCR", "Clustering", "Diarisation", "custom_code", "en", "arxiv:2401.10224", "region:us" ]
feature-extraction
2024-01-18T21:00:44Z
--- language: - en tags: - Manga - Object Detection - OCR - Clustering - Diarisation --- <style> .title-container { display: flex; flex-direction: column; /* Stack elements vertically */ justify-content: center; align-items: center; } .title { font-size: 2em; text-align: center; color: #333; font-family: 'Comic Sans MS', cursive; /* Use Comic Sans MS font */ text-transform: uppercase; letter-spacing: 0.1em; padding: 0.5em 0 0.2em; background: transparent; } .title span { background: -webkit-linear-gradient(45deg, #6495ED, #4169E1); /* Blue gradient */ -webkit-background-clip: text; -webkit-text-fill-color: transparent; } .subheading { font-size: 1.5em; /* Adjust the size as needed */ text-align: center; color: #555; /* Adjust the color as needed */ font-family: 'Comic Sans MS', cursive; /* Use Comic Sans MS font */ } .authors { font-size: 1em; /* Adjust the size as needed */ text-align: center; color: #777; /* Adjust the color as needed */ font-family: 'Comic Sans MS', cursive; /* Use Comic Sans MS font */ padding-top: 1em; } .affil { font-size: 1em; /* Adjust the size as needed */ text-align: center; color: #777; /* Adjust the color as needed */ font-family: 'Comic Sans MS', cursive; /* Use Comic Sans MS font */ } </style> <div class="title-container"> <div class="title"> The <span>Ma</span>n<span>g</span>a Wh<span>i</span>sperer </div> <div class="subheading"> Automatically Generating Transcriptions for Comics </div> <div class="authors"> Ragav Sachdeva and Andrew Zisserman </div> <div class="affil"> University of Oxford </div> <div style="display: flex;"> <a href="https://arxiv.org/abs/2401.10224"><img alt="Static Badge" src="https://img.shields.io/badge/arXiv-2401.10224-blue"></a> &emsp; <img alt="Dynamic JSON Badge" src="https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fhuggingface.co%2Fapi%2Fmodels%2Fragavsachdeva%2Fmagi%3Fexpand%255B%255D%3Ddownloads%26expand%255B%255D%3DdownloadsAllTime&query=%24.downloadsAllTime&label=%F0%9F%A4%97%20Downloads"> </div> </div> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/630852d2f0dc38fb47c347a4/B3ngZKXGZGBcZgPK6_XF0.png) # Usage ```python from transformers import AutoModel import numpy as np from PIL import Image import torch import os images = [ "path_to_image1.jpg", "path_to_image2.png", ] def read_image_as_np_array(image_path): with open(image_path, "rb") as file: image = Image.open(file).convert("L").convert("RGB") image = np.array(image) return image images = [read_image_as_np_array(image) for image in images] model = AutoModel.from_pretrained("ragavsachdeva/magi", trust_remote_code=True).cuda() with torch.no_grad(): results = model.predict_detections_and_associations(images) text_bboxes_for_all_images = [x["texts"] for x in results] ocr_results = model.predict_ocr(images, text_bboxes_for_all_images) for i in range(len(images)): model.visualise_single_image_prediction(images[i], results[i], filename=f"image_{i}.png") model.generate_transcript_for_single_image(results[i], ocr_results[i], filename=f"transcript_{i}.txt") ``` # License and Citation The provided model and datasets are available for unrestricted use in personal, research, non-commercial, and not-for-profit endeavors. For any other usage scenarios, kindly contact me via email, providing a detailed description of your requirements, to establish a tailored licensing arrangement. 
My contact information can be found on my website: ragavsachdeva [dot] github [dot] io ``` @misc{sachdeva2024manga, title={The Manga Whisperer: Automatically Generating Transcriptions for Comics}, author={Ragav Sachdeva and Andrew Zisserman}, year={2024}, eprint={2401.10224}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
tlc4418/pythia_70m_sft
tlc4418
2024-02-12T23:46:34Z
1,014
0
transformers
[ "transformers", "pytorch", "gpt_neox", "text-generation", "dataset:tatsu-lab/alpaca_farm", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-01-28T18:51:20Z
--- datasets: - tatsu-lab/alpaca_farm --- A 70M-parameter Pythia model after SFT on the 'sft' split of the AlpacaFarm dataset. It was used as a base for the reward models in 'Reward Model Ensembles Mitigate Overoptimization'.
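A usage sketch (an assumption, not from the card): the prompt template isn't documented here, so the standard Alpaca-style instruction format — commonly used with AlpacaFarm SFT models — is assumed below.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tlc4418/pythia_70m_sft"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Assumed Alpaca-style instruction prompt; adjust if the training format differs
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nName three primary colors.\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```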
prince-canuma/Damysus-2.7B-Chat
prince-canuma
2024-02-17T10:13:39Z
1,014
4
transformers
[ "transformers", "safetensors", "phi", "text-generation", "nlp", "phi-2", "instruct", "conversational", "custom_code", "en", "dataset:Open-Orca/SlimOrca", "dataset:prince-canuma/TinyOrca", "base_model:microsoft/phi-2", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-11T14:43:52Z
--- language: - en license: mit library_name: transformers tags: - nlp - phi - phi-2 - instruct base_model: - microsoft/phi-2 datasets: - Open-Orca/SlimOrca - prince-canuma/TinyOrca model-index: - name: Damysus-2.7B-Chat results: - task: type: text-generation metrics: - name: Average type: Average value: 60.49 verified: true source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - task: type: text-generation dataset: name: ARC (25-shot) type: ai2_arc metrics: - name: Accuracy Norm type: acc_norm value: 59.81 verified: true source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - task: type: text-generation dataset: name: Hellaswag (10-shot) type: Hellaswag metrics: - name: Accuracy Norm type: acc value: 74.52 verified: true source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - task: type: text-generation dataset: name: MMLU (5-shot) type: MMLU metrics: - name: Accuracy type: acc value: 56.33 verified: true source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - task: type: text-generation dataset: name: Truthful QA type: Truthful_QA metrics: - name: Multi-true type: mc2 value: 46.74 verified: true source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - task: type: text-generation dataset: name: Winogrande (5-shot) type: Winogrande metrics: - name: Accuracy type: acc value: 75.06 verified: true source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard - task: type: text-generation dataset: name: GSM8K (5-shot) type: GSM8K metrics: - name: Accuracy type: acc value: 50.64 verified: true source: name: Open LLM Leaderboard url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard --- # Model Summary <img src="Damysus.png" width="500" alt="Damysus - the fastest giant"/> <!-- Provide a quick summary of what the model is/does. --> This model is a instruction-tuned version of Phi-2, a Transformer model with 2.7 billion parameters from Microsoft. The model has undergone further training to better follow specific user instructions, enhancing its ability to perform tasks as directed and improve its interaction with users. This additional training helps the model to understand context better, generate more accurate and relevant responses, and adapt to a wide range of language-based tasks such as: - Questions and Answers, - Data Extraction, - Structured Outputs (i.e., JSON outputs), - And providing explanations, ## Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [Prince Canuma](https://huggingface.co/prince-canuma) - **Model type:** Transformer - **License:** MIT - **Finetuned from model:** microsoft/phi-2 ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> You can use this model to build local/cloud RAG applications. It can serve as the: - Answer synthesizer, - Summarizer, - Or query rewriter model. 
### Limitations This model inherits some of the base model's limitations, such as: - Generate Inaccurate Code and Facts: The model may produce incorrect code snippets and statements. Users should treat these outputs as suggestions or starting points, not as definitive or accurate solutions. - Limited Scope for code: The majority of Phi-2's training data is Python-based and uses common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. - Language Limitations: The model is primarily designed to understand standard English. Informal English, slang, or any other languages might pose challenges to its comprehension, leading to potential misinterpretations or errors in response. ## How to Get Started with the Model Use the code below to get started with the model. ```python from transformers import pipeline, Conversation chatbot = pipeline("conversational", model="prince-canuma/Damysus-2.7B-Chat") conversation = Conversation("I'm looking for a movie - what's your favourite one?") output = chatbot(conversation) print(output) ``` Or you can instantiate the model and tokenizer directly ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("prince-canuma/Damysus-2.7B-Chat") model = AutoModelForCausalLM.from_pretrained("prince-canuma/Damysus-2.7B-Chat").to("cuda") inputs = tokenizer.apply_chat_template( [ {"content":"You are a helpful AI assistant","role":"system"}, {"content":"I'm looking for a movie - what's your favourite one?","role":"user"}, ], add_generation_prompt=True, return_tensors="pt", ).to("cuda") outputs = model.generate(inputs, do_sample=False, max_new_tokens=256) input_length = inputs.shape[1] print(tokenizer.batch_decode(outputs[:, input_length:], skip_special_tokens=True)[0]) ``` Output: ```shell My favorite movie is "The Shawshank Redemption." It's a powerful and inspiring story about hope, friendship, and redemption. The performances by Tim Robbins and Morgan Freeman are exceptional, and the film's themes and messages are timeless. I highly recommend it to anyone who enjoys a well-crafted and emotionally engaging story. ``` ### Structured Output ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("prince-canuma/Damysus-2.7B-Chat") model = AutoModelForCausalLM.from_pretrained("prince-canuma/Damysus-2.7B-Chat").to("cuda") inputs = tokenizer.apply_chat_template( [ {"content":"You are a Robot that ONLY outputs JSON. Use this structure: {'entities': [{'type':..., 'name':...}]}.","role":"system"}, {"content":"""Extract the entities of type 'technology' and 'file_type' in JSON format from the following passage: AI is a transformative force in document processing employing technologies such as 'Machine Learning (ML), Natural Language Processing (NLP) and Optical Character Recognition (OCR) to understand, interpret, and summarize text. These technologies enhance accuracy, increase efficiency, and allow you and your company to process high volumes of data in a short amount of time.
For instance, you can easily extract key points and summarize a large PDF document (i.e., 500 pages) in just a few seconds.""", "role":"user"}, ], add_generation_prompt=True, return_tensors="pt", ).to("cuda") outputs = model.generate(inputs, do_sample=False, max_new_tokens=256) input_length = inputs.shape[1] print(tokenizer.batch_decode(outputs[:, input_length:], skip_special_tokens=True)[0]) ``` Output: ```json { "entities": [ { "type": "technology", "name": "Machine Learning (ML)" }, { "type": "technology", "name": "Natural Language Processing (NLP)" }, { "type": "technology", "name": "Optical Character Recognition (OCR)" }, { "type": "file_type", "name": "PDF" }, ] } ``` ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> I used [SlimOrca](https://huggingface.co/datasets/Open-Orca/SlimOrca) dataset, a new curated subset of our OpenOrca data. In the course of this study, the [SlimOrca](https://huggingface.co/datasets/Open-Orca/SlimOrca) dataset was used, representing a meticulously curated subset derived from the broader OpenOrca dataset. This release provides an efficient means of reaching performance on-par with using larger slices of the [OpenOrca](https://huggingface.co/datasets/Open-Orca/OpenOrca), while only including ~500k GPT-4 completions. Subsequently, two distinct subsets were crafted, comprising 102,000 and 1,000 samples, denoted as: - [prince-canuma/SmallOrca](https://huggingface.co/datasets/prince-canuma/SmallOrca) - [prince-canuma/TinyOrca](https://huggingface.co/datasets/prince-canuma/TinyOrca) Although experimentation was conducted with both datasets, optimal results were achieved through fine-tuning on a modest set of 200 samples. Notably, the investigation revealed that augmenting the training data beyond this threshold predominantly enhanced the model's proficiency in generating Chain-of-Thought responses. However, it is imperative to note that the preference for Chain-of-Thought responses may not be universally applicable. Particularly in scenarios like the RAG setup, succinct answers to prompts are often favored, especially for straightforward queries. ### Training Procedure #### Preprocessing 1. Convert dataset to chatML format 2. Remove all samples with more than 2048 tokens (Phi-2 context size) 3. Mask instructions (System and User) at training time. #### LoRA Config - **lora_alpha:** 128, - **lora_dropout:** 0.05, - **r:** 256, - **bias:** "none", - **target_modules:** "all-linear", - **task_type:** "CAUSAL_LM", #### Training Hyperparameters - **Training regime:** bf16 mixed precision, <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> - **max_steps:** 100, - **per_device_train_batch_size:** 2, - **gradient_accumulation_steps:** 2, - **optim:** "adamw_torch_fused", - **learning_rate:** 2e-4, - **max_grad_norm:** 0.3, - **warmup_ratio:** 0.03, - **lr_scheduler_type:** "constant", #### Trainer - **max_seq_length:** 1744, - **data_collator:** DataCollatorForCompletionOnlyLM ## Evaluation <img src="truthfulQA.png" width="800" alt="Damysus-2.7B-chat truthfulQA benchmark results"/> <!-- This section describes the evaluation protocols and provides the results. 
--> We evaluate models on 7 key benchmarks using the Eleuther AI Language Model Evaluation Harness , a unified framework to test generative language models on a large number of different evaluation tasks. - AI2 Reasoning Challenge (25-shot) - a set of grade-school science questions. - HellaSwag (10-shot) - a test of commonsense inference, which is easy for humans (~95%) but challenging for SOTA models. - MMLU (5-shot) - a test to measure a text model's multitask accuracy. The test covers 57 tasks including elementary mathematics, US history, computer science, law, and more. - TruthfulQA (0-shot) - a test to measure a model's propensity to reproduce falsehoods commonly found online. Note: TruthfulQA is technically a 6-shot task in the Harness because each example is prepended with 6 Q/A pairs, even in the 0-shot setting. - Winogrande (5-shot) - an adversarial and difficult Winograd benchmark at scale, for commonsense reasoning. - GSM8k (5-shot) - diverse grade school math word problems to measure a model's ability to solve multi-step mathematical reasoning problems. For all these evaluations, a higher score is a better score. We chose these benchmarks as they test a variety of reasoning and general knowledge across a wide variety of fields in 0-shot and few-shot settings. Read more [here](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). ### Results | Model | AVG | ARC | Hellaswag | MMLU | Truthful QA | Winogrande | GSM8K | |-------|--------:|------:|----------:|-----:|----------:|----------:|----------:| | [NousResearch/Nous-Puffin-70B](NousResearch/Nous-Puffin-70B) | 64.91 | 67.41 | 87.37 | 69.77 | 46.77 | 83.9 | 34.27 | | [TheBloke/Llama-2-70B-fp16](https://huggingface.co/TheBloke/Llama-2-70B-fp16) | 64.52 | 67.32 | 87.33 | 69.83 | 44.92 | 83.74 | 33.97 | | [NousResearch/Yarn-Mistral-7B-64k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-64k) | 59.63 | 59.9 | 82.51 | 62.96 | 41.86 | 77.27 | 33.28 | | [Qwen1.5-4B-Chat](https://huggingface.co/Qwen/Qwen1.5-4B-Chat) | 46.79 | 43.26 | 69.73 | 55.55 | 44.79 | 64.96 | 2.43 | | [Microsoft/phi-2](https://huggingface.co/microsoft/phi-2) | 61.33 | 61.09 | 75.11 | 58.11 | 44.47 | 74.35 | 54.81 | | [Damysus-2.7B-Chat](https://huggingface.co/prince-canuma/Damysus-2.7B-Chat) (Ours) | 60.49 | 59.81 | 74.52 | 56.33 | **46.74** | **75.06** | 50.64 | ## Technical Specifications ### Compute Infrastructure - Modal Labs #### Hardware - OS: Linux - GPU: A10G #### Libraries - TRL - Transformers - PEFT - Datasets - Accelerate - torch - Wandb - Bitsandbytes - Plotly ## Future work I plan to explore the following tuning setups: - Function calling - DPO ## Citation <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. 
--> **BibTeX:** ```bibtex @misc{Damysus-2.7B-Chat, title={Damysus-2.7B-Chat} , author={Prince Canuma}, year={2024}, } ``` ```bibtex @misc{SlimOrca, title = {SlimOrca: An Open Dataset of GPT-4 Augmented FLAN Reasoning Traces, with Verification}, author = {Wing Lian and Guan Wang and Bleys Goodson and Eugene Pentland and Austin Cook and Chanvichet Vong and "Teknium"}, year = {2023}, publisher = {HuggingFace}, url = {https://https://huggingface.co/Open-Orca/SlimOrca} } ``` ```bibtex @misc{open-llm-leaderboard, author = {Edward Beeching and Clémentine Fourrier and Nathan Habib and Sheon Han and Nathan Lambert and Nazneen Rajani and Omar Sanseviero and Lewis Tunstall and Thomas Wolf}, title = {Open LLM Leaderboard}, year = {2023}, publisher = {Hugging Face}, howpublished = "\url{https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard}" } ```
Chrisisis/5EsZ8kjTxsVKSwY44tAJ7WpPdJ8FtjqEyyNukDz6aV5d93Ux_vgg
Chrisisis
2024-02-24T08:31:00Z
1,014
0
keras
[ "keras", "region:us" ]
null
2024-02-11T17:26:31Z
Entry not found
FoxEngineAi/Mega-Destroyer-8x7B
FoxEngineAi
2024-03-14T13:15:12Z
1,014
8
transformers
[ "transformers", "safetensors", "mixtral", "text-generation", "mergekit", "merge", "conversational", "arxiv:2311.03099", "arxiv:2306.01708", "base_model:mistralai/Mixtral-8x7B-v0.1", "base_model:Doctor-Shotgun/limarp-zloss-mixtral-8x7b-qlora", "base_model:LoneStriker/Air-Striker-Mixtral-8x7B-ZLoss-LoRA", "base_model:rombodawg/Open_Gpt4_8x7B_v0.2", "base_model:mistralai/Mixtral-8x7B-Instruct-v0.1", "base_model:Sao10K/Typhon-Mixtral-v1", "license:cc-by-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-14T03:01:32Z
--- base_model: - mistralai/Mixtral-8x7B-v0.1 - Doctor-Shotgun/limarp-zloss-mixtral-8x7b-qlora - mistralai/Mixtral-8x7B-v0.1 - LoneStriker/Air-Striker-Mixtral-8x7B-ZLoss-LoRA - rombodawg/Open_Gpt4_8x7B_v0.2 - mistralai/Mixtral-8x7B-Instruct-v0.1 - mistralai/Mixtral-8x7B-v0.1 - Sao10K/Typhon-Mixtral-v1 tags: - mergekit - merge license: cc-by-4.0 --- # mergeout This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [DARE](https://arxiv.org/abs/2311.03099) [TIES](https://arxiv.org/abs/2306.01708) merge method using [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) as a base. ### Models Merged The following models were included in the merge: * [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) + [Doctor-Shotgun/limarp-zloss-mixtral-8x7b-qlora](https://huggingface.co/Doctor-Shotgun/limarp-zloss-mixtral-8x7b-qlora) * [mistralai/Mixtral-8x7B-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) + [LoneStriker/Air-Striker-Mixtral-8x7B-ZLoss-LoRA](https://huggingface.co/LoneStriker/Air-Striker-Mixtral-8x7B-ZLoss-LoRA) * [rombodawg/Open_Gpt4_8x7B_v0.2](https://huggingface.co/rombodawg/Open_Gpt4_8x7B_v0.2) * [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1) * [Sao10K/Typhon-Mixtral-v1](https://huggingface.co/Sao10K/Typhon-Mixtral-v1) ### Configuration The following YAML configuration was used to produce this model: ```yaml models: - model: mistralai/Mixtral-8x7B-Instruct-v0.1 parameters: density: 0.6 weight: 1.0 - model: rombodawg/Open_Gpt4_8x7B_v0.2 parameters: density: 0.5 weight: 0.8 - model: mistralai/Mixtral-8x7B-v0.1+LoneStriker/Air-Striker-Mixtral-8x7B-ZLoss-LoRA parameters: density: 0.5 weight: 0.6 - model: Sao10K/Typhon-Mixtral-v1 parameters: density: 0.5 weight: 0.7 - model: mistralai/Mixtral-8x7B-v0.1+Doctor-Shotgun/limarp-zloss-mixtral-8x7b-qlora parameters: density: 0.5 weight: 0.4 merge_method: dare_ties base_model: mistralai/Mixtral-8x7B-v0.1 parameters: normalize: true int8_mask: true dtype: bfloat16 name: Mega-Destroyer-8x7B ``` High quality GGUF quants available here: https://huggingface.co/Artefact2/Mega-Destroyer-8x7B-GGUF (Thank you, Artefact for quanting it using an imatrix!) Hello everyone, this is Dampf. You might know me as the creator of Mythical-Destroyer-13B. This time, I collaborated with Mr.DragonFox aka FoxEngineAi, harnessing his powerful rig to deliver a Merge of multiple high quality Mixtral 8x7B models. My goal was to beat Bagel-Mistery-Tour V2 by Ycros and create the best Mixtral model to date. Did I succeed? Please try it out and decide for yourself! Aside from the obvious Mixtral Instruct, to keep its intelligence, I've merged Rombo's excellent Open_Gpt4_v0.2 model that consists of Jon Durbin's Bagel-DPO-8x7B and another highly regarded model, namely smelborp/MixtralOrochi8x7B. This model also combines different datasets together, meaning it should be agood fit for every task you throw at it. This model acts like the reasoning part in the merge. In contrast, we have Air-Striker and LimaRP at the creative side which will allow for great roleplays in different styles, they are also a good fit to enhance the model's writing capabilities greatly. And finally, I've merged Sao10K/Typhon-Mixtral-v1 to boost the story writing capabilities even further. 
It includes KoboldAI's latest Holodeck model, as well as a couple of his other recent models, and combines them into one package. My hope is that this will capture the magic Sao10K/Fimbulvetr-11B-v2 emits, just at the intelligence level of a Mixtral model. This one also includes Nous Hermes 2 DPO, a high quality instruct model that will boost its intelligence and sorta act like a balancer to all the creative stuff in the merge. What we have here is a model that should be fantastic at instruct and roleplay/creative tasks alike. So basically a general purpose model. Perhaps the pinnacle of Rocksmashing? Idk xD I just know it includes nearly all datasets under the sun. For that reason, it will likely work with every prompt format as well. So feel free to use Alpaca, Vicuna, ChatML, Llama 2 Chat or whatever your heart desires. A huge thank you to the creators of these fantastic datasets and fine-tunes in the respective merges, namely Jon Durbin, Teknium, Sao10K, MistralAI, LoneStriker, NeverSleep, Suikamelon, Doctor-Shotgun, KoboldAI and more. All credit goes to them. A thank you to the creators of the different merges I've merged (Mergeception!) as well! And of course a thank you to MrDragonFox for lending his compute! Please enjoy :D
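A rough usage sketch (not part of the original card): since the merge is a full Mixtral 8x7B, 4-bit loading with bitsandbytes is assumed below to keep VRAM manageable; any of the prompt formats mentioned above should work (Alpaca is used here).

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "FoxEngineAi/Mega-Destroyer-8x7B"
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map="auto")

# Alpaca-style prompt; ChatML, Vicuna or Llama 2 Chat formats should also work per the card
prompt = "### Instruction:\nWrite the opening paragraph of a heist story set on a space station.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.8)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```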
mgoin/Meta-Llama-3-70B-Instruct-Marlin
mgoin
2024-04-19T13:53:39Z
1,014
5
transformers
[ "transformers", "llama", "text-generation", "facebook", "meta", "pytorch", "llama-3", "conversational", "en", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "gptq", "region:us" ]
text-generation
2024-04-18T17:58:41Z
--- language: - en pipeline_tag: text-generation tags: - facebook - meta - pytorch - llama - llama-3 license: other license_name: llama3 license_link: LICENSE --- ## Model Details Meta developed and released the Meta Llama 3 family of large language models (LLMs), a collection of pretrained and instruction tuned generative text models in 8 and 70B sizes. The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks. Further, in developing these models, we took great care to optimize helpfulness and safety. **Model developers** Meta **Variations** Llama 3 comes in two sizes — 8B and 70B parameters — in pre-trained and instruction tuned variants. **Input** Models input text only. **Output** Models generate text and code only. **Model Architecture** Llama 3 is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. <table> <tr> <td> </td> <td><strong>Training Data</strong> </td> <td><strong>Params</strong> </td> <td><strong>Context length</strong> </td> <td><strong>GQA</strong> </td> <td><strong>Token count</strong> </td> <td><strong>Knowledge cutoff</strong> </td> </tr> <tr> <td rowspan="2" >Llama 3 </td> <td rowspan="2" >A new mix of publicly available online data. </td> <td>8B </td> <td>8k </td> <td>Yes </td> <td rowspan="2" >15T+ </td> <td>March, 2023 </td> </tr> <tr> <td>70B </td> <td>8k </td> <td>Yes </td> <td>December, 2023 </td> </tr> </table> **Llama 3 family of models**. Token counts refer to pretraining data only. Both the 8 and 70B versions use Grouped-Query Attention (GQA) for improved inference scalability. **Model Release Date** April 18, 2024. **Status** This is a static model trained on an offline dataset. Future versions of the tuned models will be released as we improve model safety with community feedback. **License** A custom commercial license is available at: [https://llama.meta.com/llama3/license](https://llama.meta.com/llama3/license) Where to send questions or comments about the model Instructions on how to provide feedback or comments on the model can be found in the model [README](https://github.com/meta-llama/llama3). For more technical information about generation parameters and recipes for how to use Llama 3 in applications, please go [here](https://github.com/meta-llama/llama-recipes). ## Intended Use **Intended Use Cases** Llama 3 is intended for commercial and research use in English. Instruction tuned models are intended for assistant-like chat, whereas pretrained models can be adapted for a variety of natural language generation tasks. **Out-of-scope** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in any other way that is prohibited by the Acceptable Use Policy and Llama 3 Community License. Use in languages other than English**. **Note: Developers may fine-tune Llama 3 models for languages beyond English provided they comply with the Llama 3 Community License and the Acceptable Use Policy. ## How to use This repository contains two versions of Meta-Llama-3-70B-Instruct, for use with transformers and with the original `llama3` codebase. 
### Use with transformers See the snippet below for usage with Transformers: ```python import transformers import torch model_id = "meta-llama/Meta-Llama-3-70B-Instruct" pipeline = transformers.pipeline( "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device="auto", ) messages = [ {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"}, {"role": "user", "content": "Who are you?"}, ] prompt = pipeline.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) terminators = [ pipeline.tokenizer.eos_token_id, pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>") ] outputs = pipeline( prompt, max_new_tokens=256, eos_token_id=terminators, do_sample=True, temperature=0.6, top_p=0.9, ) print(outputs[0]["generated_text"][len(prompt):]) ``` ### Use with `llama3` Please, follow the instructions in the [repository](https://github.com/meta-llama/llama3). To download Original checkpoints, see the example command below leveraging `huggingface-cli`: ``` huggingface-cli download meta-llama/Meta-Llama-3-70B-Instruct --include "original/*" --local-dir Meta-Llama-3-70B-Instruct ``` For Hugging Face support, we recommend using transformers or TGI, but a similar command works. ## Hardware and Software **Training Factors** We used custom training libraries, Meta's Research SuperCluster, and production clusters for pretraining. Fine-tuning, annotation, and evaluation were also performed on third-party cloud compute. **Carbon Footprint Pretraining utilized a cumulative** 7.7M GPU hours of computation on hardware of type H100-80GB (TDP of 700W). Estimated total emissions were 2290 tCO2eq, 100% of which were offset by Meta’s sustainability program. <table> <tr> <td> </td> <td><strong>Time (GPU hours)</strong> </td> <td><strong>Power Consumption (W)</strong> </td> <td><strong>Carbon Emitted(tCO2eq)</strong> </td> </tr> <tr> <td>Llama 3 8B </td> <td>1.3M </td> <td>700 </td> <td>390 </td> </tr> <tr> <td>Llama 3 70B </td> <td>6.4M </td> <td>700 </td> <td>1900 </td> </tr> <tr> <td>Total </td> <td>7.7M </td> <td> </td> <td>2290 </td> </tr> </table> **CO2 emissions during pre-training**. Time: total GPU time required for training each model. Power Consumption: peak power capacity per GPU device for the GPUs used adjusted for power usage efficiency. 100% of the emissions are directly offset by Meta's sustainability program, and because we are openly releasing these models, the pretraining costs do not need to be incurred by others. ## Training Data **Overview** Llama 3 was pretrained on over 15 trillion tokens of data from publicly available sources. The fine-tuning data includes publicly available instruction datasets, as well as over 10M human-annotated examples. Neither the pretraining nor the fine-tuning datasets include Meta user data. **Data Freshness** The pretraining data has a cutoff of March 2023 for the 7B and December 2023 for the 70B models respectively. ## Benchmarks In this section, we report the results for Llama 3 models on standard automatic benchmarks. For all the evaluations, we use our internal evaluations library. For details on the methodology see [here](https://github.com/meta-llama/llama3/blob/main/eval_methodology.md). 
### Base pretrained models <table> <tr> <td><strong>Category</strong> </td> <td><strong>Benchmark</strong> </td> <td><strong>Llama 3 8B</strong> </td> <td><strong>Llama2 7B</strong> </td> <td><strong>Llama2 13B</strong> </td> <td><strong>Llama 3 70B</strong> </td> <td><strong>Llama2 70B</strong> </td> </tr> <tr> <td rowspan="6" >General </td> <td>MMLU (5-shot) </td> <td>66.6 </td> <td>45.7 </td> <td>53.8 </td> <td>79.5 </td> <td>69.7 </td> </tr> <tr> <td>AGIEval English (3-5 shot) </td> <td>45.9 </td> <td>28.8 </td> <td>38.7 </td> <td>63.0 </td> <td>54.8 </td> </tr> <tr> <td>CommonSenseQA (7-shot) </td> <td>72.6 </td> <td>57.6 </td> <td>67.6 </td> <td>83.8 </td> <td>78.7 </td> </tr> <tr> <td>Winogrande (5-shot) </td> <td>76.1 </td> <td>73.3 </td> <td>75.4 </td> <td>83.1 </td> <td>81.8 </td> </tr> <tr> <td>BIG-Bench Hard (3-shot, CoT) </td> <td>61.1 </td> <td>38.1 </td> <td>47.0 </td> <td>81.3 </td> <td>65.7 </td> </tr> <tr> <td>ARC-Challenge (25-shot) </td> <td>78.6 </td> <td>53.7 </td> <td>67.6 </td> <td>93.0 </td> <td>85.3 </td> </tr> <tr> <td>Knowledge reasoning </td> <td>TriviaQA-Wiki (5-shot) </td> <td>78.5 </td> <td>72.1 </td> <td>79.6 </td> <td>89.7 </td> <td>87.5 </td> </tr> <tr> <td rowspan="4" >Reading comprehension </td> <td>SQuAD (1-shot) </td> <td>76.4 </td> <td>72.2 </td> <td>72.1 </td> <td>85.6 </td> <td>82.6 </td> </tr> <tr> <td>QuAC (1-shot, F1) </td> <td>44.4 </td> <td>39.6 </td> <td>44.9 </td> <td>51.1 </td> <td>49.4 </td> </tr> <tr> <td>BoolQ (0-shot) </td> <td>75.7 </td> <td>65.5 </td> <td>66.9 </td> <td>79.0 </td> <td>73.1 </td> </tr> <tr> <td>DROP (3-shot, F1) </td> <td>58.4 </td> <td>37.9 </td> <td>49.8 </td> <td>79.7 </td> <td>70.2 </td> </tr> </table> ### Instruction tuned models <table> <tr> <td><strong>Benchmark</strong> </td> <td><strong>Llama 3 8B</strong> </td> <td><strong>Llama 2 7B</strong> </td> <td><strong>Llama 2 13B</strong> </td> <td><strong>Llama 3 70B</strong> </td> <td><strong>Llama 2 70B</strong> </td> </tr> <tr> <td>MMLU (5-shot) </td> <td>68.4 </td> <td>34.1 </td> <td>47.8 </td> <td>82.0 </td> <td>52.9 </td> </tr> <tr> <td>GPQA (0-shot) </td> <td>34.2 </td> <td>21.7 </td> <td>22.3 </td> <td>39.5 </td> <td>21.0 </td> </tr> <tr> <td>HumanEval (0-shot) </td> <td>62.2 </td> <td>7.9 </td> <td>14.0 </td> <td>81.7 </td> <td>25.6 </td> </tr> <tr> <td>GSM-8K (8-shot, CoT) </td> <td>79.6 </td> <td>25.7 </td> <td>77.4 </td> <td>93.0 </td> <td>57.5 </td> </tr> <tr> <td>MATH (4-shot, CoT) </td> <td>30.0 </td> <td>3.8 </td> <td>6.7 </td> <td>50.4 </td> <td>11.6 </td> </tr> </table> ### Responsibility & Safety We believe that an open approach to AI leads to better, safer products, faster innovation, and a bigger overall market. We are committed to Responsible AI development and took a series of steps to limit misuse and harm and support the open source community. Foundation models are widely capable technologies that are built to be used for a diverse range of applications. They are not designed to meet every developer preference on safety levels for all use cases, out-of-the-box, as those by their nature will differ across different applications. Rather, responsible LLM-application deployment is achieved by implementing a series of safety best practices throughout the development of such applications, from the model pre-training, fine-tuning and the deployment of systems composed of safeguards to tailor the safety needs specifically to the use case and audience. 
As part of the Llama 3 release, we updated our [Responsible Use Guide](https://llama.meta.com/responsible-use-guide/) to outline the steps and best practices for developers to implement model and system level safety for their application. We also provide a set of resources including [Meta Llama Guard 2](https://llama.meta.com/purple-llama/) and [Code Shield](https://llama.meta.com/purple-llama/) safeguards. These tools have proven to drastically reduce residual risks of LLM Systems, while maintaining a high level of helpfulness. We encourage developers to tune and deploy these safeguards according to their needs and we provide a [reference implementation](https://github.com/meta-llama/llama-recipes/tree/main/recipes/responsible_ai) to get you started. #### Llama 3-Instruct As outlined in the Responsible Use Guide, some trade-off between model helpfulness and model alignment is likely unavoidable. Developers should exercise discretion about how to weigh the benefits of alignment and helpfulness for their specific use case and audience. Developers should be mindful of residual risks when using Llama models and leverage additional safety tools as needed to reach the right safety bar for their use case. <span style="text-decoration:underline;">Safety</span> For our instruction tuned model, we conducted extensive red teaming exercises, performed adversarial evaluations and implemented safety mitigations techniques to lower residual risks. As with any Large Language Model, residual risks will likely remain and we recommend that developers assess these risks in the context of their use case. In parallel, we are working with the community to make AI safety benchmark standards transparent, rigorous and interpretable. <span style="text-decoration:underline;">Refusals</span> In addition to residual risks, we put a great emphasis on model refusals to benign prompts. Over-refusing not only can impact the user experience but could even be harmful in certain contexts as well. We’ve heard the feedback from the developer community and improved our fine tuning to ensure that Llama 3 is significantly less likely to falsely refuse to answer prompts than Llama 2. We built internal benchmarks and developed mitigations to limit false refusals making Llama 3 our most helpful model to date. #### Responsible release In addition to responsible use considerations outlined above, we followed a rigorous process that requires us to take extra measures against misuse and critical risks before we make our release decision. Misuse If you access or use Llama 3, you agree to the Acceptable Use Policy. The most recent copy of this policy can be found at [https://llama.meta.com/llama3/use-policy/](https://llama.meta.com/llama3/use-policy/). #### Critical risks <span style="text-decoration:underline;">CBRNE</span> (Chemical, Biological, Radiological, Nuclear, and high yield Explosives) We have conducted a two fold assessment of the safety of the model in this area: * Iterative testing during model training to assess the safety of responses related to CBRNE threats and other adversarial risks. * Involving external CBRNE experts to conduct an uplift test assessing the ability of the model to accurately provide expert knowledge and reduce barriers to potential CBRNE misuse, by reference to what can be achieved using web search (without the model). 
### <span style="text-decoration:underline;">Cyber Security</span>

We have evaluated Llama 3 with CyberSecEval, Meta’s cybersecurity safety eval suite, measuring Llama 3’s propensity to suggest insecure code when used as a coding assistant, and Llama 3’s propensity to comply with requests to help carry out cyber attacks, where attacks are defined by the industry standard MITRE ATT&CK cyber attack ontology. On our insecure coding and cyber attacker helpfulness tests, Llama 3 behaved in the same range or safer than models of [equivalent coding capability](https://huggingface.co/spaces/facebook/CyberSecEval).

### <span style="text-decoration:underline;">Child Safety</span>

Child Safety risk assessments were conducted using a team of experts, to assess the model’s capability to produce outputs that could result in Child Safety risks and inform on any necessary and appropriate risk mitigations via fine tuning. We leveraged those expert red teaming sessions to expand the coverage of our evaluation benchmarks through Llama 3 model development. For Llama 3, we conducted new in-depth sessions using objective based methodologies to assess the model risks along multiple attack vectors. We also partnered with content specialists to perform red teaming exercises assessing potentially violating content while taking account of market specific nuances or experiences.

### Community

Generative AI safety requires expertise and tooling, and we believe in the strength of the open community to accelerate its progress. We are active members of open consortiums, including the AI Alliance, Partnership in AI and MLCommons, actively contributing to safety standardization and transparency. We encourage the community to adopt taxonomies like the MLCommons Proof of Concept evaluation to facilitate collaboration and transparency on safety and content evaluations. Our Purple Llama tools are open sourced for the community to use and widely distributed across ecosystem partners including cloud service providers. We encourage community contributions to our [Github repository](https://github.com/meta-llama/PurpleLlama).

Finally, we put in place a set of resources including an [output reporting mechanism](https://developers.facebook.com/llama_output_feedback) and [bug bounty program](https://www.facebook.com/whitehat) to continuously improve the Llama technology with the help of the community.

## Ethical Considerations and Limitations

The core values of Llama 3 are openness, inclusivity and helpfulness. It is meant to serve everyone, and to work for a wide range of use cases. It is thus designed to be accessible to people across many different backgrounds, experiences and perspectives. Llama 3 addresses users and their needs as they are, without inserting unnecessary judgment or normativity, while reflecting the understanding that even content that may appear problematic in some cases can serve valuable purposes in others. It respects the dignity and autonomy of all users, especially in terms of the values of free thought and expression that power innovation and progress.

But Llama 3 is a new technology, and like any new technology, there are risks associated with its use. Testing conducted to date has been in English, and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Llama 3’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate, biased or other objectionable responses to user prompts.
Therefore, before deploying any applications of Llama 3 models, developers should perform safety testing and tuning tailored to their specific applications of the model. As outlined in the Responsible Use Guide, we recommend incorporating [Purple Llama](https://github.com/facebookresearch/PurpleLlama) solutions into your workflows and specifically [Llama Guard](https://ai.meta.com/research/publications/llama-guard-llm-based-input-output-safeguard-for-human-ai-conversations/) which provides a base model to filter input and output prompts to layer system-level safety on top of model-level safety. Please see the Responsible Use Guide available at [http://llama.meta.com/responsible-use-guide](http://llama.meta.com/responsible-use-guide) ## Citation instructions @article{llama3modelcard, title={Llama 3 Model Card}, author={AI@Meta}, year={2024}, url = {https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md} } ## Contributors Aaditya Singh; Aaron Grattafiori; Abhimanyu Dubey; Abhinav Jauhri; Abhinav Pandey; Abhishek Kadian; Adam Kelsey; Adi Gangidi; Ahmad Al-Dahle; Ahuva Goldstand; Aiesha Letman; Ajay Menon; Akhil Mathur; Alan Schelten; Alex Vaughan; Amy Yang; Andrei Lupu; Andres Alvarado; Andrew Gallagher; Andrew Gu; Andrew Ho; Andrew Poulton; Andrew Ryan; Angela Fan; Ankit Ramchandani; Anthony Hartshorn; Archi Mitra; Archie Sravankumar; Artem Korenev; Arun Rao; Ashley Gabriel; Ashwin Bharambe; Assaf Eisenman; Aston Zhang; Aurelien Rodriguez; Austen Gregerson; Ava Spataru; Baptiste Roziere; Ben Maurer; Benjamin Leonhardi; Bernie Huang; Bhargavi Paranjape; Bing Liu; Binh Tang; Bobbie Chern; Brani Stojkovic; Brian Fuller; Catalina Mejia Arenas; Chao Zhou; Charlotte Caucheteux; Chaya Nayak; Ching-Hsiang Chu; Chloe Bi; Chris Cai; Chris Cox; Chris Marra; Chris McConnell; Christian Keller; Christoph Feichtenhofer; Christophe Touret; Chunyang Wu; Corinne Wong; Cristian Canton Ferrer; Damien Allonsius; Daniel Kreymer; Daniel Haziza; Daniel Li; Danielle Pintz; Danny Livshits; Danny Wyatt; David Adkins; David Esiobu; David Xu; Davide Testuggine; Delia David; Devi Parikh; Dhruv Choudhary; Dhruv Mahajan; Diana Liskovich; Diego Garcia-Olano; Diego Perino; Dieuwke Hupkes; Dingkang Wang; Dustin Holland; Egor Lakomkin; Elina Lobanova; Xiaoqing Ellen Tan; Emily Dinan; Eric Smith; Erik Brinkman; Esteban Arcaute; Filip Radenovic; Firat Ozgenel; Francesco Caggioni; Frank Seide; Frank Zhang; Gabriel Synnaeve; Gabriella Schwarz; Gabrielle Lee; Gada Badeer; Georgia Anderson; Graeme Nail; Gregoire Mialon; Guan Pang; Guillem Cucurell; Hailey Nguyen; Hannah Korevaar; Hannah Wang; Haroun Habeeb; Harrison Rudolph; Henry Aspegren; Hu Xu; Hugo Touvron; Iga Kozlowska; Igor Molybog; Igor Tufanov; Iliyan Zarov; Imanol Arrieta Ibarra; Irina-Elena Veliche; Isabel Kloumann; Ishan Misra; Ivan Evtimov; Jacob Xu; Jade Copet; Jake Weissman; Jan Geffert; Jana Vranes; Japhet Asher; Jason Park; Jay Mahadeokar; Jean-Baptiste Gaya; Jeet Shah; Jelmer van der Linde; Jennifer Chan; Jenny Hong; Jenya Lee; Jeremy Fu; Jeremy Teboul; Jianfeng Chi; Jianyu Huang; Jie Wang; Jiecao Yu; Joanna Bitton; Joe Spisak; Joelle Pineau; Jon Carvill; Jongsoo Park; Joseph Rocca; Joshua Johnstun; Junteng Jia; Kalyan Vasuden Alwala; Kam Hou U; Kate Plawiak; Kartikeya Upasani; Kaushik Veeraraghavan; Ke Li; Kenneth Heafield; Kevin Stone; Khalid El-Arini; Krithika Iyer; Kshitiz Malik; Kuenley Chiu; Kunal Bhalla; Kyle Huang; Lakshya Garg; Lauren Rantala-Yeary; Laurens van der Maaten; Lawrence Chen; Leandro Silva; Lee Bell; Lei Zhang; Liang Tan; Louis Martin; 
Lovish Madaan; Luca Wehrstedt; Lukas Blecher; Luke de Oliveira; Madeline Muzzi; Madian Khabsa; Manav Avlani; Mannat Singh; Manohar Paluri; Mark Zuckerberg; Marcin Kardas; Martynas Mankus; Mathew Oldham; Mathieu Rita; Matthew Lennie; Maya Pavlova; Meghan Keneally; Melanie Kambadur; Mihir Patel; Mikayel Samvelyan; Mike Clark; Mike Lewis; Min Si; Mitesh Kumar Singh; Mo Metanat; Mona Hassan; Naman Goyal; Narjes Torabi; Nicolas Usunier; Nikolay Bashlykov; Nikolay Bogoychev; Niladri Chatterji; Ning Dong; Oliver Aobo Yang; Olivier Duchenne; Onur Celebi; Parth Parekh; Patrick Alrassy; Paul Saab; Pavan Balaji; Pedro Rittner; Pengchuan Zhang; Pengwei Li; Petar Vasic; Peter Weng; Polina Zvyagina; Prajjwal Bhargava; Pratik Dubal; Praveen Krishnan; Punit Singh Koura; Qing He; Rachel Rodriguez; Ragavan Srinivasan; Rahul Mitra; Ramon Calderer; Raymond Li; Robert Stojnic; Roberta Raileanu; Robin Battey; Rocky Wang; Rohit Girdhar; Rohit Patel; Romain Sauvestre; Ronnie Polidoro; Roshan Sumbaly; Ross Taylor; Ruan Silva; Rui Hou; Rui Wang; Russ Howes; Ruty Rinott; Saghar Hosseini; Sai Jayesh Bondu; Samyak Datta; Sanjay Singh; Sara Chugh; Sargun Dhillon; Satadru Pan; Sean Bell; Sergey Edunov; Shaoliang Nie; Sharan Narang; Sharath Raparthy; Shaun Lindsay; Sheng Feng; Sheng Shen; Shenghao Lin; Shiva Shankar; Shruti Bhosale; Shun Zhang; Simon Vandenhende; Sinong Wang; Seohyun Sonia Kim; Soumya Batra; Sten Sootla; Steve Kehoe; Suchin Gururangan; Sumit Gupta; Sunny Virk; Sydney Borodinsky; Tamar Glaser; Tamar Herman; Tamara Best; Tara Fowler; Thomas Georgiou; Thomas Scialom; Tianhe Li; Todor Mihaylov; Tong Xiao; Ujjwal Karn; Vedanuj Goswami; Vibhor Gupta; Vignesh Ramanathan; Viktor Kerkez; Vinay Satish Kumar; Vincent Gonguet; Vish Vogeti; Vlad Poenaru; Vlad Tiberiu Mihailescu; Vladan Petrovic; Vladimir Ivanov; Wei Li; Weiwei Chu; Wenhan Xiong; Wenyin Fu; Wes Bouaziz; Whitney Meers; Will Constable; Xavier Martinet; Xiaojian Wu; Xinbo Gao; Xinfeng Xie; Xuchao Jia; Yaelle Goldschlag; Yann LeCun; Yashesh Gaur; Yasmine Babaei; Ye Qi; Yenda Li; Yi Wen; Yiwen Song; Youngjin Nam; Yuchen Hao; Yuchen Zhang; Yun Wang; Yuning Mao; Yuzi He; Zacharie Delpierre Coudert; Zachary DeVito; Zahra Hankir; Zhaoduo Wen; Zheng Yan; Zhengxing Chen; Zhenyu Yang; Zoe Papakipos
vicgalle/Configurable-Janus-7B
vicgalle
2024-06-13T08:20:21Z
1,014
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "conversational", "arxiv:2203.05482", "base_model:vicgalle/Configurable-Mistral-7B", "base_model:kaist-ai/janus-dpo-7b", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-06-13T08:15:16Z
--- base_model: - vicgalle/Configurable-Mistral-7B - kaist-ai/janus-dpo-7b library_name: transformers tags: - mergekit - merge license: mit --- # merge This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method. ### Models Merged The following models were included in the merge: * [vicgalle/Configurable-Mistral-7B](https://huggingface.co/vicgalle/Configurable-Mistral-7B) * [kaist-ai/janus-dpo-7b](https://huggingface.co/kaist-ai/janus-dpo-7b) ### Configuration The following YAML configuration was used to produce this model: ```yaml models: - model: vicgalle/Configurable-Mistral-7B parameters: weight: 1.0 - model: kaist-ai/janus-dpo-7b parameters: weight: 1.0 merge_method: linear dtype: float16 ```
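## Usage

The merged checkpoint is a standard Mistral-architecture causal language model, so it can be loaded with the usual `transformers` text-generation API. The snippet below is a minimal sketch rather than an official example from the model author; it assumes the repository loads directly with `AutoModelForCausalLM` and that the tokenizer ships a chat template (if it does not, pass a plain string prompt instead).

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "vicgalle/Configurable-Janus-7B"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # the merge above was produced in float16
    device_map="auto",
)

# Build a chat-formatted prompt (assumes a chat template is defined).
messages = [{"role": "user", "content": "Explain what a linear model merge does."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```

To reproduce the merge itself, the YAML configuration above can be passed to mergekit's `mergekit-yaml` command-line tool, which writes the merged weights to an output directory.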
Tanvir1337/BanglaLLama-3-8b-BnWiki-Base-IQ3_XXS-GGUF
Tanvir1337
2024-06-26T10:03:05Z
1,014
0
null
[ "gguf", "bangla", "large language model", "llama-cpp", "gguf-my-repo", "bn", "en", "dataset:wikimedia/wikipedia", "base_model:BanglaLLM/BanglaLLama-3-8b-BnWiki-Base", "license:llama3", "region:us" ]
null
2024-06-26T10:02:48Z
--- base_model: BanglaLLM/BanglaLLama-3-8b-BnWiki-Base datasets: - wikimedia/wikipedia language: - bn - en license: llama3 tags: - bangla - large language model - llama-cpp - gguf-my-repo --- # Tanvir1337/BanglaLLama-3-8b-BnWiki-Base-IQ3_XXS-GGUF This model was converted to GGUF format from [`BanglaLLM/BanglaLLama-3-8b-BnWiki-Base`](https://huggingface.co/BanglaLLM/BanglaLLama-3-8b-BnWiki-Base) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/BanglaLLM/BanglaLLama-3-8b-BnWiki-Base) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo Tanvir1337/BanglaLLama-3-8b-BnWiki-Base-IQ3_XXS-GGUF --hf-file banglallama-3-8b-bnwiki-base-iq3_xxs-imat.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Tanvir1337/BanglaLLama-3-8b-BnWiki-Base-IQ3_XXS-GGUF --hf-file banglallama-3-8b-bnwiki-base-iq3_xxs-imat.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo Tanvir1337/BanglaLLama-3-8b-BnWiki-Base-IQ3_XXS-GGUF --hf-file banglallama-3-8b-bnwiki-base-iq3_xxs-imat.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo Tanvir1337/BanglaLLama-3-8b-BnWiki-Base-IQ3_XXS-GGUF --hf-file banglallama-3-8b-bnwiki-base-iq3_xxs-imat.gguf -c 2048 ```
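Once `llama-server` is running, it exposes a small local HTTP API that can be queried from any language. The snippet below is an illustrative sketch, not part of the upstream instructions: it assumes the server is listening on the default `http://localhost:8080` address and uses the plain `/completion` endpoint and response schema of recent llama.cpp builds.

```python
import requests

# Ask the running llama-server instance for a completion.
response = requests.post(
    "http://localhost:8080/completion",
    json={"prompt": "The meaning to life and the universe is", "n_predict": 128},
)
response.raise_for_status()
print(response.json()["content"])  # "content" holds the generated text
```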
aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored-Q5_K_M-GGUF
aifeifei798
2024-07-01T11:51:53Z
1,014
0
null
[ "gguf", "roleplay", "llama3", "sillytavern", "idol", "llama-cpp", "gguf-my-repo", "en", "ja", "zh", "base_model:aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored", "license:llama3", "region:us" ]
null
2024-07-01T11:51:27Z
--- base_model: aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored language: - en - ja - zh license: llama3 tags: - roleplay - llama3 - sillytavern - idol - llama-cpp - gguf-my-repo --- # aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored-Q5_K_M-GGUF This model was converted to GGUF format from [`aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored`](https://huggingface.co/aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored-Q5_K_M-GGUF --hf-file llama3-8b-darkidol-2.0-uncensored-q5_k_m-imat.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored-Q5_K_M-GGUF --hf-file llama3-8b-darkidol-2.0-uncensored-q5_k_m-imat.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored-Q5_K_M-GGUF --hf-file llama3-8b-darkidol-2.0-uncensored-q5_k_m-imat.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo aifeifei798/llama3-8B-DarkIdol-2.0-Uncensored-Q5_K_M-GGUF --hf-file llama3-8b-darkidol-2.0-uncensored-q5_k_m-imat.gguf -c 2048 ```
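Because this is a chat-oriented roleplay model, it can also be driven from Python through the `llama-cpp-python` bindings instead of the raw CLI. The example below is a hedged sketch rather than part of the original instructions; it assumes `llama-cpp-python` is installed, that the downloaded GGUF file sits in the current directory, and that the file embeds a chat template that `create_chat_completion` can pick up.

```python
from llama_cpp import Llama

llm = Llama(
    model_path="./llama3-8b-darkidol-2.0-uncensored-q5_k_m-imat.gguf",
    n_ctx=2048,
    n_gpu_layers=-1,  # offload all layers to the GPU; set to 0 for CPU-only
)

response = llm.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a friendly idol-style roleplay assistant."},
        {"role": "user", "content": "Introduce yourself in character."},
    ],
    max_tokens=256,
)
print(response["choices"][0]["message"]["content"])
```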
mosaicml/mosaic-bert-base-seqlen-512
mosaicml
2024-03-05T20:30:38Z
1,013
4
transformers
[ "transformers", "pytorch", "bert", "fill-mask", "custom_code", "en", "dataset:c4", "arxiv:2108.12409", "arxiv:2205.14135", "arxiv:2002.05202", "arxiv:2208.08124", "arxiv:1612.08083", "arxiv:2102.11972", "arxiv:1907.11692", "arxiv:2202.08005", "license:apache-2.0", "autotrain_compatible", "region:us" ]
fill-mask
2023-04-26T03:16:20Z
---
license: apache-2.0
datasets:
- c4
language:
- en
inference: false
---

# MosaicBERT: mosaic-bert-base-seqlen-512 Pretrained Model

MosaicBERT-Base is a new BERT architecture and training recipe optimized for fast pretraining. MosaicBERT trains faster and achieves higher pretraining and finetuning accuracy when benchmarked against Hugging Face's [bert-base-uncased](https://huggingface.co/bert-base-uncased). It incorporates efficiency insights from the past half a decade of transformers research, from RoBERTa to T5 and GPT.

__This particular model was trained with [ALiBi](https://arxiv.org/abs/2108.12409) on a sequence length of 512 tokens.__

ALiBi allows a model trained with a sequence length n to easily extrapolate to sequence lengths >2n during finetuning. For more details, see [Train Short, Test Long: Attention with Linear Biases Enables Input Length Extrapolation (Press et al. 2022)](https://arxiv.org/abs/2108.12409).

It is part of the **family of MosaicBERT-Base models** trained using ALiBi on different sequence lengths:

* [mosaic-bert-base](https://huggingface.co/mosaicml/mosaic-bert-base) (trained on a sequence length of 128 tokens)
* [mosaic-bert-base-seqlen-256](https://huggingface.co/mosaicml/mosaic-bert-base-seqlen-256)
* mosaic-bert-base-seqlen-512
* [mosaic-bert-base-seqlen-1024](https://huggingface.co/mosaicml/mosaic-bert-base-seqlen-1024)
* [mosaic-bert-base-seqlen-2048](https://huggingface.co/mosaicml/mosaic-bert-base-seqlen-2048)

The primary use case of these models is for research on efficient pretraining and finetuning for long context embeddings.

## Model Date

April 2023

## Documentation

* [Project Page (mosaicbert.github.io)](https://mosaicbert.github.io)
* [Github (mosaicml/examples/tree/main/examples/benchmarks/bert)](https://github.com/mosaicml/examples/tree/main/examples/benchmarks/bert)
* [Paper (NeurIPS 2023)](https://openreview.net/forum?id=5zipcfLC2Z)
* Colab Tutorials:
  * [MosaicBERT Tutorial Part 1: Load Pretrained Weights and Experiment with Sequence Length Extrapolation Using ALiBi](https://colab.research.google.com/drive/1r0A3QEbu4Nzs2Jl6LaiNoW5EumIVqrGc?usp=sharing)
* [Blog Post (March 2023)](https://www.mosaicml.com/blog/mosaicbert)

## How to use

```python
import torch
import transformers
from transformers import AutoModelForMaskedLM, BertTokenizer, BertConfig, pipeline

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')  # MosaicBERT uses the standard BERT tokenizer

config = transformers.BertConfig.from_pretrained('mosaicml/mosaic-bert-base-seqlen-512')  # the config needs to be passed in
mosaicbert = AutoModelForMaskedLM.from_pretrained('mosaicml/mosaic-bert-base-seqlen-512', config=config, trust_remote_code=True)

# To use this model directly for masked language modeling
mosaicbert_classifier = pipeline('fill-mask', model=mosaicbert, tokenizer=tokenizer, device="cpu")
mosaicbert_classifier("I [MASK] to the store yesterday.")
```

Note that the tokenizer for this model is simply the Hugging Face `bert-base-uncased` tokenizer.

In order to take advantage of ALiBi by extrapolating to longer sequence lengths, simply change the `alibi_starting_size` flag in the config file and reload the model.
```python config = transformers.BertConfig.from_pretrained('mosaicml/mosaic-bert-base-seqlen-512') config.alibi_starting_size = 1024 # maximum sequence length updated to 1024 from config default of 512 mosaicbert = AutoModelForMaskedLM.from_pretrained('mosaicml/mosaic-bert-base-seqlen-512',config=config,trust_remote_code=True) ``` This simply presets the non-learned linear bias matrix in every attention block to 1024 tokens (note that this particular model was trained with a sequence length of 512 tokens). **To continue MLM pretraining**, follow the [MLM pre-training section of the mosaicml/examples/bert repo](https://github.com/mosaicml/examples/tree/main/examples/bert#mlm-pre-training). **To fine-tune this model for classification**, follow the [Single-task fine-tuning section of the mosaicml/examples/bert repo](https://github.com/mosaicml/examples/tree/main/examples/bert#single-task-fine-tuning). ### [Update 1/2/2024] Triton Flash Attention with ALiBi Note that by default, triton Flash Attention is **not** enabled or required. In order to enable our custom implementation of triton Flash Attention with ALiBi from March 2023, set `attention_probs_dropout_prob: 0.0`. We are currently working on supporting Flash Attention 2 (see [PR here](https://github.com/mosaicml/examples/pull/440)). ### Remote Code This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we train using [FlashAttention (Dao et al. 2022)](https://arxiv.org/pdf/2205.14135.pdf), which is not part of the `transformers` library and depends on [Triton](https://github.com/openai/triton) and some custom PyTorch code. Since this involves executing arbitrary code, you should consider passing a git `revision` argument that specifies the exact commit of the code, for example: ```python mlm = AutoModelForMaskedLM.from_pretrained( 'mosaicml/mosaic-bert-base-seqlen-512', trust_remote_code=True, revision='24512df', ) ``` However, if there are updates to this model or code and you specify a revision, you will need to manually check for them and update the commit hash accordingly. ## MosaicBERT Model description In order to build MosaicBERT, we adopted architectural choices from the recent transformer literature. These include [FlashAttention (Dao et al. 2022)](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi (Press et al. 2021)](https://arxiv.org/abs/2108.12409), and [Gated Linear Units (Shazeer 2020)](https://arxiv.org/abs/2002.05202). In addition, we remove padding inside the transformer block, and apply LayerNorm with low precision. ### Modifications to the Attention Mechanism 1. **FlashAttention**: Attention layers are core components of the transformer architecture. The recently proposed FlashAttention layer reduces the number of read/write operations between the GPU HBM (high bandwidth memory, i.e. long-term memory) and the GPU SRAM (i.e. short-term memory) [[Dao et al. 2022]](https://arxiv.org/pdf/2205.14135.pdf). We used the FlashAttention module built by [hazy research](https://github.com/HazyResearch/flash-attention) with [OpenAI’s triton library](https://github.com/openai/triton). 2. **Attention with Linear Biases (ALiBi)**: In most BERT models, the positions of tokens in a sequence are encoded with a position embedding layer; this embedding allows subsequent layers to keep track of the order of tokens in a sequence. ALiBi eliminates position embeddings and instead conveys this information using a bias matrix in the attention operation. 
It modifies the attention mechanism such that nearby tokens strongly attend to one another [[Press et al. 2021]](https://arxiv.org/abs/2108.12409). In addition to improving the performance of the final model, ALiBi helps the model to handle sequences longer than it saw during training. Details on our ALiBi implementation can be found [in the mosaicml/examples repo here](https://github.com/mosaicml/examples/blob/d14a7c94a0f805f56a7c865802082bf6d8ac8903/examples/bert/src/bert_layers.py#L425). 3. **Unpadding**: Standard NLP practice is to combine text sequences of different lengths into a batch, and pad the sequences with empty tokens so that all sequence lengths are the same. During training, however, this can lead to many superfluous operations on those padding tokens. In MosaicBERT, we take a different approach: we concatenate all the examples in a minibatch into a single sequence of batch size 1. Results from NVIDIA and others have shown that this approach leads to speed improvements during training, since operations are not performed on padding tokens (see for example [Zeng et al. 2022](https://arxiv.org/pdf/2208.08124.pdf)). Details on our “unpadding” implementation can be found [in the mosaicml/examples repo here](https://github.com/mosaicml/examples/blob/main/examples/bert/src/bert_padding.py). 4. **Low Precision LayerNorm**: this small tweak forces LayerNorm modules to run in float16 or bfloat16 precision instead of float32, improving utilization. Our implementation can be found [in the mosaicml/examples repo here](https://docs.mosaicml.com/en/v0.12.1/method_cards/low_precision_layernorm.html). ### Modifications to the Feedforward Layers 5. **Gated Linear Units (GLU)**: We used Gated Linear Units for the feedforward sublayer of a transformer. GLUs were first proposed in 2016 [[Dauphin et al. 2016]](https://arxiv.org/abs/1612.08083), and incorporate an extra learnable matrix that “gates” the outputs of the feedforward layer. More recent work has shown that GLUs can improve performance quality in transformers [[Shazeer, 2020](https://arxiv.org/abs/2002.05202), [Narang et al. 2021](https://arxiv.org/pdf/2102.11972.pdf)]. We used the GeLU (Gaussian-error Linear Unit) activation function with GLU, which is sometimes referred to as GeGLU. The GeLU activation function is a smooth, fully differentiable approximation to ReLU; we found that this led to a nominal improvement over ReLU. More details on our implementation of GLU can be found here. The extra gating matrix in a GLU model potentially adds additional parameters to a model; we chose to augment our BERT-Base model with additional parameters due to GLU modules as it leads to a Pareto improvement across all timescales (which is not true of all larger models such as BERT-Large). While BERT-Base has 110 million parameters, MosaicBERT-Base has 137 million parameters. Note that MosaicBERT-Base trains faster than BERT-Base despite having more parameters. ## Training data MosaicBERT is pretrained using a standard Masked Language Modeling (MLM) objective: the model is given a sequence of text with some tokens hidden, and it has to predict these masked tokens. MosaicBERT is trained on the English [“Colossal, Cleaned, Common Crawl” C4 dataset](https://github.com/allenai/allennlp/discussions/5056), which contains roughly 365 million curated text documents scraped from the internet (equivalent to 156 billion tokens). We used this more modern dataset in place of traditional BERT pretraining corpora like English Wikipedia and BooksCorpus. 
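To make the MLM objective above concrete, the sketch below shows the masking step in miniature. It is an illustration only, not MosaicBERT's actual data pipeline (that lives in the mosaicml/examples repo); the 30% masking ratio matches the recipe described in the Pretraining Optimizations section below.

```python
import torch

def mask_tokens(input_ids, mask_token_id, vocab_size, mlm_probability=0.30):
    """Illustrative MLM masking: hide ~30% of tokens and build the labels.

    Labels keep the original ids at masked positions and are set to -100
    everywhere else so that the cross-entropy loss ignores unmasked tokens.
    """
    labels = input_ids.clone()
    masked = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    labels[~masked] = -100

    # Standard BERT recipe: of the masked positions, 80% become [MASK],
    # 10% become a random token, and 10% are left unchanged.
    input_ids = input_ids.clone()
    replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked
    input_ids[replaced] = mask_token_id

    randomized = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked & ~replaced
    random_tokens = torch.randint(vocab_size, labels.shape, dtype=torch.long)
    input_ids[randomized] = random_tokens[randomized]

    return input_ids, labels
```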
## Pretraining Optimizations Many of these pretraining optimizations below were informed by our [BERT results for the MLPerf v2.1 speed benchmark](https://www.mosaicml.com/blog/mlperf-nlp-nov2022). 1. **MosaicML Streaming Dataset**: As part of our efficiency pipeline, we converted the C4 dataset to [MosaicML’s StreamingDataset format](https://www.mosaicml.com/blog/mosaicml-streamingdataset) and used this for both MosaicBERT-Base and the baseline BERT-Base. For all BERT-Base models, we chose the training duration to be 286,720,000 samples of **sequence length 512**; this covers 78.6% of C4. 2. **Higher Masking Ratio for the Masked Language Modeling Objective**: We used the standard Masked Language Modeling (MLM) pretraining objective. While the original BERT paper also included a Next Sentence Prediction (NSP) task in the pretraining objective, subsequent papers have shown this to be unnecessary [Liu et al. 2019](https://arxiv.org/abs/1907.11692). However, we found that a 30% masking ratio led to slight accuracy improvements in both pretraining MLM and downstream GLUE performance. We therefore included this simple change as part of our MosaicBERT training recipe. Recent studies have also found that this simple change can lead to downstream improvements [Wettig et al. 2022](https://arxiv.org/abs/2202.08005). 3. **Bfloat16 Precision**: We use [bf16 (bfloat16) mixed precision training](https://cloud.google.com/blog/products/ai-machine-learning/bfloat16-the-secret-to-high-performance-on-cloud-tpus) for all the models, where a matrix multiplication layer uses bf16 for the multiplication and 32-bit IEEE floating point for gradient accumulation. We found this to be more stable than using float16 mixed precision. 4. **Vocab Size as a Multiple of 64**: We increased the vocab size to be a multiple of 8 as well as 64 (i.e. from 30,522 to 30,528). This small constraint is something of [a magic trick among ML practitioners](https://twitter.com/karpathy/status/1621578354024677377), and leads to a throughput speedup. 5. **Hyperparameters**: For all models, we use Decoupled AdamW with Beta_1=0.9 and Beta_2=0.98, and a weight decay value of 1.0e-5. The learning rate schedule begins with a warmup to a maximum learning rate of 5.0e-4 followed by a linear decay to zero. Warmup lasted for 6% of the full training duration. Global batch size was set to 4096, and microbatch size was **128**; since global batch size was 4096, full pretraining consisted of 70,000 batches. We set the **maximum sequence length during pretraining to 512**, and we used the standard embedding dimension of 768. For MosaicBERT, we applied 0.1 dropout to the feedforward layers but no dropout to the FlashAttention module, as this was not possible with the OpenAI triton implementation. Full configuration details for pretraining MosaicBERT-Base can be found in the configuration yamls [in the mosaicml/examples repo here](https://github.com/mosaicml/examples/tree/main/bert/yamls/main). ## Intended uses & limitations This model is intended to be finetuned on downstream tasks. ## Citation Please cite this model using the following format: ``` @article{portes2023MosaicBERT, title={MosaicBERT: A Bidirectional Encoder Optimized for Fast Pretraining}, author={Jacob Portes, Alexander R Trott, Sam Havens, Daniel King, Abhinav Venigalla, Moin Nadeem, Nikhil Sardana, Daya Khudia, Jonathan Frankle}, journal={NeuRIPS https://openreview.net/pdf?id=5zipcfLC2Z}, year={2023}, } ```
TheBloke/deepseek-coder-5.7bmqa-base-GGUF
TheBloke
2023-11-05T15:25:09Z
1,013
3
transformers
[ "transformers", "gguf", "deepseek", "base_model:deepseek-ai/deepseek-coder-5.7bmqa-base", "license:other", "region:us" ]
null
2023-11-05T13:53:22Z
--- base_model: deepseek-ai/deepseek-coder-5.7bmqa-base inference: false license: other license_link: LICENSE license_name: deepseek model_creator: DeepSeek model_name: Deepseek Coder 5.7Bmqa Base model_type: deepseek prompt_template: '{prompt} ' quantized_by: TheBloke --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Deepseek Coder 5.7Bmqa Base - GGUF - Model creator: [DeepSeek](https://huggingface.co/deepseek-ai) - Original model: [Deepseek Coder 5.7Bmqa Base](https://huggingface.co/deepseek-ai/deepseek-coder-5.7bmqa-base) <!-- description start --> ## Description This repo contains GGUF format model files for [DeepSeek's Deepseek Coder 5.7Bmqa Base](https://huggingface.co/deepseek-ai/deepseek-coder-5.7bmqa-base). These files were quantised using hardware kindly provided by [Massed Compute](https://massedcompute.com/). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for story telling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. 
* [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. <!-- README_GGUF.md-about-gguf end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF) * [DeepSeek's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/deepseek-ai/deepseek-coder-5.7bmqa-base) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: None ``` {prompt} ``` <!-- prompt-template end --> <!-- compatibility_gguf start --> ## Compatibility These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) They are also compatible with many third party UIs and libraries - please see the list at the top of this README. ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This end up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. 
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [deepseek-coder-5.7bmqa-base.Q2_K.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q2_K.gguf) | Q2_K | 2 | 2.43 GB| 4.93 GB | smallest, significant quality loss - not recommended for most purposes | | [deepseek-coder-5.7bmqa-base.Q3_K_S.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q3_K_S.gguf) | Q3_K_S | 3 | 2.50 GB| 5.00 GB | very small, high quality loss | | [deepseek-coder-5.7bmqa-base.Q3_K_M.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q3_K_M.gguf) | Q3_K_M | 3 | 2.78 GB| 5.28 GB | very small, high quality loss | | [deepseek-coder-5.7bmqa-base.Q3_K_L.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q3_K_L.gguf) | Q3_K_L | 3 | 3.02 GB| 5.52 GB | small, substantial quality loss | | [deepseek-coder-5.7bmqa-base.Q4_0.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q4_0.gguf) | Q4_0 | 4 | 3.24 GB| 5.74 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [deepseek-coder-5.7bmqa-base.Q4_K_S.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q4_K_S.gguf) | Q4_K_S | 4 | 3.27 GB| 5.77 GB | small, greater quality loss | | [deepseek-coder-5.7bmqa-base.Q4_K_M.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q4_K_M.gguf) | Q4_K_M | 4 | 3.43 GB| 5.93 GB | medium, balanced quality - recommended | | [deepseek-coder-5.7bmqa-base.Q5_0.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q5_0.gguf) | Q5_0 | 5 | 3.94 GB| 6.44 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [deepseek-coder-5.7bmqa-base.Q5_K_S.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q5_K_S.gguf) | Q5_K_S | 5 | 3.94 GB| 6.44 GB | large, low quality loss - recommended | | [deepseek-coder-5.7bmqa-base.Q5_K_M.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q5_K_M.gguf) | Q5_K_M | 5 | 4.04 GB| 6.54 GB | large, very low quality loss - recommended | | [deepseek-coder-5.7bmqa-base.Q6_K.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q6_K.gguf) | Q6_K | 6 | 4.68 GB| 7.18 GB | very large, extremely low quality loss | | [deepseek-coder-5.7bmqa-base.Q8_0.gguf](https://huggingface.co/TheBloke/deepseek-coder-5.7bmqa-base-GGUF/blob/main/deepseek-coder-5.7bmqa-base.Q8_0.gguf) | Q8_0 | 8 | 6.06 GB| 8.56 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. 
The following clients/libraries will automatically download models for you, providing a list of available models to choose from: * LM Studio * LoLLMS Web UI * Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/deepseek-coder-5.7bmqa-base-GGUF and below it, a specific filename to download, such as: deepseek-coder-5.7bmqa-base.Q4_K_M.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/deepseek-coder-5.7bmqa-base-GGUF deepseek-coder-5.7bmqa-base.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/deepseek-coder-5.7bmqa-base-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/deepseek-coder-5.7bmqa-base-GGUF deepseek-coder-5.7bmqa-base.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 32 -m deepseek-coder-5.7bmqa-base.Q4_K_M.gguf --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "{prompt}" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 2048` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. 
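### How to load this model in Python code, using llama-cpp-python

The following is a short, illustrative sketch rather than an official recipe: it assumes a reasonably recent `llama-cpp-python` build with GGUF support (`pip install llama-cpp-python`) and uses the Q4_K_M file from the table above, downloaded to the current directory.

```python
from llama_cpp import Llama

# Set n_gpu_layers to the number of layers to offload to the GPU (0 = CPU-only).
llm = Llama(
    model_path="./deepseek-coder-5.7bmqa-base.Q4_K_M.gguf",
    n_ctx=2048,
    n_gpu_layers=32,
)

output = llm(
    "#write a quick sort algorithm in python\n",
    max_tokens=256,
    temperature=0.2,
)
print(output["choices"][0]["text"])
```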
### How to load this model in Python code, using ctransformers #### First install the package Run one of the following commands, according to your system: ```shell # Base ctransformers with no GPU acceleration pip install ctransformers # Or with CUDA GPU acceleration pip install ctransformers[cuda] # Or with AMD ROCm GPU acceleration (Linux only) CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers # Or with Metal GPU acceleration for macOS systems only CT_METAL=1 pip install ctransformers --no-binary ctransformers ``` #### Simple ctransformers example code ```python from ctransformers import AutoModelForCausalLM # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = AutoModelForCausalLM.from_pretrained("TheBloke/deepseek-coder-5.7bmqa-base-GGUF", model_file="deepseek-coder-5.7bmqa-base.Q4_K_M.gguf", model_type="deepseek", gpu_layers=50) print(llm("AI is going to")) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](llm-utils)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. **Patreon special mentions**: Brandon Frisco, LangChain4j, Spiking Neurons AB, transmissions 11, Joseph William Delisle, Nitin Borwankar, Willem Michiel, Michael Dempsey, vamX, Jeffrey Morgan, zynix, jjj, Omer Bin Jawed, Sean Connelly, jinyuan sun, Jeromy Smith, Shadi, Pawan Osman, Chadd, Elijah Stavena, Illia Dulskyi, Sebastain Graf, Stephen Murray, terasurfer, Edmond Seymore, Celu Ramasamy, Mandus, Alex, biorpg, Ajan Kanaga, Clay Pascal, Raven Klaugh, 阿明, K, ya boyyy, usrbinkat, Alicia Loh, John Villwock, ReadyPlayerEmma, Chris Smitley, Cap'n Zoog, fincy, GodLy, S_X, sidney chen, Cory Kujawski, OG, Mano Prime, AzureBlack, Pieter, Kalila, Spencer Kim, Tom X Nguyen, Stanislav Ovsiannikov, Michael Levine, Andrey, Trailburnt, Vadim, Enrico Ros, Talal Aujan, Brandon Phillips, Jack West, Eugene Pentland, Michael Davis, Will Dee, webtim, Jonathan Leane, Alps Aficionado, Rooh Singh, Tiffany J. 
Kim, theTransient, Luke @flexchar, Elle, Caitlyn Gatomon, Ari Malik, subjectnull, Johann-Peter Hartmann, Trenton Dambrowitz, Imad Khwaja, Asp the Wyvern, Emad Mostaque, Rainer Wilmers, Alexandros Triantafyllidis, Nicholas, Pedro Madruga, SuperWojo, Harry Royden McLaughlin, James Bentley, Olakabola, David Ziegler, Ai Maven, Jeff Scroggin, Nikolai Manek, Deo Leter, Matthew Berman, Fen Risland, Ken Nordquist, Manuel Alberto Morcote, Luke Pendergrass, TL, Fred von Graf, Randy H, Dan Guido, NimbleBox.ai, Vitor Caleffi, Gabriel Tamborski, knownsqashed, Lone Striker, Erik Bjäreholt, John Detwiler, Leonard Tan, Iucharbius Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: DeepSeek's Deepseek Coder 5.7Bmqa Base <p align="center"> <img width="1000px" alt="DeepSeek Coder" src="https://github.com/deepseek-ai/DeepSeek-Coder/blob/main/pictures/logo.png?raw=true"> </p> <p align="center"><a href="https://www.deepseek.com/">[🏠Homepage]</a> | <a href="https://coder.deepseek.com/">[🤖 Chat with DeepSeek Coder]</a> | <a href="https://discord.gg/Tc7c45Zzu5">[Discord]</a> | <a href="https://github.com/guoday/assert/blob/main/QR.png?raw=true">[Wechat(微信)]</a> </p> <hr> ### 1. Introduction of Deepseek Coder Deepseek Coder is composed of a series of code language models, each trained from scratch on 2T tokens, with a composition of 87% code and 13% natural language in both English and Chinese. We provide various sizes of the code model, ranging from 1B to 33B versions. Each model is pre-trained on project-level code corpus by employing a window size of 16K and a extra fill-in-the-blank task, to support project-level code completion and infilling. For coding capabilities, Deepseek Coder achieves state-of-the-art performance among open-source code models on multiple programming languages and various benchmarks. - **Massive Training Data**: Trained from scratch on 2T tokens, including 87% code and 13% linguistic data in both English and Chinese languages. - **Highly Flexible & Scalable**: Offered in model sizes of 1.3B, 5.7B, 6.7B, and 33B, enabling users to choose the setup most suitable for their requirements. - **Superior Model Performance**: State-of-the-art performance among publicly available code models on HumanEval, MultiPL-E, MBPP, DS-1000, and APPS benchmarks. - **Advanced Code Completion Capabilities**: A window size of 16K and a fill-in-the-blank task, supporting project-level code completion and infilling tasks. ### 2. Model Summary deepseek-coder-5.7bmqa-base is a 5.7B parameter model with Multi Query Attention trained on 2 trillion tokens. - **Home Page:** [DeepSeek](https://deepseek.com/) - **Repository:** [deepseek-ai/deepseek-coder](https://github.com/deepseek-ai/deepseek-coder) - **Chat With DeepSeek Coder:** [DeepSeek-Coder](https://coder.deepseek.com/) ### 3. How to Use Here give some examples of how to use our model. 
#### 1)Code Completion ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-5.7bmqa-base", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-5.7bmqa-base", trust_remote_code=True).cuda() input_text = "#write a quick sort algorithm" inputs = tokenizer(input_text, return_tensors="pt").to(model.device) outputs = model.generate(**inputs, max_length=128) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) ``` #### 2)Code Insertion ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-5.7bmqa-base", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-5.7bmqa-base", trust_remote_code=True).cuda() input_text = """<|fim▁begin|>def quick_sort(arr): if len(arr) <= 1: return arr pivot = arr[0] left = [] right = [] <|fim▁hole|> if arr[i] < pivot: left.append(arr[i]) else: right.append(arr[i]) return quick_sort(left) + [pivot] + quick_sort(right)<|fim▁end|>""" inputs = tokenizer(input_text, return_tensors="pt").to(model.device) outputs = model.generate(**inputs, max_length=128) print(tokenizer.decode(outputs[0], skip_special_tokens=True)[len(input_text):]) ``` #### 3)Repository Level Code Completion ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/deepseek-coder-5.7bmqa-base", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("deepseek-ai/deepseek-coder-5.7bmqa-base", trust_remote_code=True).cuda() input_text = """#utils.py import torch from sklearn import datasets from sklearn.model_selection import train_test_split from sklearn.preprocessing import StandardScaler from sklearn.metrics import accuracy_score def load_data(): iris = datasets.load_iris() X = iris.data y = iris.target # Standardize the data scaler = StandardScaler() X = scaler.fit_transform(X) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42) # Convert numpy data to PyTorch tensors X_train = torch.tensor(X_train, dtype=torch.float32) X_test = torch.tensor(X_test, dtype=torch.float32) y_train = torch.tensor(y_train, dtype=torch.int64) y_test = torch.tensor(y_test, dtype=torch.int64) return X_train, X_test, y_train, y_test def evaluate_predictions(y_test, y_pred): return accuracy_score(y_test, y_pred) #model.py import torch import torch.nn as nn import torch.optim as optim from torch.utils.data import DataLoader, TensorDataset class IrisClassifier(nn.Module): def __init__(self): super(IrisClassifier, self).__init__() self.fc = nn.Sequential( nn.Linear(4, 16), nn.ReLU(), nn.Linear(16, 3) ) def forward(self, x): return self.fc(x) def train_model(self, X_train, y_train, epochs, lr, batch_size): criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(self.parameters(), lr=lr) # Create DataLoader for batches dataset = TensorDataset(X_train, y_train) dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True) for epoch in range(epochs): for batch_X, batch_y in dataloader: optimizer.zero_grad() outputs = self(batch_X) loss = criterion(outputs, batch_y) loss.backward() optimizer.step() def predict(self, X_test): with torch.no_grad(): outputs = self(X_test) _, predicted = outputs.max(1) return predicted.numpy() #main.py from utils import load_data, evaluate_predictions from model import IrisClassifier as 
Classifier def main(): # Model training and evaluation """ inputs = tokenizer(input_text, return_tensors="pt").to(model.device) outputs = model.generate(**inputs, max_new_tokens=140) print(tokenizer.decode(outputs[0])) ``` ### 4. License This code repository is licensed under the MIT License. The use of DeepSeek Coder models is subject to the Model License. DeepSeek Coder supports commercial use. See the [LICENSE-MODEL](https://github.com/deepseek-ai/deepseek-coder/blob/main/LICENSE-MODEL) for more details. ### 5. Contact If you have any questions, please raise an issue or contact us at [[email protected]](mailto:[email protected]). <!-- original-model-card end -->
ibivibiv/alpaca-dragon-72b-v1
ibivibiv
2024-03-02T22:20:25Z
1,013
24
transformers
[ "transformers", "safetensors", "llama", "text-generation", "en", "license:other", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-06T21:41:16Z
--- language: - en license: other library_name: transformers model-index: - name: alpaca-dragon-72b-v1 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 73.89 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/alpaca-dragon-72b-v1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.16 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/alpaca-dragon-72b-v1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 77.4 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/alpaca-dragon-72b-v1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 72.69 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/alpaca-dragon-72b-v1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 86.03 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/alpaca-dragon-72b-v1 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 77.63 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=ibivibiv/alpaca-dragon-72b-v1 name: Open LLM Leaderboard --- # Model Card for Alpaca Dragon 72B V1 Fine tune of [Smaug 72b v0.1](https://huggingface.co/abacusai/Smaug-72B-v0.1) using an alpaca data set I have handy. The data is of planning and reasoning, which I use to help allow a model to break down a set of asks into a logical plan. For some odd reason it bumps the mmlu and winogrande? I would have expected the ARC to go up over those two, but this is often more of an artform than a science at times. All thanks to [Abacus.AI](https://huggingface.co/abacusai) for sharing their work. I used the same dataset in training one of my owl series [Strix Rufipes 70B](https://huggingface.co/ibivibiv/strix-rufipes-70b), which has worked well for planning out development tasks and other technical work. ![img](./alpaca_dragon.png) # LICENSE Note the license points back to SMAUG base license as it is a fine tune of their model only. Respect and abide by their conditions. Again, many thanks to Abacus for making their work open and use that as inspiration to keep your work open and respect their license agreements. [License Link](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) ## How to Get Started with the Model Use the code below to get started with the model. 
``` # Load model directly from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("ibivibiv/alpaca-dragon-72b-v1") model = AutoModelForCausalLM.from_pretrained("ibivibiv/alpaca-dragon-72b-v1") inputs = tokenizer("### Instruction: Create a plan for developing the game of snake in python using pygame.\n### Response:\n", return_tensors="pt", return_attention_mask=False) outputs = model.generate(**inputs, max_length=200) text = tokenizer.batch_decode(outputs)[0] print(text) ``` ## Evaluation | Test Name | Accuracy (%) | |---------------------------------|--------------| | All | 77.31 | | arc:challenge | 70.82 | | hellaswag | 69.84 | | hendrycksTest-abstract_algebra | 42.00 | | hendrycksTest-anatomy | 71.85 | | hendrycksTest-astronomy | 86.84 | | hendrycksTest-business_ethics | 82.00 | | hendrycksTest-clinical_knowledge| 84.53 | | hendrycksTest-college_biology | 93.06 | | hendrycksTest-college_chemistry | 54.00 | | hendrycksTest-college_computer_science | 65.00 | | hendrycksTest-college_mathematics | 52.00 | | hendrycksTest-college_medicine | 75.14 | | hendrycksTest-college_physics | 55.88 | | hendrycksTest-computer_security | 82.00 | | hendrycksTest-conceptual_physics| 80.43 | | hendrycksTest-econometrics | 60.53 | | hendrycksTest-electrical_engineering | 79.31 | | hendrycksTest-elementary_mathematics | 70.37 | | hendrycksTest-formal_logic | 58.73 | | hendrycksTest-global_facts | 54.00 | | hendrycksTest-high_school_biology | 88.39 | | hendrycksTest-high_school_chemistry | 66.01 | | hendrycksTest-high_school_computer_science | 82.00 | | hendrycksTest-high_school_european_history | 84.24 | | hendrycksTest-high_school_geography | 94.44 | | hendrycksTest-high_school_government_and_politics | 98.96 | | hendrycksTest-high_school_macroeconomics | 82.05 | | hendrycksTest-high_school_mathematics | 45.93 | | hendrycksTest-high_school_microeconomics | 86.13 | | hendrycksTest-high_school_physics | 54.97 | | hendrycksTest-high_school_psychology | 92.84 | | hendrycksTest-high_school_statistics | 68.98 | | hendrycksTest-high_school_us_history | 91.67 | | hendrycksTest-high_school_world_history | 89.87 | | hendrycksTest-human_aging | 78.03 | | hendrycksTest-human_sexuality | 89.31 | | hendrycksTest-international_law | 90.91 | | hendrycksTest-jurisprudence | 87.96 | | hendrycksTest-logical_fallacies | 84.05 | | hendrycksTest-machine_learning | 58.93 | | hendrycksTest-management | 87.38 | | hendrycksTest-marketing | 95.30 | | hendrycksTest-medical_genetics | 86.00 | | hendrycksTest-miscellaneous | 92.21 | | hendrycksTest-moral_disputes | 83.53 | | hendrycksTest-moral_scenarios | 69.72 | | hendrycksTest-nutrition | 85.62 | | hendrycksTest-philosophy | 83.60 | | hendrycksTest-prehistory | 87.04 | | hendrycksTest-professional_accounting | 65.96 | | hendrycksTest-professional_law | 60.69 | | hendrycksTest-professional_medicine | 82.72 | | hendrycksTest-professional_psychology | 81.86 | | hendrycksTest-public_relations | 75.45 | | hendrycksTest-security_studies | 82.04 | | hendrycksTest-sociology | 88.56 | | hendrycksTest-us_foreign_policy | 94.00 | | hendrycksTest-virology | 57.23 | | hendrycksTest-world_religions | 89.47 | | truthfulqa:mc | 72.6 | | winogrande | 86.03 | | gsm8k | 77.63 | ## Environmental Impact - **Hardware Type:** [A100's..... more than I wanted to use since its all on my $$$] - **Hours used:** [8] - **Cloud Provider:** [runpod.io] - **Compute Region:** [US] - **Carbon Emitted:** [?] 
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_ibivibiv__alpaca-dragon-72b-v1) | Metric |Value| |---------------------------------|----:| |Avg. |79.30| |AI2 Reasoning Challenge (25-Shot)|73.89| |HellaSwag (10-Shot) |88.16| |MMLU (5-Shot) |77.40| |TruthfulQA (0-shot) |72.69| |Winogrande (5-shot) |86.03| |GSM8k (5-shot) |77.63|
cstr/Spaetzle-v8-7b
cstr
2024-04-18T09:56:43Z
1,013
1
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "flemmingmiguel/NeuDist-Ro-7B", "johannhartmann/Brezn3", "ResplendentAI/Flora_DPO_7B", "conversational", "de", "en", "base_model:flemmingmiguel/NeuDist-Ro-7B", "base_model:johannhartmann/Brezn3", "base_model:ResplendentAI/Flora_DPO_7B", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-10T18:50:38Z
--- tags: - merge - mergekit - lazymergekit - flemmingmiguel/NeuDist-Ro-7B - johannhartmann/Brezn3 - ResplendentAI/Flora_DPO_7B base_model: - flemmingmiguel/NeuDist-Ro-7B - johannhartmann/Brezn3 - ResplendentAI/Flora_DPO_7B language: - de - en --- # Spaetzle-v8-7b This model is supposed to show adequate performance in German and English on a number of tasks, while mostly behaving well, that is, without rambling on, intermixing tokens from the different templates seen in training and adaptation, etc. It is mostly a quick test and considerably weaker in German grammar and orthography than, for example, DiscoLM; but for use cases where that matters less than instruction following, reasoning, and similar abilities, it might actually be a little preferable. It is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [flemmingmiguel/NeuDist-Ro-7B](https://huggingface.co/flemmingmiguel/NeuDist-Ro-7B) * [johannhartmann/Brezn3](https://huggingface.co/johannhartmann/Brezn3) * [ResplendentAI/Flora_DPO_7B](https://huggingface.co/ResplendentAI/Flora_DPO_7B) * on the basis of [mayflowergmbh/Wiedervereinigung-7b-dpo-laser](https://huggingface.co/mayflowergmbh/Wiedervereinigung-7b-dpo-laser) All credits are due to the creators of those original models and the training datasets involved. For a suitable quantized version, try [cstr/Spaetzle-v8-7b-GGUF](https://huggingface.co/cstr/Spaetzle-v8-7b-GGUF) ## Evaluation [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_cstr__Spaetzle-v8-7b) | Metric |Value| |---------------------------------|----:| |Avg.
|72.27| |AI2 Reasoning Challenge (25-Shot)|68.69| |HellaSwag (10-Shot) |86.68| |MMLU (5-Shot) |64.60| |TruthfulQA (0-shot) |64.05| |Winogrande (5-shot) |81.45| |GSM8k (5-shot) |68.16| EQ-Bench (v2_de): 61.04 / english (v2): 78.3 [ScandEval](https://scandeval.com/german-nlg/) 12.5.2 scores | Benchmark | Spaetzle-v8-7b Value | |-----------------------|----------------------------------------------------| | Model ID | cstr/Spaetzle-v8-7b (few-shot, val) | | Parameters | 7242 | | Vocabulary Size | 32 | | Context | 32768 | | Commercial | False | | Speed | 5,980 ± 1,031 / 1,714 ± 552 | | Rank | 1.85 | | GermEval | 58.90 ± 2.30 / 45.55 ± 3.30 | | SB10k | 61.34 ± 1.90 / 72.98 ± 1.30 | | ScaLA-De | 31.58 ± 4.39 / 65.51 ± 2.23 | | GermanQuAD | 24.91 ± 3.98 / 60.88 ± 3.31 | | MLSum | 67.25 ± 1.06 / 22.95 ± 2.64 | | MMLU-De | 34.62 ± 2.20 / 50.43 ± 1.52 | | HellaSwag-De | 48.70 ± 2.47 / 61.05 ± 1.79 | | Model |AGIEval|GPT4All|TruthfulQA|Bigbench|Average| |------------------------------------------------------------|------:|------:|---------:|-------:|------:| |[Spaetzle-v8-7b](https://huggingface.co/cstr/Spaetzle-v8-7b)| 45.31| 75.69| 63.94| 45.57| 57.63| ### AGIEval | Task |Version| Metric |Value| |Stderr| |------------------------------|------:|--------|----:|---|-----:| |agieval_aqua_rat | 0|acc |25.59|± | 2.74| | | |acc_norm|24.80|± | 2.72| |agieval_logiqa_en | 0|acc |39.63|± | 1.92| | | |acc_norm|39.78|± | 1.92| |agieval_lsat_ar | 0|acc |23.48|± | 2.80| | | |acc_norm|24.35|± | 2.84| |agieval_lsat_lr | 0|acc |50.98|± | 2.22| | | |acc_norm|51.96|± | 2.21| |agieval_lsat_rc | 0|acc |62.08|± | 2.96| | | |acc_norm|62.83|± | 2.95| |agieval_sat_en | 0|acc |78.64|± | 2.86| | | |acc_norm|79.13|± | 2.84| |agieval_sat_en_without_passage| 0|acc |44.66|± | 3.47| | | |acc_norm|44.66|± | 3.47| |agieval_sat_math | 0|acc |37.27|± | 3.27| | | |acc_norm|35.00|± | 3.22| Average: 45.31% ### GPT4All | Task |Version| Metric |Value| |Stderr| |-------------|------:|--------|----:|---|-----:| |arc_challenge| 0|acc |63.14|± | 1.41| | | |acc_norm|64.51|± | 1.40| |arc_easy | 0|acc |85.98|± | 0.71| | | |acc_norm|82.49|± | 0.78| |boolq | 1|acc |88.10|± | 0.57| |hellaswag | 0|acc |66.31|± | 0.47| | | |acc_norm|85.17|± | 0.35| |openbookqa | 0|acc |38.00|± | 2.17| | | |acc_norm|47.20|± | 2.23| |piqa | 0|acc |83.35|± | 0.87| | | |acc_norm|84.17|± | 0.85| |winogrande | 0|acc |78.22|± | 1.16| Average: 75.69% ### TruthfulQA | Task |Version|Metric|Value| |Stderr| |-------------|------:|------|----:|---|-----:| |truthfulqa_mc| 1|mc1 |47.74|± | 1.75| | | |mc2 |63.94|± | 1.53| Average: 63.94% ### Bigbench | Task |Version| Metric |Value| |Stderr| |------------------------------------------------|------:|---------------------|----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|56.84|± | 3.60| |bigbench_date_understanding | 0|multiple_choice_grade|66.12|± | 2.47| |bigbench_disambiguation_qa | 0|multiple_choice_grade|41.47|± | 3.07| |bigbench_geometric_shapes | 0|multiple_choice_grade|22.01|± | 2.19| | | |exact_str_match | 0.00|± | 0.00| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|31.40|± | 2.08| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|23.14|± | 1.60| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|56.00|± | 2.87| |bigbench_movie_recommendation | 0|multiple_choice_grade|45.00|± | 2.23| |bigbench_navigate | 0|multiple_choice_grade|50.70|± | 1.58| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|70.05|± | 1.02| |bigbench_ruin_names | 
0|multiple_choice_grade|45.54|± | 2.36| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|26.05|± | 1.39| |bigbench_snarks | 0|multiple_choice_grade|71.82|± | 3.35| |bigbench_sports_understanding | 0|multiple_choice_grade|72.92|± | 1.42| |bigbench_temporal_sequences | 0|multiple_choice_grade|44.20|± | 1.57| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|22.80|± | 1.19| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|18.23|± | 0.92| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|56.00|± | 2.87| Average: 45.57% Average score: 57.63% ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "cstr/Spaetzle-v8-7b" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` ## 🧩 Configuration The model uses ChatML and should work well with this (as it is merged from models which (mostly) saw ChatML templates in training). ```yaml models: - model: mayflowergmbh/Wiedervereinigung-7b-dpo-laser # no parameters necessary for base model - model: flemmingmiguel/NeuDist-Ro-7B parameters: density: 0.60 weight: 0.30 - model: johannhartmann/Brezn3 parameters: density: 0.65 weight: 0.40 - model: ResplendentAI/Flora_DPO_7B parameters: density: 0.6 weight: 0.3 merge_method: dare_ties base_model: mayflowergmbh/Wiedervereinigung-7b-dpo-laser parameters: int8_mask: true dtype: bfloat16 random_seed: 0 tokenizer_source: base ```
moondream/moondream2-gguf
moondream
2024-04-25T20:53:35Z
1,013
4
null
[ "gguf", "region:us" ]
null
2024-04-25T20:20:58Z
Entry not found
dbaranchuk/sd15-cfg-distill-unet
dbaranchuk
2024-06-25T14:29:34Z
1,013
0
diffusers
[ "diffusers", "safetensors", "arxiv:1910.09700", "region:us" ]
null
2024-06-25T14:27:44Z
--- library_name: diffusers --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🧨 diffusers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. 
--> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
timm/tf_efficientnetv2_b3.in21k_ft_in1k
timm
2023-04-27T22:17:23Z
1,012
2
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "dataset:imagenet-21k", "arxiv:2104.00298", "license:apache-2.0", "region:us" ]
image-classification
2022-12-13T00:15:14Z
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for tf_efficientnetv2_b3.in21k_ft_in1k A EfficientNet-v2 image classification model. Trained on ImageNet-21k and fine-tuned on ImageNet-1k in Tensorflow by paper authors, ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 14.4 - GMACs: 1.9 - Activations (M): 10.0 - Image size: train = 240 x 240, test = 300 x 300 - **Papers:** - EfficientNetV2: Smaller Models and Faster Training: https://arxiv.org/abs/2104.00298 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k - **Original:** https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('tf_efficientnetv2_b3.in21k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Feature Map Extraction ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'tf_efficientnetv2_b3.in21k_ft_in1k', pretrained=True, features_only=True, ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 for o in output: # print shape of each feature map in output # e.g.: # torch.Size([1, 16, 120, 120]) # torch.Size([1, 40, 60, 60]) # torch.Size([1, 56, 30, 30]) # torch.Size([1, 136, 15, 15]) # torch.Size([1, 232, 8, 8]) print(o.shape) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'tf_efficientnetv2_b3.in21k_ft_in1k', pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 1536, 8, 8) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). 
## Citation ```bibtex @inproceedings{tan2021efficientnetv2, title={Efficientnetv2: Smaller models and faster training}, author={Tan, Mingxing and Le, Quoc}, booktitle={International conference on machine learning}, pages={10096--10106}, year={2021}, organization={PMLR} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
allenai/vila-roberta-large-s2vl-internal
allenai
2023-08-01T18:09:37Z
1,012
0
transformers
[ "transformers", "pytorch", "onnx", "roberta", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
2023-08-01T17:54:05Z
Entry not found
TeeZee/DarkForest-20B-v2.0
TeeZee
2024-06-13T11:32:26Z
1,012
24
transformers
[ "transformers", "safetensors", "llama", "text-generation", "merge", "not-for-all-audiences", "license:other", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-15T15:30:33Z
--- license: other tags: - merge - not-for-all-audiences license_name: microsoft-research-license model-index: - name: DarkForest-20B-v2.0 results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 63.74 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/DarkForest-20B-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 86.32 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/DarkForest-20B-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 59.79 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/DarkForest-20B-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 56.14 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/DarkForest-20B-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 77.9 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/DarkForest-20B-v2.0 name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 23.28 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=TeeZee/DarkForest-20B-v2.0 name: Open LLM Leaderboard --- # DarkForest 20B v2.0 ![image/png](https://huggingface.co/TeeZee/DarkForest-20B-v2.0/resolve/main/DarkForest-20B-v2.0.jpg) ## Model Details - To create this model two step procedure was used. First a new 20B model was created using [microsoft/Orca-2-13b](https://huggingface.co/microsoft/Orca-2-13b) and [KoboldAI/LLaMA2-13B-Erebus-v3](https://huggingface.co/KoboldAI/LLaMA2-13B-Erebus-v3) , deatils of the merge in [darkforest_v2_step1.yml](https://huggingface.co/TeeZee/DarkForest-20B-v2.0/resolve/main/darkforest_v2_step1.yml) - then [jebcarter/psyonic-cetacean-20B](https://huggingface.co/jebcarter/psyonic-cetacean-20B) - and [TeeZee/BigMaid-20B-v1.0](https://huggingface.co/TeeZee/BigMaid-20B-v1.0) was used to produce the final model, merge config in [darkforest_v2_step2.yml](https://huggingface.co/TeeZee/DarkForest-20B-v2.0/resolve/main/darkforest_v2_step2.yml) - The resulting model has approximately 20 billion parameters. **Warning: This model can produce NSFW content!** ## Results - main difference to v1.0 - model has much better sense of humor. - produces SFW nad NSFW content without issues, switches context seamlessly. - good at following instructions. - good at tracking multiple characters in one scene. 
- very creative, scenarios produced are mature and complicated, the model doesn't shy away from writing about PTSD, mental issues or complicated relationships. - NSFW output is more creative and surprising than typical limaRP output. - definitely for mature audiences, not only because of vivid NSFW content but also because of the overall maturity of the stories it produces. - This is NOT Harry Potter level storytelling. All comments are greatly appreciated; download, test, and if you appreciate my work, consider buying me my fuel: <a href="https://www.buymeacoffee.com/TeeZee" target="_blank"><img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png" alt="Buy Me A Coffee" style="height: 60px !important;width: 217px !important;" ></a> # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_TeeZee__DarkForest-20B-v2.0) | Metric |Value| |---------------------------------|----:| |Avg. |61.19| |AI2 Reasoning Challenge (25-Shot)|63.74| |HellaSwag (10-Shot) |86.32| |MMLU (5-Shot) |59.79| |TruthfulQA (0-shot) |56.14| |Winogrande (5-shot) |77.90| |GSM8k (5-shot) |23.28|
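## Quickstart (illustrative sketch)

The original card does not include a loading example, so the snippet below is only a minimal sketch of how a merged 20B Llama-architecture model like this one can be run with 🤗 Transformers. The prompt text, dtype, and generation settings are illustrative assumptions, not recommendations from the model author.

```python
# Unofficial usage sketch: the model id comes from this card; everything else is an assumption.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "TeeZee/DarkForest-20B-v2.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,  # a 20B model needs roughly 40 GB of VRAM in fp16; quantized builds are an alternative
    device_map="auto",
)

prompt = "Write the opening scene of a slow-burn mystery set in an abandoned observatory."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=300, do_sample=True, temperature=0.8)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```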
rombodawg/EveryoneLLM-7b-Gemma-Base
rombodawg
2024-03-12T03:19:36Z
1,012
2
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "merge", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-03-11T03:53:28Z
--- license: other license_name: gemma-terms-of-use license_link: https://ai.google.dev/gemma/terms tags: - merge --- EveryoneLLM-7b-Gemma-Base ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/85jTMkzdKWv4V8aQntMY-.jpeg) Quantizations: GGUF - https://huggingface.co/rombodawg/EveryoneLLM-7b-Gemma-Base-GGUF EveryoneLLM series of models made by the community, for the community. This is the second version of Everyone-LLM using Gemma-7b, a model that combines the power of the large majority of powerful fine-tuned LLMs made by the community, to create a vast and knowledgeable LLM with various abilities and an extra emphasis on coding capabilities. Prompt template: Alpaca ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {prompt} ### Response: ``` The models that were used in this merger were as follows: - https://huggingface.co/openchat/openchat-3.5-0106-gemma - https://huggingface.co/TechxGenus/CodeGemma-7b - https://huggingface.co/VAGOsolutions/SauerkrautLM-Gemma-7b - https://huggingface.co/macadeliccc/gemma-orchid-7b-dpo - https://huggingface.co/HuggingFaceH4/zephyr-7b-gemma-v0.1 - https://huggingface.co/CorticalStack/gemma-7b-ultrachat-sft - https://huggingface.co/google/gemma-7b Thank you to the creators of the above AI models; they have full credit for the EveryoneLLM series of models. Without their hard work we wouldn't be able to achieve the great success we have in the open source community. 💗 This model was merged in 2 parts. The order of parts is listed below, followed by a copy-and-pasteable version. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/642cc1c253e76b4c2286c58e/Ctla5otlla8UaTgW9fODc.png) ```yaml models: - model: VAGOsolutions_SauerkrautLM-Gemma-7b parameters: weight: 1 - model: macadeliccc_gemma-orchid-7b-dpo parameters: weight: 1 - model: HuggingFaceH4_zephyr-7b-gemma-v0.1 parameters: weight: 1 - model: CorticalStack_gemma-7b-ultrachat-sft parameters: weight: 1 merge_method: task_arithmetic base_model: gemma-7b-base parameters: normalize: true int8_mask: true dtype: float16 ``` ```yaml models: - model: Gemma-Merge-1-7b parameters: weight: 1 - model: openchat_openchat-3.5-0106-gemma parameters: weight: 1 - model: TechxGenus_CodeGemma-7b parameters: weight: 1 merge_method: task_arithmetic base_model: gemma-7b-base parameters: normalize: true int8_mask: true dtype: float16 ```
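Example usage (unofficial sketch): the card specifies the Alpaca prompt template but does not show a loading example, so the snippet below simply wraps an instruction in that template and generates with 🤗 Transformers. The instruction text, dtype, and generation settings are illustrative assumptions rather than the author's recommendations.

```python
# Unofficial sketch: builds the Alpaca-style prompt from this card and runs generation.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "rombodawg/EveryoneLLM-7b-Gemma-Base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

instruction = "Write a Python function that checks whether a string is a palindrome."
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    f"### Instruction:\n{instruction}\n\n### Response:\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```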
cointegrated/rut5-base-paraphraser
cointegrated
2023-03-17T10:21:29Z
1,011
16
transformers
[ "transformers", "pytorch", "safetensors", "t5", "text2text-generation", "russian", "paraphrasing", "paraphraser", "paraphrase", "ru", "dataset:cointegrated/ru-paraphrase-NMT-Leipzig", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text2text-generation
2022-03-02T23:29:05Z
--- language: ["ru"] tags: - russian - paraphrasing - paraphraser - paraphrase license: mit widget: - text: "Каждый охотник желает знать, где сидит фазан." datasets: - cointegrated/ru-paraphrase-NMT-Leipzig --- This is a paraphraser for Russian sentences described [in this Habr post](https://habr.com/ru/post/564916/). It is recommended to use the model with the `encoder_no_repeat_ngram_size` argument: ``` from transformers import T5ForConditionalGeneration, T5Tokenizer MODEL_NAME = 'cointegrated/rut5-base-paraphraser' model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME) tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME) model.cuda(); model.eval(); def paraphrase(text, beams=5, grams=4, do_sample=False): x = tokenizer(text, return_tensors='pt', padding=True).to(model.device) max_size = int(x.input_ids.shape[1] * 1.5 + 10) out = model.generate(**x, encoder_no_repeat_ngram_size=grams, num_beams=beams, max_length=max_size, do_sample=do_sample) return tokenizer.decode(out[0], skip_special_tokens=True) print(paraphrase('Каждый охотник желает знать, где сидит фазан.')) # Все охотники хотят знать где фазан сидит. ```
jegormeister/robbert-v2-dutch-base-mqa-finetuned
jegormeister
2022-04-11T19:09:29Z
1,011
3
sentence-transformers
[ "sentence-transformers", "pytorch", "roberta", "feature-extraction", "sentence-similarity", "transformers", "robbert", "nl", "dataset:clips/mqa", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
2022-04-11T13:40:02Z
--- language: nl pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers - robbert datasets: - clips/mqa --- # jegorkitskerkin/robbert-v2-dutch-base-mqa-finetuned This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. This model is a fine-tuned version of [pdelobelle/robbert-v2-dutch-base](https://huggingface.co/pdelobelle/robbert-v2-dutch-base). It was fine-tuned on 1,000,000 rows of Dutch FAQ question-answer pairs from [clips/mqa](https://huggingface.co/datasets/clips/mqa). ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('jegorkitskerkin/robbert-v2-dutch-base-mqa-finetuned') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. ```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('jegorkitskerkin/robbert-v2-dutch-base-mqa-finetuned') model = AutoModel.from_pretrained('jegorkitskerkin/robbert-v2-dutch-base-mqa-finetuned') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. 
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Training The model was trained with the parameters: **DataLoader**: `torch.utils.data.dataloader.DataLoader` of length 12500 with parameters: ``` {'batch_size': 80, 'sampler': 'torch.utils.data.sampler.RandomSampler', 'batch_sampler': 'torch.utils.data.sampler.BatchSampler'} ``` **Loss**: `sentence_transformers.losses.MultipleNegativesRankingLoss.MultipleNegativesRankingLoss` with parameters: ``` {'scale': 20.0, 'similarity_fct': 'cos_sim'} ``` Parameters of the fit()-Method: ``` { "epochs": 3, "evaluation_steps": 0, "evaluator": "NoneType", "max_grad_norm": 1, "optimizer_class": "<class 'transformers.optimization.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, "warmup_steps": 10000, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
doas/test5
doas
2023-08-23T07:34:48Z
1,011
0
transformers
[ "transformers", "pytorch", "llama", "feature-extraction", "endpoints_compatible", "text-generation-inference", "region:us" ]
feature-extraction
2023-08-23T01:53:03Z
Entry not found
seeklhy/codes-1b
seeklhy
2023-09-04T12:36:59Z
1,011
3
transformers
[ "transformers", "pytorch", "gpt_bigcode", "text-generation", "SQL generation", "Text-to-SQL", "text2sql", "sql", "code", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2023-08-27T09:11:37Z
--- language: - sql - code tags: - SQL generation - Text-to-SQL - text2sql license: "apache-2.0" --- # CodeS-1B CodeS is a series of Code LLMs specifically optimized for SQL generation. The CodeS series encompasses 1B, 3B, 7B, and 15B scales. CodeS-1B, 3B, and 7B are incrementally pre-trained on top of StarCoderBase-1B, 3B, and 7B and support a maximum length of 8,192 tokens. Meanwhile, CodeS-15B, derived from StarCoder-15B, accommodates sequences of up to 6,144 tokens. We have demonstrated that CodeS achieves new state-of-the-art performance on two challenging Text-to-SQL benchmarks: Spider and Bird. For more details about how to use CodeS, please refer to our GitHub page: https://github.com/RUCKBReasoning/codes. (This is the repository of CodeS-1B.)
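As a rough, unofficial illustration (the official usage lives in the GitHub repository linked above), the checkpoint can be loaded like any other causal LM in 🤗 Transformers. The schema-plus-question prompt layout below is an assumption for demonstration purposes and may differ from the prompt format used in the official code.

```python
# Unofficial sketch: loads CodeS-1B as a causal LM and asks it to complete a SQL query.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "seeklhy/codes-1b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Hypothetical prompt layout: database schema as SQL comments, then the question, then a SELECT stub.
prompt = (
    "-- SQLite schema\n"
    "CREATE TABLE singer(singer_id INTEGER PRIMARY KEY, name TEXT, country TEXT, age INTEGER);\n"
    "-- Question: How many singers are from France?\n"
    "SELECT"
)

inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```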
mlx-community/whisper-large-v3-mlx
mlx-community
2024-01-07T19:29:20Z
1,011
12
transformers
[ "transformers", "whisper", "endpoints_compatible", "region:us" ]
null
2024-01-07T19:22:56Z
Entry not found
Xenon1/Zenith-7B-dpo-v1
Xenon1
2024-02-14T22:25:03Z
1,011
0
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "Zenith-7B-dpo-v1", "conversational", "en", "arxiv:2401.10020", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-14T22:16:45Z
--- language: - en license: apache-2.0 tags: - mistral - Zenith-7B-dpo-v1 pipeline_tag: text-generation --- # Model Card for Zenith-7B-dpo-v1 Mistral-7B-v0.1 model fine-tuned on the Ultrafeedback dataset using techinques shown in the paper [Self-Rewarding Language Models](https://arxiv.org/abs/2401.10020). ## Instruction format In order to leverage instruction fine-tuning, your prompt should be surrounded by `[INST]` and `[/INST]` tokens. The very first instruction should begin with a begin of sentence id. The next instructions should not. The assistant generation will be ended by the end-of-sentence token id. E.g. ``` text = "<s>[INST] What is your favourite condiment? [/INST]" "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!</s> " "[INST] Do you have mayonnaise recipes? [/INST]" ``` This format is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating) via the `apply_chat_template()` method: ```python from transformers import AutoModelForCausalLM, AutoTokenizer device = "cuda" # the device to load the model onto model = AutoModelForCausalLM.from_pretrained("Xenon1/Zenith-7B-dpo-v1") tokenizer = AutoTokenizer.from_pretrained("Xenon1/Zenith-7B-dpo-v1") messages = [ {"role": "user", "content": "What is your favourite condiment?"}, {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}, {"role": "user", "content": "Do you have mayonnaise recipes?"} ] encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt") model_inputs = encodeds.to(device) model.to(device) generated_ids = model.generate(model_inputs, max_new_tokens=1000, do_sample=True) decoded = tokenizer.batch_decode(generated_ids) print(decoded[0]) ``` ## Model Architecture This instruction model is based on Mistral-7B-v0.1, a transformer model with the following architecture choices: - Grouped-Query Attention - Sliding-Window Attention - Byte-fallback BPE tokenizer
hermes42/Meta-Llama-3-70B-Instruct-GGUF
hermes42
2024-04-23T22:16:15Z
1,011
1
null
[ "gguf", "region:us" ]
null
2024-04-23T07:11:13Z
Entry not found
aurelio-ai/sr-test-clip
aurelio-ai
2024-06-01T09:13:21Z
1,011
0
transformers
[ "transformers", "pytorch", "tf", "clip", "zero-shot-image-classification", "endpoints_compatible", "region:us" ]
zero-shot-image-classification
2024-06-01T09:12:48Z
Tiny CLIP model used for [semantic-router](https://github.com/aurelio-labs/semantic-router) tests.
flair/ner-multi-fast
flair
2022-10-04T09:19:01Z
1,010
6
flair
[ "flair", "pytorch", "token-classification", "sequence-tagger-model", "en", "de", "nl", "es", "dataset:conll2003", "region:us" ]
token-classification
2022-03-02T23:29:05Z
--- tags: - flair - token-classification - sequence-tagger-model language: - en - de - nl - es datasets: - conll2003 widget: - text: "George Washington ging nach Washington" --- ## 4-Language NER in Flair (English, German, Dutch and Spanish) This is the fast 4-class NER model for 4 CoNLL-03 languages that ships with [Flair](https://github.com/flairNLP/flair/). Also kind of works for related languages like French. F1-Score: **91,51** (CoNLL-03 English), **85,72** (CoNLL-03 German revised), **86,22** (CoNLL-03 Dutch), **85,78** (CoNLL-03 Spanish) Predicts 4 tags: | **tag** | **meaning** | |---------------------------------|-----------| | PER | person name | | LOC | location name | | ORG | organization name | | MISC | other name | Based on [Flair embeddings](https://www.aclweb.org/anthology/C18-1139/) and LSTM-CRF. --- ### Demo: How to use in Flair Requires: **[Flair](https://github.com/flairNLP/flair/)** (`pip install flair`) ```python from flair.data import Sentence from flair.models import SequenceTagger # load tagger tagger = SequenceTagger.load("flair/ner-multi-fast") # make example sentence in any of the four languages sentence = Sentence("George Washington ging nach Washington") # predict NER tags tagger.predict(sentence) # print sentence print(sentence) # print predicted NER spans print('The following NER tags are found:') # iterate over entities and print for entity in sentence.get_spans('ner'): print(entity) ``` This yields the following output: ``` Span [1,2]: "George Washington" [− Labels: PER (0.9977)] Span [5]: "Washington" [− Labels: LOC (0.9895)] ``` So, the entities "*George Washington*" (labeled as a **person**) and "*Washington*" (labeled as a **location**) are found in the sentence "*George Washington ging nach Washington*". --- ### Training: Script to train this model The following Flair script was used to train this model: ```python from flair.data import Corpus from flair.datasets import CONLL_03, CONLL_03_GERMAN, CONLL_03_DUTCH, CONLL_03_SPANISH from flair.embeddings import WordEmbeddings, StackedEmbeddings, FlairEmbeddings # 1. get the multi-language corpus corpus: Corpus = MultiCorpus([ CONLL_03(), # English corpus CONLL_03_GERMAN(), # German corpus CONLL_03_DUTCH(), # Dutch corpus CONLL_03_SPANISH(), # Spanish corpus ]) # 2. what tag do we want to predict? tag_type = 'ner' # 3. make the tag dictionary from the corpus tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type) # 4. initialize each embedding we use embedding_types = [ # GloVe embeddings WordEmbeddings('glove'), # FastText embeddings WordEmbeddings('de'), # contextual string embeddings, forward FlairEmbeddings('multi-forward-fast'), # contextual string embeddings, backward FlairEmbeddings('multi-backward-fast'), ] # embedding stack consists of Flair and GloVe embeddings embeddings = StackedEmbeddings(embeddings=embedding_types) # 5. initialize sequence tagger from flair.models import SequenceTagger tagger = SequenceTagger(hidden_size=256, embeddings=embeddings, tag_dictionary=tag_dictionary, tag_type=tag_type) # 6. initialize trainer from flair.trainers import ModelTrainer trainer = ModelTrainer(tagger, corpus) # 7. run training trainer.train('resources/taggers/ner-multi-fast', train_with_dev=True, max_epochs=150) ``` --- ### Cite Please cite the following papers when using this model. 
``` @misc{akbik2019multilingual, title={Multilingual sequence labeling with one model}, author={Akbik, Alan and Bergmann, Tanja and Vollgraf, Roland} booktitle = {{NLDL} 2019, Northern Lights Deep Learning Workshop}, year = {2019} } ``` ``` @inproceedings{akbik2018coling, title={Contextual String Embeddings for Sequence Labeling}, author={Akbik, Alan and Blythe, Duncan and Vollgraf, Roland}, booktitle = {{COLING} 2018, 27th International Conference on Computational Linguistics}, pages = {1638--1649}, year = {2018} } ```
porkorbeef/Llama-2-13b-public
porkorbeef
2023-08-30T05:16:35Z
1,010
0
transformers
[ "transformers", "pytorch", "llama", "feature-extraction", "endpoints_compatible", "text-generation-inference", "region:us" ]
feature-extraction
2023-08-30T04:21:30Z
Entry not found
dataautogpt3/ProteusV0.1
dataautogpt3
2024-02-11T18:38:14Z
1,010
38
diffusers
[ "diffusers", "text-to-image", "license:gpl-3.0", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
2024-01-16T16:40:19Z
--- pipeline_tag: text-to-image widget: - text: >- black fluffy gorgeous dangerous cat animal creature, large orange eyes, big fluffy ears, piercing gaze, full moon, dark ambiance, best quality, extremely detailed output: url: ComfyUI_02614_.png - text: >- (impressionistic realism by csybgh), a 50 something male, working in banking, very short dyed dark curly balding hair, Afro-Asiatic ancestry, talks a lot but listens poorly, stuck in the past, wearing a suit, he has a certain charm, bronze skintone, sitting in a bar at night, he is smoking and feeling cool, drunk on plum wine, masterpiece, 8k, hyper detailed, smokey ambiance, perfect hands AND fingers output: url: 2024-01-18-140016_0_01_02_03_04.png - text: >- high quality pixel art, a pixel art silhouette of an anime space-themed girl in a space-punk steampunk style, lying in her bed by the window of a spaceship, smoking, with a rustic feel. The image should embody epic portraiture and double exposure, featuring an isolated landscape visible through the window. The colors should primarily be dynamic and action-packed, with a strong use of negative space. The entire artwork should be in pixel art style, emphasizing the characters shape and set against a white background. Silhouette output: url: aaaaaaaaaaaaa.png - text: >- The image features an older man, a long white beard and mustache, He has a stern expression, giving the impression of a wise and experienced individual. The mans beard and mustache are prominent, adding to his distinguished appearance. The close-up shot of the mans face emphasizes his facial features and the intensity of his gaze. output: url: 2024-01-18-140752_0_01_02.png - text: >- Super Closeup Portrait, action shot, Profoundly dark whitish meadow, glass flowers, Stains, space grunge style, Jeanne d'Arc wearing White Olive green used styled Cotton frock, Wielding thin silver sword, Sci-fi vibe, dirty, noisy, Vintage monk style, very detailed, hd output: url: 2024-01-18-135912_0_01.png - text: >- cinematic film still of Kodak Motion Picture Film: (Sharp Detailed Image) An Oscar winning movie for Best Cinematography a woman in a kimono standing on a subway train in Japan Kodak Motion Picture Film Style, shallow depth of field, vignette, highly detailed, high budget, bokeh, cinemascope, moody, epic, gorgeous, film grain, grainy output: url: 2024-01-18-140403_0_01_02_03.png - text: >- in the style of artgerm, comic style,3D model, mythical seascape, negative space, space quixotic dreams, temporal hallucination, psychedelic, mystical, intricate details, very bright neon colors, (vantablack background:1.5), pointillism, pareidolia, melting, symbolism, very high contrast, chiaroscuro parameters: negative_prompt: >- bad quality, bad anatomy, worst quality, low quality, low resolutions, extra fingers, blur, blurry, ugly, wrongs proportions, watermark, image artifacts, lowres, ugly, jpeg artifacts, deformed, noisy image output: url: ComfyUI_02698_.png - text: >- 1980s anime portrait of a character glitching. His face is separated from his body by heavy static. His face is deformed by pain. Dream-like, analog horror, glitch, terrifying output: url: GEH0sZIXAAABVsj.jpeg - text: Text Logo Proteus (best quality, HD, ~*~aesthetic~*~:1) output: url: ComfyUI_02778_.png license: gpl-3.0 --- https://huggingface.co/dataautogpt3/ProteusV0.2 new version <Gallery /> ## Proteus Proteus serves as a sophisticated enhancement over OpenDalleV1.1, leveraging its core functionalities to deliver superior outcomes. 
Key areas of advancement include heightened responsiveness to prompts and augmented creative capacities. To achieve this, it was fine-tuned using approximately 220,000 GPTV captioned images from copyright-free stock images (with some anime included), which were then normalized. Additionally, DPO (Direct Preference Optimization) was employed through a collection of 10,000 carefully selected high-quality, AI-generated image pairs. In pursuit of optimal performance, numerous LORA (Low-Rank Adaptation) models are trained independently before being selectively incorporated into the principal model via dynamic application methods. These techniques involve targeting particular segments within the model while avoiding interference with other areas during the learning phase. Consequently, Proteus exhibits marked improvements in portraying intricate facial characteristics and lifelike skin textures, all while sustaining commendable proficiency across various aesthetic domains, notably surrealism, anime, and cartoon-style visualizations. ## Settings for ProteusV0.1 Use these settings for the best results with ProteusV0.1: CFG Scale: Use a CFG scale of 8 to 7 Steps: 20 to 40 steps for more detail, 20 steps for faster results. Sampler: DPM++ 2M SDE Scheduler: Karras Resolution: 1280x1280 or 1024x1024 please also consider using these keep words to improve your prompts: best quality, HD, `~*~aesthetic~*~`. if you are having trouble coming up with prompts you can use this GPT I put together to help you refine the prompt. https://chat.openai.com/g/g-RziQNoydR-diffusion-master ## Use it with 🧨 diffusers ```python import torch from diffusers import ( StableDiffusionXLPipeline, KDPM2AncestralDiscreteScheduler, AutoencoderKL ) # Load VAE component vae = AutoencoderKL.from_pretrained( "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16 ) # Configure the pipeline pipe = StableDiffusionXLPipeline.from_pretrained( "dataautogpt3/ProteusV0.1", vae=vae, torch_dtype=torch.float16 ) pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to('cuda') # Define prompts and generate image prompt = "black fluffy gorgeous dangerous cat animal creature, large orange eyes, big fluffy ears, piercing gaze, full moon, dark ambiance, best quality, extremely detailed" negative_prompt = "nsfw, bad quality, bad anatomy, worst quality, low quality, low resolutions, extra fingers, blur, blurry, ugly, wrongs proportions, watermark, image artifacts, lowres, ugly, jpeg artifacts, deformed, noisy image" image = pipe( prompt, negative_prompt=negative_prompt, width=1024, height=1024, guidance_scale=7, num_inference_steps=20 ).images[0] ``` please support the work I do through donating to me on: https://www.buymeacoffee.com/DataVoid or following me on https://twitter.com/DataPlusEngine
Locutusque/Hercules-2.0-Mistral-7B
Locutusque
2024-02-12T17:01:17Z
1,010
17
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "not-for-all-audiences", "chemistry", "math", "code", "physics", "dataset:Locutusque/hercules-v2.0", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-02-03T11:26:01Z
--- license: apache-2.0 library_name: transformers tags: - not-for-all-audiences - chemistry - math - code - physics datasets: - Locutusque/hercules-v2.0 widget: - example_title: Derivative text: '<|im_start|>user In this task, you are given a description of an object and its surroundings. Your task is to describe the motion of the object mentioned in the description using differential equations. The answer should be a mathematical expression in t. Description: A ball is thrown up at 30 ft/sec from 3 feet off the ground. Solve this step-by-step, explaining each step in detail.<|im_end|> <|im_start|>assistant ' output: text: 'Let h(t) be the height of the ball at time t after it was thrown. We know that when the ball is thrown, it has an initial velocity v0 = 30 ft/sec and initial height y0 = 3 ft. We also know that the acceleration due to gravity g = -32 ft/sec^2. To solve for the motion of the ball, we can use the equation for vertical motion with acceleration due to gravity: h(t) = v0 * t + (1/2) * g * t^2 + y0 Substituting the values given in the problem, we get: y(t) = (30)(t) + (1/2)(-32)(t)^2 + 3 Now, let''s simplify the equation: y(t) = 30t - 16t^2 + 3 This is the final solution for the height of the ball as a function of time.' model-index: - name: Hercules-2.0-Mistral-7B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 61.09 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/Hercules-2.0-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 83.69 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/Hercules-2.0-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 63.47 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/Hercules-2.0-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 43.97 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/Hercules-2.0-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 79.48 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/Hercules-2.0-Mistral-7B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 44.43 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/Hercules-2.0-Mistral-7B name: Open LLM Leaderboard --- # Model Card: Hercules-2.0-Mistral-7B 
![image/png](https://cdn-uploads.huggingface.co/production/uploads/6437292ecd93f4c9a34b0d47/SeH52c8_9VMAnzCUg4QUE.png)
## Model Description

Hercules-2.0-Mistral-7B is a fine-tuned language model derived from mistralai/Mistral-7B-v0.1. It is specifically designed to excel in instruction following, function calls, and conversational interactions across various scientific and technical domains. The dataset used for fine-tuning, also named Hercules-v2.0, expands upon the diverse capabilities of OpenHermes-2.5 with contributions from numerous curated datasets. This fine-tuning has endowed Hercules-2.0-Mistral-7B with enhanced abilities in:

- Complex Instruction Following: Understanding and accurately executing multi-step instructions, even those involving specialized terminology.
- Function Calling: Seamlessly interpreting and executing function calls, providing appropriate input and output values.
- Domain-Specific Knowledge: Engaging in informative and educational conversations about Biology, Chemistry, Physics, Mathematics, Medicine, Computer Science, and more.

This model outperforms OpenHermes-2.5 and OpenChat-3.5 even though it was trained on only 100,000 rows, roughly one-tenth of the training data used for OpenHermes-2.5.

## Intended Uses & Potential Bias

Hercules-2.0-Mistral-7B is well-suited to the following applications:

- Specialized Chatbots: Creating knowledgeable chatbots and conversational agents in scientific and technical fields.
- Instructional Assistants: Supporting users with educational and step-by-step guidance in various disciplines.
- Code Generation and Execution: Facilitating code execution through function calls, aiding in software development and prototyping.

**Important Note: Although Hercules-v2.0 was carefully constructed, the underlying data sources may still contain biases or reflect harmful stereotypes. Use this model with caution and consider additional measures to mitigate potential biases in its responses.**

## Limitations and Risks

- Toxicity: The dataset may still contain toxic or harmful examples despite cleaning efforts.
- Hallucinations and Factual Errors: Like other language models, Hercules-2.0-Mistral-7B may generate incorrect or misleading information, especially in specialized domains where it lacks sufficient expertise.
- Potential for Misuse: The ability to engage in technical conversations and execute function calls could be misused for malicious purposes.

## Evaluation Metrics

To provide suitable benchmarks for Hercules-2.0-Mistral-7B, consider using a combination of the following metrics:

- Instruction Following: Task-specific evaluation datasets for instruction following in relevant domains (e.g., datasets focused on math problems, code generation, etc.).
- Function Calling: Evaluate the model's accuracy in interpreting and executing function calls with varying inputs and outputs.
- Conversational Quality: Assess the model's ability to maintain coherence, naturalness, and informativeness across conversational turns.
## Training Data

Hercules-2.0-Mistral-7B is fine-tuned from the following sources:

- cognitivecomputations/dolphin (first 200k examples)
- Evol Instruct 70K && 140K
- teknium/GPT4-LLM-Cleaned
- jondurbin/airoboros-3.2
- AlekseyKorshuk/camel-chatml
- CollectiveCognition/chats-data-2023-09-22
- Nebulous/lmsys-chat-1m-smortmodelsonly
- glaiveai/glaive-code-assistant-v2
- glaiveai/glaive-code-assistant
- glaiveai/glaive-function-calling-v2
- garage-bAInd/Open-Platypus
- meta-math/MetaMathQA (first 40k examples)
- teknium/GPTeacher-General-Instruct
- GPTeacher roleplay datasets
- BI55/MedText
- pubmed_qa labeled subset
- Unnatural Instructions
- CollectiveCognition/chats-data-2023-09-27
- CollectiveCognition/chats-data-2023-10-16

## Training Procedure

- This model was trained on 8 Kaggle TPUs, using torch_xla SPMD for high MXU efficiency. There was no expense on my end (meaning you can reproduce this too!).
- A learning rate of 2e-06 was used with the Adam optimizer, together with a linear scheduler with an end factor of 0.5. The low learning rate was chosen to prevent exploding gradients.
- No mixed precision was used; the default dtype was bfloat16.
- Trained on 200,000 examples of Hercules-v2.0.
- No model parameters were frozen.
- This model was trained on OpenAI's ChatML prompt format. Because this model has function-calling capabilities, the prompt format is slightly different; here is what it looks like (a usage sketch is given at the end of this card): ```<|im_start|>system\n{message}<|im_end|>\n<|im_start|>user\n{user message}<|im_end|>\n<|im_start|>call\n{function call message}<|im_end|>\n<|im_start|>function\n{function response message}<|im_end|>\n<|im_start|>assistant\n{assistant message}</s>```

This model was fine-tuned using the TPU-Alignment repository: https://github.com/Locutusque/TPU-Alignment

# Updates
- 🔥 **February 3, 2024: This model scored an average of 62.69 on the Open LLM Leaderboard, outperforming OpenHermes-2.5 and OpenChat-3.5.**

# Quants
exl2 quants by @bartowski: https://huggingface.co/bartowski/Hercules-2.0-Mistral-7B-exl2

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Locutusque__Hercules-2.0-Mistral-7B).

| Metric                            | Value |
|-----------------------------------|------:|
| Avg.                              | 62.69 |
| AI2 Reasoning Challenge (25-Shot) | 61.09 |
| HellaSwag (10-Shot)               | 83.69 |
| MMLU (5-Shot)                     | 63.47 |
| TruthfulQA (0-shot)               | 43.97 |
| Winogrande (5-shot)               | 79.48 |
| GSM8k (5-shot)                    | 44.43 |
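# Usage example (illustrative)
The following is a minimal sketch of how the ChatML format described in the Training Procedure section can be used with the standard `transformers` text-generation API. The system and user messages, device placement, and generation settings are placeholder assumptions for illustration, not part of the original card.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Locutusque/Hercules-2.0-Mistral-7B"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

# Assemble a ChatML prompt following the format from the Training Procedure section.
prompt = (
    "<|im_start|>system\nYou are Hercules, a helpful scientific assistant.<|im_end|>\n"
    "<|im_start|>user\nExplain, step by step, how to balance the equation H2 + O2 -> H2O.<|im_end|>\n"
    "<|im_start|>assistant\n"
)

inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.7)

# Decode only the newly generated assistant turn, skipping the prompt tokens.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```

For function calling, `<|im_start|>call` and `<|im_start|>function` turns would be inserted between the user and assistant turns in the same way.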
aloobun/Cypher-7B
aloobun
2024-05-16T08:52:27Z
1,010
5
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "nous", "westlake", "samantha", "conversational", "base_model:NousResearch/Nous-Hermes-2-Mistral-7B-DPO", "base_model:cognitivecomputations/samantha-1.1-westlake-7b-laser", "license:cc", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
2024-04-04T20:01:52Z
--- base_model: - NousResearch/Nous-Hermes-2-Mistral-7B-DPO - cognitivecomputations/samantha-1.1-westlake-7b-laser library_name: transformers tags: - mergekit - merge - mistral - nous - westlake - samantha license: cc --- Quants by @mradermacher: https://huggingface.co/mradermacher/Cypher-7B-GGUF ### Configuration The following YAML configuration was used to produce this model: ```yaml slices: - sources: - model: "NousResearch/Nous-Hermes-2-Mistral-7B-DPO" layer_range: [0, 32] - model: "cognitivecomputations/samantha-1.1-westlake-7b-laser" layer_range: [0, 32] merge_method: slerp base_model: "NousResearch/Nous-Hermes-2-Mistral-7B-DPO" parameters: t: - filter: lm_head value: [0.55] - filter: embed_tokens value: [0.7] - filter: self_attn value: [0.65, 0.35] - filter: mlp value: [0.35, 0.65] - filter: layernorm value: [0.4, 0.6] - filter: modelnorm value: [0.6] - value: 0.5 dtype: bfloat16 ``` ### Merge Method This model was merged using the SLERP merge method. ### Models Merged The following models were included in the merge: * [NousResearch/Nous-Hermes-2-Mistral-7B-DPO](https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO) * [cognitivecomputations/samantha-1.1-westlake-7b-laser](https://huggingface.co/cognitivecomputations/samantha-1.1-westlake-7b-laser)
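### SLERP illustration (sketch)
As an illustration of what the SLERP merge method does per tensor, the following is a minimal, self-contained PyTorch sketch of spherical linear interpolation. It is not mergekit's actual implementation; the colinearity threshold and the commented usage are assumptions for demonstration only.

```python
import torch

def slerp(t: float, a: torch.Tensor, b: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
    """Spherical linear interpolation between two weight tensors of the same shape."""
    a_flat = a.flatten().float()
    b_flat = b.flatten().float()
    # Angle between the two weight vectors, treated as points on a hypersphere.
    cos_omega = torch.clamp(
        torch.dot(a_flat, b_flat) / (a_flat.norm() * b_flat.norm() + eps), -1.0, 1.0
    )
    omega = torch.arccos(cos_omega)
    if omega.abs() < 1e-4:
        # Nearly colinear weights: fall back to plain linear interpolation.
        merged = (1.0 - t) * a_flat + t * b_flat
    else:
        so = torch.sin(omega)
        merged = (torch.sin((1.0 - t) * omega) / so) * a_flat + (torch.sin(t * omega) / so) * b_flat
    return merged.reshape(a.shape).to(a.dtype)

# Hypothetical use: blend a single self-attention weight with t = 0.65,
# mirroring the first value of the self_attn filter in the config above.
# merged_weight = slerp(0.65, hermes_weight, samantha_weight)
```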