Merge of top 7B models with the TIES method

mergekit is a toolkit for merging pre-trained language models. mergekit uses an out-of-core approach to perform unreasonably elaborate merges in resource-constrained situations. Merges can be run entirely on CPU or accelerated with as little as 8 GB of VRAM. Many merging algorithms are supported, with more being added over time.
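
The exact merge configuration used for this model is not included in the card. For readers unfamiliar with TIES-Merging (Yadav et al., 2023), the sketch below illustrates the core idea on plain tensors: each fine-tuned model's task vector (its delta from the base model) is trimmed to its largest-magnitude entries, a sign is elected per parameter, and only sign-agreeing entries are averaged back onto the base. This is an illustrative sketch, not mergekit's implementation; the function names, the toy tensors, and the `density`/`weights` values are assumptions for demonstration.

```python
# Illustrative sketch of TIES merging on raw tensors (not mergekit's code).
import torch


def trim(delta: torch.Tensor, density: float) -> torch.Tensor:
    """Keep only the top `density` fraction of entries by magnitude; zero the rest."""
    k = max(1, int(density * delta.numel()))
    # k-th largest magnitude == (numel - k + 1)-th smallest magnitude
    threshold = delta.abs().flatten().kthvalue(delta.numel() - k + 1).values
    return torch.where(delta.abs() >= threshold, delta, torch.zeros_like(delta))


def ties_merge(base: torch.Tensor, finetuned: list[torch.Tensor],
               weights: list[float], density: float = 0.5) -> torch.Tensor:
    """Merge one weight tensor from several fine-tuned checkpoints into `base`."""
    # 1) Task vectors, trimmed and scaled by a per-model weight.
    deltas = torch.stack([trim(ft - base, density) * w for ft, w in zip(finetuned, weights)])
    # 2) Elect a sign per parameter from the summed trimmed task vectors.
    elected = torch.sign(deltas.sum(dim=0))
    # 3) Drop entries whose sign disagrees with the elected sign.
    agree = torch.where(torch.sign(deltas) == elected, deltas, torch.zeros_like(deltas))
    # 4) Disjoint mean: average only over the models that actually contributed.
    counts = (agree != 0).sum(dim=0).clamp(min=1)
    return base + agree.sum(dim=0) / counts


# Toy example: two "fine-tuned" 2x2 weight matrices over a zero base.
base = torch.zeros(2, 2)
ft_a = torch.tensor([[0.4, -0.1], [0.0, 0.3]])
ft_b = torch.tensor([[-0.2, 0.5], [0.0, 0.3]])
print(ties_merge(base, [ft_a, ft_b], weights=[1.0, 1.0], density=0.5))
```

In practice, mergekit merges like this one are usually described in a YAML configuration that lists the base model, the models to merge, per-model `density` and `weight` parameters, and `merge_method: ties`, and are run with the `mergekit-yaml` command on CPU or a single GPU.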

Eval

Raw per-task results from the Open LLM Leaderboard evaluation harness (EleutherAI lm-evaluation-harness):

```json
{
    "all": {
        "acc": 0.6487801278765712,
        "acc_stderr": 0.03219011246717665,
        "acc_norm": 0.6479445077777353,
        "acc_norm_stderr": 0.032868022907407396,
        "mc1": 0.5862913096695227,
        "mc1_stderr": 0.0172408618120998,
        "mc2": 0.7078078883926877,
        "mc2_stderr": 0.015097515102384168
    },
    "harness|arc:challenge|25": {
        "acc": 0.7167235494880546,
        "acc_stderr": 0.013167478735134575,
        "acc_norm": 0.7363481228668942,
        "acc_norm_stderr": 0.012875929151297044
    },
    "harness|hellaswag|10": {
        "acc": 0.7321250746863175,
        "acc_stderr": 0.004419469983939178,
        "acc_norm": 0.8884684325831508,
        "acc_norm_stderr": 0.0031414591751392717
    },
    "harness|hendrycksTest-abstract_algebra|5": {
        "acc": 0.31,
        "acc_stderr": 0.04648231987117316,
        "acc_norm": 0.31,
        "acc_norm_stderr": 0.04648231987117316
    },
    "harness|hendrycksTest-anatomy|5": {
        "acc": 0.6518518518518519,
        "acc_stderr": 0.041153246103369526,
        "acc_norm": 0.6518518518518519,
        "acc_norm_stderr": 0.041153246103369526
    },
    "harness|hendrycksTest-astronomy|5": {
        "acc": 0.7039473684210527,
        "acc_stderr": 0.03715062154998904,
        "acc_norm": 0.7039473684210527,
        "acc_norm_stderr": 0.03715062154998904
    },
    "harness|hendrycksTest-business_ethics|5": {
        "acc": 0.61,
        "acc_stderr": 0.04902071300001975,
        "acc_norm": 0.61,
        "acc_norm_stderr": 0.04902071300001975
    },
    "harness|hendrycksTest-clinical_knowledge|5": {
        "acc": 0.7132075471698113,
        "acc_stderr": 0.02783491252754407,
        "acc_norm": 0.7132075471698113,
        "acc_norm_stderr": 0.02783491252754407
    },
    "harness|hendrycksTest-college_biology|5": {
        "acc": 0.75,
        "acc_stderr": 0.03621034121889507,
        "acc_norm": 0.75,
        "acc_norm_stderr": 0.03621034121889507
    },
    "harness|hendrycksTest-college_chemistry|5": {
        "acc": 0.46,
        "acc_stderr": 0.05009082659620333,
        "acc_norm": 0.46,
        "acc_norm_stderr": 0.05009082659620333
    },
    "harness|hendrycksTest-college_computer_science|5": {
        "acc": 0.55,
        "acc_stderr": 0.05,
        "acc_norm": 0.55,
        "acc_norm_stderr": 0.05
    },
    "harness|hendrycksTest-college_mathematics|5": {
        "acc": 0.3,
        "acc_stderr": 0.046056618647183814,
        "acc_norm": 0.3,
        "acc_norm_stderr": 0.046056618647183814
    },
    "harness|hendrycksTest-college_medicine|5": {
        "acc": 0.6589595375722543,
        "acc_stderr": 0.036146654241808254,
        "acc_norm": 0.6589595375722543,
        "acc_norm_stderr": 0.036146654241808254
    },
    "harness|hendrycksTest-college_physics|5": {
        "acc": 0.43137254901960786,
        "acc_stderr": 0.04928099597287534,
        "acc_norm": 0.43137254901960786,
        "acc_norm_stderr": 0.04928099597287534
    },
    "harness|hendrycksTest-computer_security|5": {
        "acc": 0.77,
        "acc_stderr": 0.04229525846816506,
        "acc_norm": 0.77,
        "acc_norm_stderr": 0.04229525846816506
    },
    "harness|hendrycksTest-conceptual_physics|5": {
        "acc": 0.548936170212766,
        "acc_stderr": 0.032529096196131965,
        "acc_norm": 0.548936170212766,
        "acc_norm_stderr": 0.032529096196131965
    },
    "harness|hendrycksTest-econometrics|5": {
        "acc": 0.49122807017543857,
        "acc_stderr": 0.04702880432049615,
        "acc_norm": 0.49122807017543857,
        "acc_norm_stderr": 0.04702880432049615
    },
    "harness|hendrycksTest-electrical_engineering|5": {
        "acc": 0.5517241379310345,
        "acc_stderr": 0.04144311810878152,
        "acc_norm": 0.5517241379310345,
        "acc_norm_stderr": 0.04144311810878152
    },
    "harness|hendrycksTest-elementary_mathematics|5": {
        "acc": 0.4126984126984127,
        "acc_stderr": 0.025355741263055277,
        "acc_norm": 0.4126984126984127,
        "acc_norm_stderr": 0.025355741263055277
    },
    "harness|hendrycksTest-formal_logic|5": {
        "acc": 0.49206349206349204,
        "acc_stderr": 0.044715725362943486,
        "acc_norm": 0.49206349206349204,
        "acc_norm_stderr": 0.044715725362943486
    },
    "harness|hendrycksTest-global_facts|5": {
        "acc": 0.35,
        "acc_stderr": 0.047937248544110196,
        "acc_norm": 0.35,
        "acc_norm_stderr": 0.047937248544110196
    },
    "harness|hendrycksTest-high_school_biology|5": {
        "acc": 0.7967741935483871,
        "acc_stderr": 0.02289168798455496,
        "acc_norm": 0.7967741935483871,
        "acc_norm_stderr": 0.02289168798455496
    },
    "harness|hendrycksTest-high_school_chemistry|5": {
        "acc": 0.5024630541871922,
        "acc_stderr": 0.035179450386910616,
        "acc_norm": 0.5024630541871922,
        "acc_norm_stderr": 0.035179450386910616
    },
    "harness|hendrycksTest-high_school_computer_science|5": {
        "acc": 0.7,
        "acc_stderr": 0.046056618647183814,
        "acc_norm": 0.7,
        "acc_norm_stderr": 0.046056618647183814
    },
    "harness|hendrycksTest-high_school_european_history|5": {
        "acc": 0.7575757575757576,
        "acc_stderr": 0.03346409881055953,
        "acc_norm": 0.7575757575757576,
        "acc_norm_stderr": 0.03346409881055953
    },
    "harness|hendrycksTest-high_school_geography|5": {
        "acc": 0.803030303030303,
        "acc_stderr": 0.028335609732463362,
        "acc_norm": 0.803030303030303,
        "acc_norm_stderr": 0.028335609732463362
    },
    "harness|hendrycksTest-high_school_government_and_politics|5": {
        "acc": 0.9067357512953368,
        "acc_stderr": 0.020986854593289733,
        "acc_norm": 0.9067357512953368,
        "acc_norm_stderr": 0.020986854593289733
    },
    "harness|hendrycksTest-high_school_macroeconomics|5": {
        "acc": 0.6487179487179487,
        "acc_stderr": 0.024203665177902803,
        "acc_norm": 0.6487179487179487,
        "acc_norm_stderr": 0.024203665177902803
    },
    "harness|hendrycksTest-high_school_mathematics|5": {
        "acc": 0.3333333333333333,
        "acc_stderr": 0.02874204090394848,
        "acc_norm": 0.3333333333333333,
        "acc_norm_stderr": 0.02874204090394848
    },
    "harness|hendrycksTest-high_school_microeconomics|5": {
        "acc": 0.6554621848739496,
        "acc_stderr": 0.03086868260412162,
        "acc_norm": 0.6554621848739496,
        "acc_norm_stderr": 0.03086868260412162
    },
    "harness|hendrycksTest-high_school_physics|5": {
        "acc": 0.32450331125827814,
        "acc_stderr": 0.038227469376587525,
        "acc_norm": 0.32450331125827814,
        "acc_norm_stderr": 0.038227469376587525
    },
    "harness|hendrycksTest-high_school_psychology|5": {
        "acc": 0.8403669724770643,
        "acc_stderr": 0.015703498348461763,
        "acc_norm": 0.8403669724770643,
        "acc_norm_stderr": 0.015703498348461763
    },
    "harness|hendrycksTest-high_school_statistics|5": {
        "acc": 0.5046296296296297,
        "acc_stderr": 0.03409825519163572,
        "acc_norm": 0.5046296296296297,
        "acc_norm_stderr": 0.03409825519163572
    },
    "harness|hendrycksTest-high_school_us_history|5": {
        "acc": 0.8235294117647058,
        "acc_stderr": 0.026756401538078962,
        "acc_norm": 0.8235294117647058,
        "acc_norm_stderr": 0.026756401538078962
    },
    "harness|hendrycksTest-high_school_world_history|5": {
        "acc": 0.7721518987341772,
        "acc_stderr": 0.02730348459906944,
        "acc_norm": 0.7721518987341772,
        "acc_norm_stderr": 0.02730348459906944
    },
    "harness|hendrycksTest-human_aging|5": {
        "acc": 0.6816143497757847,
        "acc_stderr": 0.03126580522513713,
        "acc_norm": 0.6816143497757847,
        "acc_norm_stderr": 0.03126580522513713
    },
    "harness|hendrycksTest-human_sexuality|5": {
        "acc": 0.7862595419847328,
        "acc_stderr": 0.0359546161177469,
        "acc_norm": 0.7862595419847328,
        "acc_norm_stderr": 0.0359546161177469
    },
    "harness|hendrycksTest-international_law|5": {
        "acc": 0.7851239669421488,
        "acc_stderr": 0.037494924487096966,
        "acc_norm": 0.7851239669421488,
        "acc_norm_stderr": 0.037494924487096966
    },
    "harness|hendrycksTest-jurisprudence|5": {
        "acc": 0.7777777777777778,
        "acc_stderr": 0.0401910747255735,
        "acc_norm": 0.7777777777777778,
        "acc_norm_stderr": 0.0401910747255735
    },
    "harness|hendrycksTest-logical_fallacies|5": {
        "acc": 0.7423312883435583,
        "acc_stderr": 0.03436150827846917,
        "acc_norm": 0.7423312883435583,
        "acc_norm_stderr": 0.03436150827846917
    },
    "harness|hendrycksTest-machine_learning|5": {
        "acc": 0.42857142857142855,
        "acc_stderr": 0.04697113923010212,
        "acc_norm": 0.42857142857142855,
        "acc_norm_stderr": 0.04697113923010212
    },
    "harness|hendrycksTest-management|5": {
        "acc": 0.7475728155339806,
        "acc_stderr": 0.04301250399690878,
        "acc_norm": 0.7475728155339806,
        "acc_norm_stderr": 0.04301250399690878
    },
    "harness|hendrycksTest-marketing|5": {
        "acc": 0.8846153846153846,
        "acc_stderr": 0.02093019318517933,
        "acc_norm": 0.8846153846153846,
        "acc_norm_stderr": 0.02093019318517933
    },
    "harness|hendrycksTest-medical_genetics|5": {
        "acc": 0.7,
        "acc_stderr": 0.046056618647183814,
        "acc_norm": 0.7,
        "acc_norm_stderr": 0.046056618647183814
    },
    "harness|hendrycksTest-miscellaneous|5": {
        "acc": 0.80970625798212,
        "acc_stderr": 0.014036945850381396,
        "acc_norm": 0.80970625798212,
        "acc_norm_stderr": 0.014036945850381396
    },
    "harness|hendrycksTest-moral_disputes|5": {
        "acc": 0.7369942196531792,
        "acc_stderr": 0.023703099525258172,
        "acc_norm": 0.7369942196531792,
        "acc_norm_stderr": 0.023703099525258172
    },
    "harness|hendrycksTest-moral_scenarios|5": {
        "acc": 0.47150837988826816,
        "acc_stderr": 0.016695329746015796,
        "acc_norm": 0.47150837988826816,
        "acc_norm_stderr": 0.016695329746015796
    },
    "harness|hendrycksTest-nutrition|5": {
        "acc": 0.7189542483660131,
        "acc_stderr": 0.025738854797818733,
        "acc_norm": 0.7189542483660131,
        "acc_norm_stderr": 0.025738854797818733
    },
    "harness|hendrycksTest-philosophy|5": {
        "acc": 0.7170418006430869,
        "acc_stderr": 0.025583062489984813,
        "acc_norm": 0.7170418006430869,
        "acc_norm_stderr": 0.025583062489984813
    },
    "harness|hendrycksTest-prehistory|5": {
        "acc": 0.7407407407407407,
        "acc_stderr": 0.024383665531035457,
        "acc_norm": 0.7407407407407407,
        "acc_norm_stderr": 0.024383665531035457
    },
    "harness|hendrycksTest-professional_accounting|5": {
        "acc": 0.475177304964539,
        "acc_stderr": 0.029790719243829727,
        "acc_norm": 0.475177304964539,
        "acc_norm_stderr": 0.029790719243829727
    },
    "harness|hendrycksTest-professional_law|5": {
        "acc": 0.470013037809648,
        "acc_stderr": 0.01274724896707906,
        "acc_norm": 0.470013037809648,
        "acc_norm_stderr": 0.01274724896707906
    },
    "harness|hendrycksTest-professional_medicine|5": {
        "acc": 0.6691176470588235,
        "acc_stderr": 0.028582709753898445,
        "acc_norm": 0.6691176470588235,
        "acc_norm_stderr": 0.028582709753898445
    },
    "harness|hendrycksTest-professional_psychology|5": {
        "acc": 0.6584967320261438,
        "acc_stderr": 0.019184639328092487,
        "acc_norm": 0.6584967320261438,
        "acc_norm_stderr": 0.019184639328092487
    },
    "harness|hendrycksTest-public_relations|5": {
        "acc": 0.6818181818181818,
        "acc_stderr": 0.044612721759105085,
        "acc_norm": 0.6818181818181818,
        "acc_norm_stderr": 0.044612721759105085
    },
    "harness|hendrycksTest-security_studies|5": {
        "acc": 0.7306122448979592,
        "acc_stderr": 0.02840125202902294,
        "acc_norm": 0.7306122448979592,
        "acc_norm_stderr": 0.02840125202902294
    },
    "harness|hendrycksTest-sociology|5": {
        "acc": 0.835820895522388,
        "acc_stderr": 0.026193923544454125,
        "acc_norm": 0.835820895522388,
        "acc_norm_stderr": 0.026193923544454125
    },
    "harness|hendrycksTest-us_foreign_policy|5": {
        "acc": 0.85,
        "acc_stderr": 0.03588702812826371,
        "acc_norm": 0.85,
        "acc_norm_stderr": 0.03588702812826371
    },
    "harness|hendrycksTest-virology|5": {
        "acc": 0.5542168674698795,
        "acc_stderr": 0.03869543323472101,
        "acc_norm": 0.5542168674698795,
        "acc_norm_stderr": 0.03869543323472101
    },
    "harness|hendrycksTest-world_religions|5": {
        "acc": 0.8245614035087719,
        "acc_stderr": 0.029170885500727665,
        "acc_norm": 0.8245614035087719,
        "acc_norm_stderr": 0.029170885500727665
    },
    "harness|truthfulqa:mc|0": {
        "mc1": 0.5862913096695227,
        "mc1_stderr": 0.0172408618120998,
        "mc2": 0.7078078883926877,
        "mc2_stderr": 0.015097515102384168
    },
    "harness|winogrande|5": {
        "acc": 0.8579321231254933,
        "acc_stderr": 0.009812000391679367
    },
    "harness|gsm8k|5": {
        "acc": 0.6648976497346475,
        "acc_stderr": 0.013001948176422954
    }
}
```
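
For reference, the MMLU figure in the summary below is the unweighted mean of the 57 `harness|hendrycksTest-*` subject accuracies above. A minimal sketch of that roll-up, assuming the JSON block above has been saved locally (the `results.json` path is a placeholder):

```python
import json

# Placeholder path: save the JSON block above to this file first.
with open("results.json") as f:
    results = json.load(f)

# Average the per-subject MMLU accuracies (acc and acc_norm coincide here).
mmlu_tasks = [k for k in results if k.startswith("harness|hendrycksTest-")]
mmlu = sum(results[k]["acc_norm"] for k in mmlu_tasks) / len(mmlu_tasks)
print(f"{len(mmlu_tasks)} MMLU subjects, mean acc_norm = {mmlu:.4f}")  # ~0.6422, i.e. the 64.22 below
```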

Open LLM Leaderboard Evaluation Results

Detailed results can be found here

| Metric | Value |
|--------|-------|
| Avg. | 74.96 |
| AI2 Reasoning Challenge (25-Shot) | 73.63 |
| HellaSwag (10-Shot) | 88.85 |
| MMLU (5-Shot) | 64.22 |
| TruthfulQA (0-shot) | 70.78 |
| Winogrande (5-shot) | 85.79 |
| GSM8k (5-shot) | 66.49 |
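
The average row is simply the unweighted mean of the six benchmark scores, which is easy to check (the values below just restate the table):

```python
# Sanity check: the leaderboard average is the mean of the six benchmark scores.
scores = {
    "ARC (25-shot)": 73.63,
    "HellaSwag (10-shot)": 88.85,
    "MMLU (5-shot)": 64.22,
    "TruthfulQA (0-shot)": 70.78,
    "Winogrande (5-shot)": 85.79,
    "GSM8k (5-shot)": 66.49,
}
print(round(sum(scores.values()) / len(scores), 2))  # 74.96
```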