modelId
stringlengths
5
122
author
stringlengths
2
42
last_modified
unknown
downloads
int64
0
738M
likes
int64
0
11k
library_name
stringclasses
245 values
tags
sequencelengths
1
4.05k
pipeline_tag
stringclasses
48 values
createdAt
unknown
card
stringlengths
1
901k
aws-neuron/bge-base-en-v1-5-seqlen-384-bs-1
aws-neuron
"2023-11-21T09:06:00Z"
4,695
1
sentence-transformers
[ "sentence-transformers", "bert", "feature-extraction", "sentence-similarity", "transformers", "mteb", "inferentia2", "neuron", "en", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
feature-extraction
"2023-11-21T09:03:35Z"
--- language: - en license: mit tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers - mteb - inferentia2 - neuron model-index: - name: bge-base-en-v1.5 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 76.14925373134328 - type: ap value: 39.32336517995478 - type: f1 value: 70.16902252611425 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 93.386825 - type: ap value: 90.21276917991995 - type: f1 value: 93.37741030006174 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 48.846000000000004 - type: f1 value: 48.14646269778261 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 40.754000000000005 - type: map_at_10 value: 55.761 - type: map_at_100 value: 56.330999999999996 - type: map_at_1000 value: 56.333999999999996 - type: map_at_3 value: 51.92 - type: map_at_5 value: 54.010999999999996 - type: mrr_at_1 value: 41.181 - type: mrr_at_10 value: 55.967999999999996 - type: mrr_at_100 value: 56.538 - type: mrr_at_1000 value: 56.542 - type: mrr_at_3 value: 51.980000000000004 - type: mrr_at_5 value: 54.208999999999996 - type: ndcg_at_1 value: 40.754000000000005 - type: ndcg_at_10 value: 63.605000000000004 - type: ndcg_at_100 value: 66.05199999999999 - type: ndcg_at_1000 value: 66.12 - type: ndcg_at_3 value: 55.708 - type: ndcg_at_5 value: 59.452000000000005 - type: precision_at_1 value: 40.754000000000005 - type: precision_at_10 value: 
8.841000000000001 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 22.238 - type: precision_at_5 value: 15.149000000000001 - type: recall_at_1 value: 40.754000000000005 - type: recall_at_10 value: 88.407 - type: recall_at_100 value: 99.14699999999999 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 66.714 - type: recall_at_5 value: 75.747 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 48.74884539679369 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 42.8075893810716 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 62.128470519187736 - type: mrr value: 74.28065778481289 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 89.24629081484655 - type: cos_sim_spearman value: 86.93752309911496 - type: euclidean_pearson value: 87.58589628573816 - type: euclidean_spearman value: 88.05622328825284 - type: manhattan_pearson value: 87.5594959805773 - type: manhattan_spearman value: 88.19658793233961 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 86.9512987012987 - type: f1 value: 86.92515357973708 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 
65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 39.10263762928872 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 36.69711517426737 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 32.327 - type: map_at_10 value: 44.099 - type: map_at_100 value: 45.525 - type: map_at_1000 value: 45.641999999999996 - type: map_at_3 value: 40.47 - type: map_at_5 value: 42.36 - type: mrr_at_1 value: 39.199 - type: mrr_at_10 value: 49.651 - type: mrr_at_100 value: 50.29 - type: mrr_at_1000 value: 50.329 - type: mrr_at_3 value: 46.924 - type: mrr_at_5 value: 48.548 - type: ndcg_at_1 value: 39.199 - type: ndcg_at_10 value: 50.773 - type: ndcg_at_100 value: 55.67999999999999 - type: ndcg_at_1000 value: 57.495 - type: ndcg_at_3 value: 45.513999999999996 - type: ndcg_at_5 value: 47.703 - type: precision_at_1 value: 39.199 - type: precision_at_10 value: 9.914000000000001 - type: precision_at_100 value: 1.5310000000000001 - type: precision_at_1000 value: 0.198 - type: precision_at_3 value: 21.984 - type: precision_at_5 value: 15.737000000000002 - type: recall_at_1 value: 32.327 - type: recall_at_10 value: 63.743 - type: recall_at_100 value: 84.538 - type: recall_at_1000 value: 96.089 - type: recall_at_3 value: 48.065000000000005 - type: recall_at_5 value: 54.519 - type: map_at_1 value: 32.671 - type: map_at_10 value: 42.954 - type: map_at_100 value: 44.151 - type: map_at_1000 value: 44.287 - type: map_at_3 value: 39.912 - type: map_at_5 value: 41.798 - type: mrr_at_1 value: 41.465 - type: mrr_at_10 value: 49.351 - type: mrr_at_100 value: 49.980000000000004 - type: mrr_at_1000 value: 50.016000000000005 - type: mrr_at_3 value: 47.144000000000005 - type: mrr_at_5 value: 
48.592999999999996 - type: ndcg_at_1 value: 41.465 - type: ndcg_at_10 value: 48.565999999999995 - type: ndcg_at_100 value: 52.76499999999999 - type: ndcg_at_1000 value: 54.749 - type: ndcg_at_3 value: 44.57 - type: ndcg_at_5 value: 46.759 - type: precision_at_1 value: 41.465 - type: precision_at_10 value: 9.107999999999999 - type: precision_at_100 value: 1.433 - type: precision_at_1000 value: 0.191 - type: precision_at_3 value: 21.423000000000002 - type: precision_at_5 value: 15.414 - type: recall_at_1 value: 32.671 - type: recall_at_10 value: 57.738 - type: recall_at_100 value: 75.86500000000001 - type: recall_at_1000 value: 88.36 - type: recall_at_3 value: 45.626 - type: recall_at_5 value: 51.812000000000005 - type: map_at_1 value: 41.185 - type: map_at_10 value: 53.929 - type: map_at_100 value: 54.92 - type: map_at_1000 value: 54.967999999999996 - type: map_at_3 value: 50.70400000000001 - type: map_at_5 value: 52.673 - type: mrr_at_1 value: 47.398 - type: mrr_at_10 value: 57.303000000000004 - type: mrr_at_100 value: 57.959 - type: mrr_at_1000 value: 57.985 - type: mrr_at_3 value: 54.932 - type: mrr_at_5 value: 56.464999999999996 - type: ndcg_at_1 value: 47.398 - type: ndcg_at_10 value: 59.653 - type: ndcg_at_100 value: 63.627 - type: ndcg_at_1000 value: 64.596 - type: ndcg_at_3 value: 54.455 - type: ndcg_at_5 value: 57.245000000000005 - type: precision_at_1 value: 47.398 - type: precision_at_10 value: 9.524000000000001 - type: precision_at_100 value: 1.243 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 24.389 - type: precision_at_5 value: 16.752 - type: recall_at_1 value: 41.185 - type: recall_at_10 value: 73.193 - type: recall_at_100 value: 90.357 - type: recall_at_1000 value: 97.253 - type: recall_at_3 value: 59.199999999999996 - type: recall_at_5 value: 66.118 - type: map_at_1 value: 27.27 - type: map_at_10 value: 36.223 - type: map_at_100 value: 37.218 - type: map_at_1000 value: 37.293 - type: map_at_3 value: 33.503 - 
type: map_at_5 value: 35.097 - type: mrr_at_1 value: 29.492 - type: mrr_at_10 value: 38.352000000000004 - type: mrr_at_100 value: 39.188 - type: mrr_at_1000 value: 39.247 - type: mrr_at_3 value: 35.876000000000005 - type: mrr_at_5 value: 37.401 - type: ndcg_at_1 value: 29.492 - type: ndcg_at_10 value: 41.239 - type: ndcg_at_100 value: 46.066 - type: ndcg_at_1000 value: 47.992000000000004 - type: ndcg_at_3 value: 36.11 - type: ndcg_at_5 value: 38.772 - type: precision_at_1 value: 29.492 - type: precision_at_10 value: 6.260000000000001 - type: precision_at_100 value: 0.914 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 15.104000000000001 - type: precision_at_5 value: 10.644 - type: recall_at_1 value: 27.27 - type: recall_at_10 value: 54.589 - type: recall_at_100 value: 76.70700000000001 - type: recall_at_1000 value: 91.158 - type: recall_at_3 value: 40.974 - type: recall_at_5 value: 47.327000000000005 - type: map_at_1 value: 17.848 - type: map_at_10 value: 26.207 - type: map_at_100 value: 27.478 - type: map_at_1000 value: 27.602 - type: map_at_3 value: 23.405 - type: map_at_5 value: 24.98 - type: mrr_at_1 value: 21.891 - type: mrr_at_10 value: 31.041999999999998 - type: mrr_at_100 value: 32.092 - type: mrr_at_1000 value: 32.151999999999994 - type: mrr_at_3 value: 28.358 - type: mrr_at_5 value: 29.969 - type: ndcg_at_1 value: 21.891 - type: ndcg_at_10 value: 31.585 - type: ndcg_at_100 value: 37.531 - type: ndcg_at_1000 value: 40.256 - type: ndcg_at_3 value: 26.508 - type: ndcg_at_5 value: 28.894 - type: precision_at_1 value: 21.891 - type: precision_at_10 value: 5.795999999999999 - type: precision_at_100 value: 0.9990000000000001 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 12.769 - type: precision_at_5 value: 9.279 - type: recall_at_1 value: 17.848 - type: recall_at_10 value: 43.452 - type: recall_at_100 value: 69.216 - type: recall_at_1000 value: 88.102 - type: recall_at_3 value: 29.18 - type: 
recall_at_5 value: 35.347 - type: map_at_1 value: 30.94 - type: map_at_10 value: 41.248000000000005 - type: map_at_100 value: 42.495 - type: map_at_1000 value: 42.602000000000004 - type: map_at_3 value: 37.939 - type: map_at_5 value: 39.924 - type: mrr_at_1 value: 37.824999999999996 - type: mrr_at_10 value: 47.041 - type: mrr_at_100 value: 47.83 - type: mrr_at_1000 value: 47.878 - type: mrr_at_3 value: 44.466 - type: mrr_at_5 value: 46.111999999999995 - type: ndcg_at_1 value: 37.824999999999996 - type: ndcg_at_10 value: 47.223 - type: ndcg_at_100 value: 52.394 - type: ndcg_at_1000 value: 54.432 - type: ndcg_at_3 value: 42.032000000000004 - type: ndcg_at_5 value: 44.772 - type: precision_at_1 value: 37.824999999999996 - type: precision_at_10 value: 8.393 - type: precision_at_100 value: 1.2890000000000001 - type: precision_at_1000 value: 0.164 - type: precision_at_3 value: 19.698 - type: precision_at_5 value: 14.013 - type: recall_at_1 value: 30.94 - type: recall_at_10 value: 59.316 - type: recall_at_100 value: 80.783 - type: recall_at_1000 value: 94.15400000000001 - type: recall_at_3 value: 44.712 - type: recall_at_5 value: 51.932 - type: map_at_1 value: 27.104 - type: map_at_10 value: 36.675999999999995 - type: map_at_100 value: 38.076 - type: map_at_1000 value: 38.189 - type: map_at_3 value: 33.733999999999995 - type: map_at_5 value: 35.287 - type: mrr_at_1 value: 33.904 - type: mrr_at_10 value: 42.55 - type: mrr_at_100 value: 43.434 - type: mrr_at_1000 value: 43.494 - type: mrr_at_3 value: 40.126 - type: mrr_at_5 value: 41.473 - type: ndcg_at_1 value: 33.904 - type: ndcg_at_10 value: 42.414 - type: ndcg_at_100 value: 48.203 - type: ndcg_at_1000 value: 50.437 - type: ndcg_at_3 value: 37.633 - type: ndcg_at_5 value: 39.67 - type: precision_at_1 value: 33.904 - type: precision_at_10 value: 7.82 - type: precision_at_100 value: 1.2409999999999999 - type: precision_at_1000 value: 0.159 - type: precision_at_3 value: 17.884 - type: precision_at_5 value: 
12.648000000000001 - type: recall_at_1 value: 27.104 - type: recall_at_10 value: 53.563 - type: recall_at_100 value: 78.557 - type: recall_at_1000 value: 93.533 - type: recall_at_3 value: 39.92 - type: recall_at_5 value: 45.457 - type: map_at_1 value: 27.707749999999997 - type: map_at_10 value: 36.961 - type: map_at_100 value: 38.158833333333334 - type: map_at_1000 value: 38.270333333333326 - type: map_at_3 value: 34.07183333333334 - type: map_at_5 value: 35.69533333333334 - type: mrr_at_1 value: 32.81875 - type: mrr_at_10 value: 41.293 - type: mrr_at_100 value: 42.116499999999995 - type: mrr_at_1000 value: 42.170249999999996 - type: mrr_at_3 value: 38.83983333333333 - type: mrr_at_5 value: 40.29775 - type: ndcg_at_1 value: 32.81875 - type: ndcg_at_10 value: 42.355 - type: ndcg_at_100 value: 47.41374999999999 - type: ndcg_at_1000 value: 49.5805 - type: ndcg_at_3 value: 37.52825 - type: ndcg_at_5 value: 39.83266666666667 - type: precision_at_1 value: 32.81875 - type: precision_at_10 value: 7.382416666666666 - type: precision_at_100 value: 1.1640833333333334 - type: precision_at_1000 value: 0.15383333333333335 - type: precision_at_3 value: 17.134166666666665 - type: precision_at_5 value: 12.174833333333336 - type: recall_at_1 value: 27.707749999999997 - type: recall_at_10 value: 53.945 - type: recall_at_100 value: 76.191 - type: recall_at_1000 value: 91.101 - type: recall_at_3 value: 40.39083333333334 - type: recall_at_5 value: 46.40083333333333 - type: map_at_1 value: 26.482 - type: map_at_10 value: 33.201 - type: map_at_100 value: 34.107 - type: map_at_1000 value: 34.197 - type: map_at_3 value: 31.174000000000003 - type: map_at_5 value: 32.279 - type: mrr_at_1 value: 29.908 - type: mrr_at_10 value: 36.235 - type: mrr_at_100 value: 37.04 - type: mrr_at_1000 value: 37.105 - type: mrr_at_3 value: 34.355999999999995 - type: mrr_at_5 value: 35.382999999999996 - type: ndcg_at_1 value: 29.908 - type: ndcg_at_10 value: 37.325 - type: ndcg_at_100 value: 41.795 - type: 
ndcg_at_1000 value: 44.105 - type: ndcg_at_3 value: 33.555 - type: ndcg_at_5 value: 35.266999999999996 - type: precision_at_1 value: 29.908 - type: precision_at_10 value: 5.721 - type: precision_at_100 value: 0.8630000000000001 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 14.008000000000001 - type: precision_at_5 value: 9.754999999999999 - type: recall_at_1 value: 26.482 - type: recall_at_10 value: 47.072 - type: recall_at_100 value: 67.27 - type: recall_at_1000 value: 84.371 - type: recall_at_3 value: 36.65 - type: recall_at_5 value: 40.774 - type: map_at_1 value: 18.815 - type: map_at_10 value: 26.369999999999997 - type: map_at_100 value: 27.458 - type: map_at_1000 value: 27.588 - type: map_at_3 value: 23.990000000000002 - type: map_at_5 value: 25.345000000000002 - type: mrr_at_1 value: 22.953000000000003 - type: mrr_at_10 value: 30.342999999999996 - type: mrr_at_100 value: 31.241000000000003 - type: mrr_at_1000 value: 31.319000000000003 - type: mrr_at_3 value: 28.16 - type: mrr_at_5 value: 29.406 - type: ndcg_at_1 value: 22.953000000000003 - type: ndcg_at_10 value: 31.151 - type: ndcg_at_100 value: 36.309000000000005 - type: ndcg_at_1000 value: 39.227000000000004 - type: ndcg_at_3 value: 26.921 - type: ndcg_at_5 value: 28.938000000000002 - type: precision_at_1 value: 22.953000000000003 - type: precision_at_10 value: 5.602 - type: precision_at_100 value: 0.9530000000000001 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 12.606 - type: precision_at_5 value: 9.119 - type: recall_at_1 value: 18.815 - type: recall_at_10 value: 41.574 - type: recall_at_100 value: 64.84400000000001 - type: recall_at_1000 value: 85.406 - type: recall_at_3 value: 29.694 - type: recall_at_5 value: 34.935 - type: map_at_1 value: 27.840999999999998 - type: map_at_10 value: 36.797999999999995 - type: map_at_100 value: 37.993 - type: map_at_1000 value: 38.086999999999996 - type: map_at_3 value: 34.050999999999995 - type: 
map_at_5 value: 35.379 - type: mrr_at_1 value: 32.649 - type: mrr_at_10 value: 41.025 - type: mrr_at_100 value: 41.878 - type: mrr_at_1000 value: 41.929 - type: mrr_at_3 value: 38.573 - type: mrr_at_5 value: 39.715 - type: ndcg_at_1 value: 32.649 - type: ndcg_at_10 value: 42.142 - type: ndcg_at_100 value: 47.558 - type: ndcg_at_1000 value: 49.643 - type: ndcg_at_3 value: 37.12 - type: ndcg_at_5 value: 38.983000000000004 - type: precision_at_1 value: 32.649 - type: precision_at_10 value: 7.08 - type: precision_at_100 value: 1.1039999999999999 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 16.698 - type: precision_at_5 value: 11.511000000000001 - type: recall_at_1 value: 27.840999999999998 - type: recall_at_10 value: 54.245 - type: recall_at_100 value: 77.947 - type: recall_at_1000 value: 92.36999999999999 - type: recall_at_3 value: 40.146 - type: recall_at_5 value: 44.951 - type: map_at_1 value: 26.529000000000003 - type: map_at_10 value: 35.010000000000005 - type: map_at_100 value: 36.647 - type: map_at_1000 value: 36.857 - type: map_at_3 value: 31.968000000000004 - type: map_at_5 value: 33.554 - type: mrr_at_1 value: 31.818 - type: mrr_at_10 value: 39.550999999999995 - type: mrr_at_100 value: 40.54 - type: mrr_at_1000 value: 40.596 - type: mrr_at_3 value: 36.726 - type: mrr_at_5 value: 38.416 - type: ndcg_at_1 value: 31.818 - type: ndcg_at_10 value: 40.675 - type: ndcg_at_100 value: 46.548 - type: ndcg_at_1000 value: 49.126 - type: ndcg_at_3 value: 35.829 - type: ndcg_at_5 value: 38.0 - type: precision_at_1 value: 31.818 - type: precision_at_10 value: 7.826 - type: precision_at_100 value: 1.538 - type: precision_at_1000 value: 0.24 - type: precision_at_3 value: 16.601 - type: precision_at_5 value: 12.095 - type: recall_at_1 value: 26.529000000000003 - type: recall_at_10 value: 51.03 - type: recall_at_100 value: 77.556 - type: recall_at_1000 value: 93.804 - type: recall_at_3 value: 36.986000000000004 - type: recall_at_5 value: 
43.096000000000004 - type: map_at_1 value: 23.480999999999998 - type: map_at_10 value: 30.817 - type: map_at_100 value: 31.838 - type: map_at_1000 value: 31.932 - type: map_at_3 value: 28.011999999999997 - type: map_at_5 value: 29.668 - type: mrr_at_1 value: 25.323 - type: mrr_at_10 value: 33.072 - type: mrr_at_100 value: 33.926 - type: mrr_at_1000 value: 33.993 - type: mrr_at_3 value: 30.436999999999998 - type: mrr_at_5 value: 32.092 - type: ndcg_at_1 value: 25.323 - type: ndcg_at_10 value: 35.514 - type: ndcg_at_100 value: 40.489000000000004 - type: ndcg_at_1000 value: 42.908 - type: ndcg_at_3 value: 30.092000000000002 - type: ndcg_at_5 value: 32.989000000000004 - type: precision_at_1 value: 25.323 - type: precision_at_10 value: 5.545 - type: precision_at_100 value: 0.861 - type: precision_at_1000 value: 0.117 - type: precision_at_3 value: 12.446 - type: precision_at_5 value: 9.131 - type: recall_at_1 value: 23.480999999999998 - type: recall_at_10 value: 47.825 - type: recall_at_100 value: 70.652 - type: recall_at_1000 value: 88.612 - type: recall_at_3 value: 33.537 - type: recall_at_5 value: 40.542 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 13.333999999999998 - type: map_at_10 value: 22.524 - type: map_at_100 value: 24.506 - type: map_at_1000 value: 24.715 - type: map_at_3 value: 19.022 - type: map_at_5 value: 20.693 - type: mrr_at_1 value: 29.186 - type: mrr_at_10 value: 41.22 - type: mrr_at_100 value: 42.16 - type: mrr_at_1000 value: 42.192 - type: mrr_at_3 value: 38.013000000000005 - type: mrr_at_5 value: 39.704 - type: ndcg_at_1 value: 29.186 - type: ndcg_at_10 value: 31.167 - type: ndcg_at_100 value: 38.879000000000005 - type: ndcg_at_1000 value: 42.376000000000005 - type: ndcg_at_3 value: 25.817 - type: ndcg_at_5 value: 27.377000000000002 - type: precision_at_1 value: 29.186 - type: precision_at_10 value: 9.693999999999999 - type: precision_at_100 
value: 1.8030000000000002 - type: precision_at_1000 value: 0.246 - type: precision_at_3 value: 19.11 - type: precision_at_5 value: 14.344999999999999 - type: recall_at_1 value: 13.333999999999998 - type: recall_at_10 value: 37.092000000000006 - type: recall_at_100 value: 63.651 - type: recall_at_1000 value: 83.05 - type: recall_at_3 value: 23.74 - type: recall_at_5 value: 28.655 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 9.151 - type: map_at_10 value: 19.653000000000002 - type: map_at_100 value: 28.053 - type: map_at_1000 value: 29.709000000000003 - type: map_at_3 value: 14.191 - type: map_at_5 value: 16.456 - type: mrr_at_1 value: 66.25 - type: mrr_at_10 value: 74.4 - type: mrr_at_100 value: 74.715 - type: mrr_at_1000 value: 74.726 - type: mrr_at_3 value: 72.417 - type: mrr_at_5 value: 73.667 - type: ndcg_at_1 value: 54.25 - type: ndcg_at_10 value: 40.77 - type: ndcg_at_100 value: 46.359 - type: ndcg_at_1000 value: 54.193000000000005 - type: ndcg_at_3 value: 44.832 - type: ndcg_at_5 value: 42.63 - type: precision_at_1 value: 66.25 - type: precision_at_10 value: 32.175 - type: precision_at_100 value: 10.668 - type: precision_at_1000 value: 2.067 - type: precision_at_3 value: 47.667 - type: precision_at_5 value: 41.3 - type: recall_at_1 value: 9.151 - type: recall_at_10 value: 25.003999999999998 - type: recall_at_100 value: 52.976 - type: recall_at_1000 value: 78.315 - type: recall_at_3 value: 15.487 - type: recall_at_5 value: 18.999 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 51.89999999999999 - type: f1 value: 46.47777925067403 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 73.706 - type: map_at_10 value: 82.423 - 
type: map_at_100 value: 82.67999999999999 - type: map_at_1000 value: 82.694 - type: map_at_3 value: 81.328 - type: map_at_5 value: 82.001 - type: mrr_at_1 value: 79.613 - type: mrr_at_10 value: 87.07000000000001 - type: mrr_at_100 value: 87.169 - type: mrr_at_1000 value: 87.17 - type: mrr_at_3 value: 86.404 - type: mrr_at_5 value: 86.856 - type: ndcg_at_1 value: 79.613 - type: ndcg_at_10 value: 86.289 - type: ndcg_at_100 value: 87.201 - type: ndcg_at_1000 value: 87.428 - type: ndcg_at_3 value: 84.625 - type: ndcg_at_5 value: 85.53699999999999 - type: precision_at_1 value: 79.613 - type: precision_at_10 value: 10.399 - type: precision_at_100 value: 1.1079999999999999 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_3 value: 32.473 - type: precision_at_5 value: 20.132 - type: recall_at_1 value: 73.706 - type: recall_at_10 value: 93.559 - type: recall_at_100 value: 97.188 - type: recall_at_1000 value: 98.555 - type: recall_at_3 value: 88.98700000000001 - type: recall_at_5 value: 91.373 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 19.841 - type: map_at_10 value: 32.643 - type: map_at_100 value: 34.575 - type: map_at_1000 value: 34.736 - type: map_at_3 value: 28.317999999999998 - type: map_at_5 value: 30.964000000000002 - type: mrr_at_1 value: 39.660000000000004 - type: mrr_at_10 value: 48.620000000000005 - type: mrr_at_100 value: 49.384 - type: mrr_at_1000 value: 49.415 - type: mrr_at_3 value: 45.988 - type: mrr_at_5 value: 47.361 - type: ndcg_at_1 value: 39.660000000000004 - type: ndcg_at_10 value: 40.646 - type: ndcg_at_100 value: 47.657 - type: ndcg_at_1000 value: 50.428 - type: ndcg_at_3 value: 36.689 - type: ndcg_at_5 value: 38.211 - type: precision_at_1 value: 39.660000000000004 - type: precision_at_10 value: 11.235000000000001 - type: precision_at_100 value: 1.8530000000000002 - type: precision_at_1000 value: 0.23600000000000002 - type: 
precision_at_3 value: 24.587999999999997 - type: precision_at_5 value: 18.395 - type: recall_at_1 value: 19.841 - type: recall_at_10 value: 48.135 - type: recall_at_100 value: 74.224 - type: recall_at_1000 value: 90.826 - type: recall_at_3 value: 33.536 - type: recall_at_5 value: 40.311 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 40.358 - type: map_at_10 value: 64.497 - type: map_at_100 value: 65.362 - type: map_at_1000 value: 65.41900000000001 - type: map_at_3 value: 61.06700000000001 - type: map_at_5 value: 63.317 - type: mrr_at_1 value: 80.716 - type: mrr_at_10 value: 86.10799999999999 - type: mrr_at_100 value: 86.265 - type: mrr_at_1000 value: 86.27 - type: mrr_at_3 value: 85.271 - type: mrr_at_5 value: 85.82499999999999 - type: ndcg_at_1 value: 80.716 - type: ndcg_at_10 value: 72.597 - type: ndcg_at_100 value: 75.549 - type: ndcg_at_1000 value: 76.61 - type: ndcg_at_3 value: 67.874 - type: ndcg_at_5 value: 70.655 - type: precision_at_1 value: 80.716 - type: precision_at_10 value: 15.148 - type: precision_at_100 value: 1.745 - type: precision_at_1000 value: 0.188 - type: precision_at_3 value: 43.597 - type: precision_at_5 value: 28.351 - type: recall_at_1 value: 40.358 - type: recall_at_10 value: 75.739 - type: recall_at_100 value: 87.259 - type: recall_at_1000 value: 94.234 - type: recall_at_3 value: 65.39500000000001 - type: recall_at_5 value: 70.878 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 90.80799999999998 - type: ap value: 86.81350378180757 - type: f1 value: 90.79901248314215 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 22.096 - type: map_at_10 value: 34.384 - type: map_at_100 value: 35.541 - type: map_at_1000 
value: 35.589999999999996 - type: map_at_3 value: 30.496000000000002 - type: map_at_5 value: 32.718 - type: mrr_at_1 value: 22.750999999999998 - type: mrr_at_10 value: 35.024 - type: mrr_at_100 value: 36.125 - type: mrr_at_1000 value: 36.168 - type: mrr_at_3 value: 31.225 - type: mrr_at_5 value: 33.416000000000004 - type: ndcg_at_1 value: 22.750999999999998 - type: ndcg_at_10 value: 41.351 - type: ndcg_at_100 value: 46.92 - type: ndcg_at_1000 value: 48.111 - type: ndcg_at_3 value: 33.439 - type: ndcg_at_5 value: 37.407000000000004 - type: precision_at_1 value: 22.750999999999998 - type: precision_at_10 value: 6.564 - type: precision_at_100 value: 0.935 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.288 - type: precision_at_5 value: 10.581999999999999 - type: recall_at_1 value: 22.096 - type: recall_at_10 value: 62.771 - type: recall_at_100 value: 88.529 - type: recall_at_1000 value: 97.55 - type: recall_at_3 value: 41.245 - type: recall_at_5 value: 50.788 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 94.16780665754673 - type: f1 value: 93.96331194859894 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 76.90606475148198 - type: f1 value: 58.58344986604187 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 76.14660390047075 - type: f1 value: 74.31533923533614 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: 
- type: accuracy value: 80.16139878950908 - type: f1 value: 80.18532656824924 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 32.949880906135085 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 31.56300351524862 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 31.196521894371315 - type: mrr value: 32.22644231694389 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 6.783 - type: map_at_10 value: 14.549000000000001 - type: map_at_100 value: 18.433 - type: map_at_1000 value: 19.949 - type: map_at_3 value: 10.936 - type: map_at_5 value: 12.514 - type: mrr_at_1 value: 47.368 - type: mrr_at_10 value: 56.42 - type: mrr_at_100 value: 56.908 - type: mrr_at_1000 value: 56.95 - type: mrr_at_3 value: 54.283 - type: mrr_at_5 value: 55.568 - type: ndcg_at_1 value: 45.666000000000004 - type: ndcg_at_10 value: 37.389 - type: ndcg_at_100 value: 34.253 - type: ndcg_at_1000 value: 43.059999999999995 - type: ndcg_at_3 value: 42.725 - type: ndcg_at_5 value: 40.193 - type: precision_at_1 value: 47.368 - type: precision_at_10 value: 27.988000000000003 - type: precision_at_100 value: 8.672 - type: precision_at_1000 value: 2.164 - type: precision_at_3 value: 40.248 - type: precision_at_5 value: 34.737 - type: recall_at_1 value: 6.783 - type: recall_at_10 value: 17.838 - type: recall_at_100 value: 33.672000000000004 - type: recall_at_1000 value: 66.166 - type: recall_at_3 value: 11.849 - type: recall_at_5 value: 14.205000000000002 - 
task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 31.698999999999998 - type: map_at_10 value: 46.556 - type: map_at_100 value: 47.652 - type: map_at_1000 value: 47.68 - type: map_at_3 value: 42.492000000000004 - type: map_at_5 value: 44.763999999999996 - type: mrr_at_1 value: 35.747 - type: mrr_at_10 value: 49.242999999999995 - type: mrr_at_100 value: 50.052 - type: mrr_at_1000 value: 50.068 - type: mrr_at_3 value: 45.867000000000004 - type: mrr_at_5 value: 47.778999999999996 - type: ndcg_at_1 value: 35.717999999999996 - type: ndcg_at_10 value: 54.14600000000001 - type: ndcg_at_100 value: 58.672999999999995 - type: ndcg_at_1000 value: 59.279 - type: ndcg_at_3 value: 46.407 - type: ndcg_at_5 value: 50.181 - type: precision_at_1 value: 35.717999999999996 - type: precision_at_10 value: 8.844000000000001 - type: precision_at_100 value: 1.139 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 20.993000000000002 - type: precision_at_5 value: 14.791000000000002 - type: recall_at_1 value: 31.698999999999998 - type: recall_at_10 value: 74.693 - type: recall_at_100 value: 94.15299999999999 - type: recall_at_1000 value: 98.585 - type: recall_at_3 value: 54.388999999999996 - type: recall_at_5 value: 63.08200000000001 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 71.283 - type: map_at_10 value: 85.24000000000001 - type: map_at_100 value: 85.882 - type: map_at_1000 value: 85.897 - type: map_at_3 value: 82.326 - type: map_at_5 value: 84.177 - type: mrr_at_1 value: 82.21000000000001 - type: mrr_at_10 value: 88.228 - type: mrr_at_100 value: 88.32 - type: mrr_at_1000 value: 88.32 - type: mrr_at_3 value: 87.323 - type: mrr_at_5 value: 87.94800000000001 - type: ndcg_at_1 value: 82.17999999999999 - type: ndcg_at_10 value: 88.9 - type: ndcg_at_100 value: 90.079 - type: ndcg_at_1000 value: 
90.158 - type: ndcg_at_3 value: 86.18299999999999 - type: ndcg_at_5 value: 87.71799999999999 - type: precision_at_1 value: 82.17999999999999 - type: precision_at_10 value: 13.464 - type: precision_at_100 value: 1.533 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.693 - type: precision_at_5 value: 24.792 - type: recall_at_1 value: 71.283 - type: recall_at_10 value: 95.742 - type: recall_at_100 value: 99.67200000000001 - type: recall_at_1000 value: 99.981 - type: recall_at_3 value: 87.888 - type: recall_at_5 value: 92.24 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 56.24267063669042 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 62.88056988932578 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.903 - type: map_at_10 value: 13.202 - type: map_at_100 value: 15.5 - type: map_at_1000 value: 15.870999999999999 - type: map_at_3 value: 9.407 - type: map_at_5 value: 11.238 - type: mrr_at_1 value: 24.2 - type: mrr_at_10 value: 35.867 - type: mrr_at_100 value: 37.001 - type: mrr_at_1000 value: 37.043 - type: mrr_at_3 value: 32.5 - type: mrr_at_5 value: 34.35 - type: ndcg_at_1 value: 24.2 - type: ndcg_at_10 value: 21.731 - type: ndcg_at_100 value: 30.7 - type: ndcg_at_1000 value: 36.618 - type: ndcg_at_3 value: 20.72 - type: ndcg_at_5 value: 17.954 - type: precision_at_1 value: 24.2 - type: precision_at_10 value: 11.33 - type: precision_at_100 value: 2.4410000000000003 - type: precision_at_1000 value: 0.386 - type: precision_at_3 value: 19.667 - type: precision_at_5 value: 15.86 - type: recall_at_1 value: 4.903 - type: recall_at_10 value: 
22.962 - type: recall_at_100 value: 49.563 - type: recall_at_1000 value: 78.238 - type: recall_at_3 value: 11.953 - type: recall_at_5 value: 16.067999999999998 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 84.12694254604078 - type: cos_sim_spearman value: 80.30141815181918 - type: euclidean_pearson value: 81.34015449877128 - type: euclidean_spearman value: 80.13984197010849 - type: manhattan_pearson value: 81.31767068124086 - type: manhattan_spearman value: 80.11720513114103 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 86.13112984010417 - type: cos_sim_spearman value: 78.03063573402875 - type: euclidean_pearson value: 83.51928418844804 - type: euclidean_spearman value: 78.4045235411144 - type: manhattan_pearson value: 83.49981637388689 - type: manhattan_spearman value: 78.4042575139372 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 82.50327987379504 - type: cos_sim_spearman value: 84.18556767756205 - type: euclidean_pearson value: 82.69684424327679 - type: euclidean_spearman value: 83.5368106038335 - type: manhattan_pearson value: 82.57967581007374 - type: manhattan_spearman value: 83.43009053133697 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 82.50756863007814 - type: cos_sim_spearman value: 82.27204331279108 - type: euclidean_pearson value: 81.39535251429741 - type: euclidean_spearman value: 81.84386626336239 - type: manhattan_pearson value: 81.34281737280695 - type: manhattan_spearman value: 81.81149375673166 - task: 
type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 86.8727714856726 - type: cos_sim_spearman value: 87.95738287792312 - type: euclidean_pearson value: 86.62920602795887 - type: euclidean_spearman value: 87.05207355381243 - type: manhattan_pearson value: 86.53587918472225 - type: manhattan_spearman value: 86.95382961029586 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 83.52240359769479 - type: cos_sim_spearman value: 85.47685776238286 - type: euclidean_pearson value: 84.25815333483058 - type: euclidean_spearman value: 85.27415639683198 - type: manhattan_pearson value: 84.29127757025637 - type: manhattan_spearman value: 85.30226224917351 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 86.42501708915708 - type: cos_sim_spearman value: 86.42276182795041 - type: euclidean_pearson value: 86.5408207354761 - type: euclidean_spearman value: 85.46096321750838 - type: manhattan_pearson value: 86.54177303026881 - type: manhattan_spearman value: 85.50313151916117 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 64.86521089250766 - type: cos_sim_spearman value: 65.94868540323003 - type: euclidean_pearson value: 67.16569626533084 - type: euclidean_spearman value: 66.37667004134917 - type: manhattan_pearson value: 67.1482365102333 - type: manhattan_spearman value: 66.53240122580029 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: 
b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 84.64746265365318 - type: cos_sim_spearman value: 86.41888825906786 - type: euclidean_pearson value: 85.27453642725811 - type: euclidean_spearman value: 85.94095796602544 - type: manhattan_pearson value: 85.28643660505334 - type: manhattan_spearman value: 85.95028003260744 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 87.48903153618527 - type: mrr value: 96.41081503826601 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 58.594 - type: map_at_10 value: 69.296 - type: map_at_100 value: 69.782 - type: map_at_1000 value: 69.795 - type: map_at_3 value: 66.23 - type: map_at_5 value: 68.293 - type: mrr_at_1 value: 61.667 - type: mrr_at_10 value: 70.339 - type: mrr_at_100 value: 70.708 - type: mrr_at_1000 value: 70.722 - type: mrr_at_3 value: 68.0 - type: mrr_at_5 value: 69.56700000000001 - type: ndcg_at_1 value: 61.667 - type: ndcg_at_10 value: 74.039 - type: ndcg_at_100 value: 76.103 - type: ndcg_at_1000 value: 76.47800000000001 - type: ndcg_at_3 value: 68.967 - type: ndcg_at_5 value: 71.96900000000001 - type: precision_at_1 value: 61.667 - type: precision_at_10 value: 9.866999999999999 - type: precision_at_100 value: 1.097 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 27.111 - type: precision_at_5 value: 18.2 - type: recall_at_1 value: 58.594 - type: recall_at_10 value: 87.422 - type: recall_at_100 value: 96.667 - type: recall_at_1000 value: 99.667 - type: recall_at_3 value: 74.217 - type: recall_at_5 value: 81.539 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: 
d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.85049504950496 - type: cos_sim_ap value: 96.33111544137081 - type: cos_sim_f1 value: 92.35443037974684 - type: cos_sim_precision value: 93.53846153846153 - type: cos_sim_recall value: 91.2 - type: dot_accuracy value: 99.82376237623762 - type: dot_ap value: 95.38082527310888 - type: dot_f1 value: 90.90909090909092 - type: dot_precision value: 92.90187891440502 - type: dot_recall value: 89.0 - type: euclidean_accuracy value: 99.84851485148515 - type: euclidean_ap value: 96.32316003996347 - type: euclidean_f1 value: 92.2071392659628 - type: euclidean_precision value: 92.71991911021233 - type: euclidean_recall value: 91.7 - type: manhattan_accuracy value: 99.84851485148515 - type: manhattan_ap value: 96.3655668249217 - type: manhattan_f1 value: 92.18356026222895 - type: manhattan_precision value: 92.98067141403867 - type: manhattan_recall value: 91.4 - type: max_accuracy value: 99.85049504950496 - type: max_ap value: 96.3655668249217 - type: max_f1 value: 92.35443037974684 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 65.94861371629051 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 35.009430451385 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 54.61164066427969 - type: mrr value: 55.49710603938544 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: 
cos_sim_pearson value: 30.622620124907662 - type: cos_sim_spearman value: 31.0678351356163 - type: dot_pearson value: 30.863727693306814 - type: dot_spearman value: 31.230306567021255 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.22 - type: map_at_10 value: 2.011 - type: map_at_100 value: 10.974 - type: map_at_1000 value: 25.819 - type: map_at_3 value: 0.6649999999999999 - type: map_at_5 value: 1.076 - type: mrr_at_1 value: 86.0 - type: mrr_at_10 value: 91.8 - type: mrr_at_100 value: 91.8 - type: mrr_at_1000 value: 91.8 - type: mrr_at_3 value: 91.0 - type: mrr_at_5 value: 91.8 - type: ndcg_at_1 value: 82.0 - type: ndcg_at_10 value: 78.07300000000001 - type: ndcg_at_100 value: 58.231 - type: ndcg_at_1000 value: 51.153000000000006 - type: ndcg_at_3 value: 81.123 - type: ndcg_at_5 value: 81.059 - type: precision_at_1 value: 86.0 - type: precision_at_10 value: 83.0 - type: precision_at_100 value: 59.38 - type: precision_at_1000 value: 22.55 - type: precision_at_3 value: 87.333 - type: precision_at_5 value: 86.8 - type: recall_at_1 value: 0.22 - type: recall_at_10 value: 2.2079999999999997 - type: recall_at_100 value: 14.069 - type: recall_at_1000 value: 47.678 - type: recall_at_3 value: 0.7040000000000001 - type: recall_at_5 value: 1.161 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.809 - type: map_at_10 value: 10.394 - type: map_at_100 value: 16.598 - type: map_at_1000 value: 18.142 - type: map_at_3 value: 5.572 - type: map_at_5 value: 7.1370000000000005 - type: mrr_at_1 value: 32.653 - type: mrr_at_10 value: 46.564 - type: mrr_at_100 value: 47.469 - type: mrr_at_1000 value: 47.469 - type: mrr_at_3 value: 42.177 - type: mrr_at_5 value: 44.524 - type: ndcg_at_1 value: 30.612000000000002 - type: ndcg_at_10 value: 25.701 - type: ndcg_at_100 value: 37.532 - type: 
ndcg_at_1000 value: 48.757 - type: ndcg_at_3 value: 28.199999999999996 - type: ndcg_at_5 value: 25.987 - type: precision_at_1 value: 32.653 - type: precision_at_10 value: 23.469 - type: precision_at_100 value: 7.9799999999999995 - type: precision_at_1000 value: 1.5350000000000001 - type: precision_at_3 value: 29.932 - type: precision_at_5 value: 26.122 - type: recall_at_1 value: 2.809 - type: recall_at_10 value: 16.887 - type: recall_at_100 value: 48.67 - type: recall_at_1000 value: 82.89699999999999 - type: recall_at_3 value: 6.521000000000001 - type: recall_at_5 value: 9.609 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.57860000000001 - type: ap value: 13.82629211536393 - type: f1 value: 54.59860966183956 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 59.38030560271647 - type: f1 value: 59.69685552567865 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 51.4736717043405 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 86.92853311080646 - type: cos_sim_ap value: 77.67872502591382 - type: cos_sim_f1 value: 70.33941236068895 - type: cos_sim_precision value: 67.63273258645884 - type: cos_sim_recall value: 73.27176781002639 - type: dot_accuracy value: 85.79603027954938 - type: dot_ap value: 73.73786190233379 - type: dot_f1 value: 
67.3437901774235 - type: dot_precision value: 65.67201604814443 - type: dot_recall value: 69.10290237467018 - type: euclidean_accuracy value: 86.94045419324074 - type: euclidean_ap value: 77.6687791535167 - type: euclidean_f1 value: 70.47209214023542 - type: euclidean_precision value: 67.7207492094381 - type: euclidean_recall value: 73.45646437994723 - type: manhattan_accuracy value: 86.87488823985218 - type: manhattan_ap value: 77.63373392430728 - type: manhattan_f1 value: 70.40920716112532 - type: manhattan_precision value: 68.31265508684864 - type: manhattan_recall value: 72.63852242744063 - type: max_accuracy value: 86.94045419324074 - type: max_ap value: 77.67872502591382 - type: max_f1 value: 70.47209214023542 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.67155664221679 - type: cos_sim_ap value: 85.64591703003417 - type: cos_sim_f1 value: 77.59531005352656 - type: cos_sim_precision value: 73.60967184801382 - type: cos_sim_recall value: 82.03726516784724 - type: dot_accuracy value: 88.41541506578181 - type: dot_ap value: 84.6482788957769 - type: dot_f1 value: 77.04748541466657 - type: dot_precision value: 74.02440754931176 - type: dot_recall value: 80.3279950723745 - type: euclidean_accuracy value: 88.63080684596576 - type: euclidean_ap value: 85.44570045321562 - type: euclidean_f1 value: 77.28769403336106 - type: euclidean_precision value: 72.90600040958427 - type: euclidean_recall value: 82.22975053895904 - type: manhattan_accuracy value: 88.59393798269105 - type: manhattan_ap value: 85.40271361038187 - type: manhattan_f1 value: 77.17606419344392 - type: manhattan_precision value: 72.4447747078295 - type: manhattan_recall value: 82.5685247921158 - type: max_accuracy value: 88.67155664221679 - type: max_ap value: 85.64591703003417 - type: max_f1 value: 
77.59531005352656 --- # Neuronx model for [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) This repository contains the [**AWS Inferentia2**](https://aws.amazon.com/ec2/instance-types/inf2/) and [`neuronx`](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/) compatible checkpoint for [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). You can find detailed information about the base model on its [Model Card](https://huggingface.co/BAAI/bge-base-en-v1.5). ## Usage on Amazon SageMaker _coming soon_ ## Usage with optimum-neuron ```python from optimum.neuron import NeuronModelForFeatureExtraction from transformers import AutoTokenizer import torch import torch_neuronx # Load Model from Hugging Face repository model = NeuronModelForFeatureExtraction.from_pretrained("aws-neuron/bge-base-en-v1-5-seqlen-384-bs-1") tokenizer = AutoTokenizer.from_pretrained("aws-neuron/bge-base-en-v1-5-seqlen-384-bs-1") # sentence input inputs = "Hello, my dog is cute" # Tokenize sentences encoded_input = tokenizer(inputs,return_tensors="pt",truncation=True,max_length=model.config.neuron["static_sequence_length"]) # Compute embeddings with torch.no_grad(): model_output = model(*tuple(encoded_input.values())) # Perform pooling. In this case, cls pooling. sentence_embeddings = model_output[0][:, 0] # normalize embeddings sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1) ``` **input_shapes** ```json { "sequence_length": 384, "batch_size": 1 } ```
mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF
mradermacher
"2024-06-15T11:24:52Z"
4,694
1
transformers
[ "transformers", "gguf", "en", "base_model:Nitral-AI/Hathor_RP-v.01-L3-8B", "license:other", "endpoints_compatible", "region:us" ]
null
"2024-06-15T07:06:54Z"
--- base_model: Nitral-AI/Hathor_RP-v.01-L3-8B language: - en library_name: transformers license: other quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/Nitral-AI/Hathor_RP-v.01-L3-8B <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | 
[GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | 
[GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Hathor_RP-v.01-L3-8B-i1-GGUF/resolve/main/Hathor_RP-v.01-L3-8B.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
sentence-transformers/msmarco-distilbert-base-v2
sentence-transformers
"2024-03-27T11:26:58Z"
4,693
1
sentence-transformers
[ "sentence-transformers", "pytorch", "tf", "safetensors", "distilbert", "feature-extraction", "sentence-similarity", "transformers", "arxiv:1908.10084", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
--- license: apache-2.0 library_name: sentence-transformers tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers pipeline_tag: sentence-similarity --- # sentence-transformers/msmarco-distilbert-base-v2 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('sentence-transformers/msmarco-distilbert-base-v2') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. 
```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/msmarco-distilbert-base-v2') model = AutoModel.from_pretrained('sentence-transformers/msmarco-distilbert-base-v2') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/msmarco-distilbert-base-v2) ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 350, 'do_lower_case': False}) with Transformer model: DistilBertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors This model was trained by [sentence-transformers](https://www.sbert.net/). 
If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084): ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "http://arxiv.org/abs/1908.10084", } ```
consciousAI/cai-lunaris-text-embeddings
consciousAI
"2023-06-22T21:33:52Z"
4,692
4
sentence-transformers
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "transformers", "mteb", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
"2023-06-22T18:08:54Z"
--- license: apache-2.0 pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers - mteb model-index: - name: cai-lunaris-text-embeddings results: - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 17.07 - type: map_at_10 value: 29.372999999999998 - type: map_at_100 value: 30.79 - type: map_at_1000 value: 30.819999999999997 - type: map_at_3 value: 24.395 - type: map_at_5 value: 27.137 - type: mrr_at_1 value: 17.923000000000002 - type: mrr_at_10 value: 29.695 - type: mrr_at_100 value: 31.098 - type: mrr_at_1000 value: 31.128 - type: mrr_at_3 value: 24.704 - type: mrr_at_5 value: 27.449 - type: ndcg_at_1 value: 17.07 - type: ndcg_at_10 value: 37.269000000000005 - type: ndcg_at_100 value: 43.716 - type: ndcg_at_1000 value: 44.531 - type: ndcg_at_3 value: 26.839000000000002 - type: ndcg_at_5 value: 31.845000000000002 - type: precision_at_1 value: 17.07 - type: precision_at_10 value: 6.3020000000000005 - type: precision_at_100 value: 0.922 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 11.309 - type: precision_at_5 value: 9.246 - type: recall_at_1 value: 17.07 - type: recall_at_10 value: 63.016000000000005 - type: recall_at_100 value: 92.24799999999999 - type: recall_at_1000 value: 98.72 - type: recall_at_3 value: 33.926 - type: recall_at_5 value: 46.23 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 53.44266265900711 - type: mrr value: 66.54695950402322 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 75.9652953730204 - type: cos_sim_spearman value: 73.96554077670989 - type: euclidean_pearson 
value: 75.68477255792381 - type: euclidean_spearman value: 74.59447076995703 - type: manhattan_pearson value: 75.94984623881341 - type: manhattan_spearman value: 74.72218452337502 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 14.119000000000002 - type: map_at_10 value: 19.661 - type: map_at_100 value: 20.706 - type: map_at_1000 value: 20.848 - type: map_at_3 value: 17.759 - type: map_at_5 value: 18.645 - type: mrr_at_1 value: 17.166999999999998 - type: mrr_at_10 value: 23.313 - type: mrr_at_100 value: 24.263 - type: mrr_at_1000 value: 24.352999999999998 - type: mrr_at_3 value: 21.412 - type: mrr_at_5 value: 22.313 - type: ndcg_at_1 value: 17.166999999999998 - type: ndcg_at_10 value: 23.631 - type: ndcg_at_100 value: 28.427000000000003 - type: ndcg_at_1000 value: 31.862000000000002 - type: ndcg_at_3 value: 20.175 - type: ndcg_at_5 value: 21.397 - type: precision_at_1 value: 17.166999999999998 - type: precision_at_10 value: 4.549 - type: precision_at_100 value: 0.8370000000000001 - type: precision_at_1000 value: 0.136 - type: precision_at_3 value: 9.68 - type: precision_at_5 value: 6.981 - type: recall_at_1 value: 14.119000000000002 - type: recall_at_10 value: 32.147999999999996 - type: recall_at_100 value: 52.739999999999995 - type: recall_at_1000 value: 76.67 - type: recall_at_3 value: 22.019 - type: recall_at_5 value: 25.361 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 16.576 - type: map_at_10 value: 22.281000000000002 - type: map_at_100 value: 23.066 - type: map_at_1000 value: 23.166 - type: map_at_3 value: 20.385 - type: map_at_5 value: 21.557000000000002 - type: mrr_at_1 value: 20.892 - type: mrr_at_10 value: 26.605 - type: mrr_at_100 value: 27.229 - type: mrr_at_1000 value: 27.296 - type: mrr_at_3 value: 24.809 - 
type: mrr_at_5 value: 25.927 - type: ndcg_at_1 value: 20.892 - type: ndcg_at_10 value: 26.092 - type: ndcg_at_100 value: 29.398999999999997 - type: ndcg_at_1000 value: 31.884 - type: ndcg_at_3 value: 23.032 - type: ndcg_at_5 value: 24.634 - type: precision_at_1 value: 20.892 - type: precision_at_10 value: 4.885 - type: precision_at_100 value: 0.818 - type: precision_at_1000 value: 0.126 - type: precision_at_3 value: 10.977 - type: precision_at_5 value: 8.013 - type: recall_at_1 value: 16.576 - type: recall_at_10 value: 32.945 - type: recall_at_100 value: 47.337 - type: recall_at_1000 value: 64.592 - type: recall_at_3 value: 24.053 - type: recall_at_5 value: 28.465 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGamingRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 20.604 - type: map_at_10 value: 28.754999999999995 - type: map_at_100 value: 29.767 - type: map_at_1000 value: 29.852 - type: map_at_3 value: 26.268 - type: map_at_5 value: 27.559 - type: mrr_at_1 value: 24.326 - type: mrr_at_10 value: 31.602000000000004 - type: mrr_at_100 value: 32.46 - type: mrr_at_1000 value: 32.521 - type: mrr_at_3 value: 29.415000000000003 - type: mrr_at_5 value: 30.581000000000003 - type: ndcg_at_1 value: 24.326 - type: ndcg_at_10 value: 33.335 - type: ndcg_at_100 value: 38.086 - type: ndcg_at_1000 value: 40.319 - type: ndcg_at_3 value: 28.796 - type: ndcg_at_5 value: 30.758999999999997 - type: precision_at_1 value: 24.326 - type: precision_at_10 value: 5.712 - type: precision_at_100 value: 0.893 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_3 value: 13.208 - type: precision_at_5 value: 9.329 - type: recall_at_1 value: 20.604 - type: recall_at_10 value: 44.505 - type: recall_at_100 value: 65.866 - type: recall_at_1000 value: 82.61800000000001 - type: recall_at_3 value: 31.794 - type: recall_at_5 value: 36.831 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB 
CQADupstackGisRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 8.280999999999999 - type: map_at_10 value: 11.636000000000001 - type: map_at_100 value: 12.363 - type: map_at_1000 value: 12.469 - type: map_at_3 value: 10.415000000000001 - type: map_at_5 value: 11.144 - type: mrr_at_1 value: 9.266 - type: mrr_at_10 value: 12.838 - type: mrr_at_100 value: 13.608999999999998 - type: mrr_at_1000 value: 13.700999999999999 - type: mrr_at_3 value: 11.507000000000001 - type: mrr_at_5 value: 12.343 - type: ndcg_at_1 value: 9.266 - type: ndcg_at_10 value: 13.877 - type: ndcg_at_100 value: 18.119 - type: ndcg_at_1000 value: 21.247 - type: ndcg_at_3 value: 11.376999999999999 - type: ndcg_at_5 value: 12.675 - type: precision_at_1 value: 9.266 - type: precision_at_10 value: 2.226 - type: precision_at_100 value: 0.47200000000000003 - type: precision_at_1000 value: 0.077 - type: precision_at_3 value: 4.859 - type: precision_at_5 value: 3.6380000000000003 - type: recall_at_1 value: 8.280999999999999 - type: recall_at_10 value: 19.872999999999998 - type: recall_at_100 value: 40.585 - type: recall_at_1000 value: 65.225 - type: recall_at_3 value: 13.014000000000001 - type: recall_at_5 value: 16.147 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 4.1209999999999996 - type: map_at_10 value: 7.272 - type: map_at_100 value: 8.079 - type: map_at_1000 value: 8.199 - type: map_at_3 value: 6.212 - type: map_at_5 value: 6.736000000000001 - type: mrr_at_1 value: 5.721 - type: mrr_at_10 value: 9.418 - type: mrr_at_100 value: 10.281 - type: mrr_at_1000 value: 10.385 - type: mrr_at_3 value: 8.126 - type: mrr_at_5 value: 8.779 - type: ndcg_at_1 value: 5.721 - type: ndcg_at_10 value: 9.673 - type: ndcg_at_100 value: 13.852999999999998 - type: ndcg_at_1000 value: 17.546999999999997 - type: ndcg_at_3 value: 7.509 - type: ndcg_at_5 value: 
8.373 - type: precision_at_1 value: 5.721 - type: precision_at_10 value: 2.04 - type: precision_at_100 value: 0.48 - type: precision_at_1000 value: 0.093 - type: precision_at_3 value: 4.022 - type: precision_at_5 value: 3.06 - type: recall_at_1 value: 4.1209999999999996 - type: recall_at_10 value: 15.201 - type: recall_at_100 value: 33.922999999999995 - type: recall_at_1000 value: 61.529999999999994 - type: recall_at_3 value: 8.869 - type: recall_at_5 value: 11.257 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackPhysicsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 14.09 - type: map_at_10 value: 19.573999999999998 - type: map_at_100 value: 20.580000000000002 - type: map_at_1000 value: 20.704 - type: map_at_3 value: 17.68 - type: map_at_5 value: 18.64 - type: mrr_at_1 value: 17.227999999999998 - type: mrr_at_10 value: 23.152 - type: mrr_at_100 value: 24.056 - type: mrr_at_1000 value: 24.141000000000002 - type: mrr_at_3 value: 21.142 - type: mrr_at_5 value: 22.201 - type: ndcg_at_1 value: 17.227999999999998 - type: ndcg_at_10 value: 23.39 - type: ndcg_at_100 value: 28.483999999999998 - type: ndcg_at_1000 value: 31.709 - type: ndcg_at_3 value: 19.883 - type: ndcg_at_5 value: 21.34 - type: precision_at_1 value: 17.227999999999998 - type: precision_at_10 value: 4.3790000000000004 - type: precision_at_100 value: 0.826 - type: precision_at_1000 value: 0.128 - type: precision_at_3 value: 9.496 - type: precision_at_5 value: 6.872 - type: recall_at_1 value: 14.09 - type: recall_at_10 value: 31.580000000000002 - type: recall_at_100 value: 54.074 - type: recall_at_1000 value: 77.092 - type: recall_at_3 value: 21.601 - type: recall_at_5 value: 25.333 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 10.538 - type: map_at_10 value: 15.75 - type: map_at_100 value: 16.71 - type: map_at_1000 
value: 16.838 - type: map_at_3 value: 13.488 - type: map_at_5 value: 14.712 - type: mrr_at_1 value: 13.813 - type: mrr_at_10 value: 19.08 - type: mrr_at_100 value: 19.946 - type: mrr_at_1000 value: 20.044 - type: mrr_at_3 value: 16.838 - type: mrr_at_5 value: 17.951 - type: ndcg_at_1 value: 13.813 - type: ndcg_at_10 value: 19.669 - type: ndcg_at_100 value: 24.488 - type: ndcg_at_1000 value: 27.87 - type: ndcg_at_3 value: 15.479000000000001 - type: ndcg_at_5 value: 17.229 - type: precision_at_1 value: 13.813 - type: precision_at_10 value: 3.916 - type: precision_at_100 value: 0.743 - type: precision_at_1000 value: 0.122 - type: precision_at_3 value: 7.534000000000001 - type: precision_at_5 value: 5.822 - type: recall_at_1 value: 10.538 - type: recall_at_10 value: 28.693 - type: recall_at_100 value: 50.308 - type: recall_at_1000 value: 74.44 - type: recall_at_3 value: 16.866999999999997 - type: recall_at_5 value: 21.404999999999998 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 11.044583333333332 - type: map_at_10 value: 15.682833333333335 - type: map_at_100 value: 16.506500000000003 - type: map_at_1000 value: 16.623833333333334 - type: map_at_3 value: 14.130833333333333 - type: map_at_5 value: 14.963583333333332 - type: mrr_at_1 value: 13.482833333333332 - type: mrr_at_10 value: 18.328500000000002 - type: mrr_at_100 value: 19.095416666666665 - type: mrr_at_1000 value: 19.18241666666666 - type: mrr_at_3 value: 16.754749999999998 - type: mrr_at_5 value: 17.614749999999997 - type: ndcg_at_1 value: 13.482833333333332 - type: ndcg_at_10 value: 18.81491666666667 - type: ndcg_at_100 value: 22.946833333333334 - type: ndcg_at_1000 value: 26.061083333333336 - type: ndcg_at_3 value: 15.949333333333332 - type: ndcg_at_5 value: 17.218333333333334 - type: precision_at_1 value: 13.482833333333332 - type: precision_at_10 value: 3.456583333333333 - type: 
precision_at_100 value: 0.6599166666666666 - type: precision_at_1000 value: 0.109 - type: precision_at_3 value: 7.498833333333332 - type: precision_at_5 value: 5.477166666666667 - type: recall_at_1 value: 11.044583333333332 - type: recall_at_10 value: 25.737750000000005 - type: recall_at_100 value: 44.617916666666666 - type: recall_at_1000 value: 67.56524999999999 - type: recall_at_3 value: 17.598249999999997 - type: recall_at_5 value: 20.9035 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackStatsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 9.362 - type: map_at_10 value: 13.414000000000001 - type: map_at_100 value: 14.083000000000002 - type: map_at_1000 value: 14.168 - type: map_at_3 value: 12.098 - type: map_at_5 value: 12.803999999999998 - type: mrr_at_1 value: 11.043 - type: mrr_at_10 value: 15.158 - type: mrr_at_100 value: 15.845999999999998 - type: mrr_at_1000 value: 15.916 - type: mrr_at_3 value: 13.88 - type: mrr_at_5 value: 14.601 - type: ndcg_at_1 value: 11.043 - type: ndcg_at_10 value: 16.034000000000002 - type: ndcg_at_100 value: 19.686 - type: ndcg_at_1000 value: 22.188 - type: ndcg_at_3 value: 13.530000000000001 - type: ndcg_at_5 value: 14.704 - type: precision_at_1 value: 11.043 - type: precision_at_10 value: 2.791 - type: precision_at_100 value: 0.5 - type: precision_at_1000 value: 0.077 - type: precision_at_3 value: 6.237 - type: precision_at_5 value: 4.5089999999999995 - type: recall_at_1 value: 9.362 - type: recall_at_10 value: 22.396 - type: recall_at_100 value: 39.528999999999996 - type: recall_at_1000 value: 58.809 - type: recall_at_3 value: 15.553 - type: recall_at_5 value: 18.512 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackTexRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 5.657 - type: map_at_10 value: 8.273 - type: map_at_100 value: 8.875 - type: map_at_1000 value: 8.977 - type: map_at_3 value: 7.32 - 
type: map_at_5 value: 7.792000000000001 - type: mrr_at_1 value: 7.02 - type: mrr_at_10 value: 9.966999999999999 - type: mrr_at_100 value: 10.636 - type: mrr_at_1000 value: 10.724 - type: mrr_at_3 value: 8.872 - type: mrr_at_5 value: 9.461 - type: ndcg_at_1 value: 7.02 - type: ndcg_at_10 value: 10.199 - type: ndcg_at_100 value: 13.642000000000001 - type: ndcg_at_1000 value: 16.643 - type: ndcg_at_3 value: 8.333 - type: ndcg_at_5 value: 9.103 - type: precision_at_1 value: 7.02 - type: precision_at_10 value: 1.8929999999999998 - type: precision_at_100 value: 0.43 - type: precision_at_1000 value: 0.08099999999999999 - type: precision_at_3 value: 3.843 - type: precision_at_5 value: 2.884 - type: recall_at_1 value: 5.657 - type: recall_at_10 value: 14.563 - type: recall_at_100 value: 30.807000000000002 - type: recall_at_1000 value: 53.251000000000005 - type: recall_at_3 value: 9.272 - type: recall_at_5 value: 11.202 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackUnixRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 10.671999999999999 - type: map_at_10 value: 14.651 - type: map_at_100 value: 15.406 - type: map_at_1000 value: 15.525 - type: map_at_3 value: 13.461 - type: map_at_5 value: 14.163 - type: mrr_at_1 value: 12.407 - type: mrr_at_10 value: 16.782 - type: mrr_at_100 value: 17.562 - type: mrr_at_1000 value: 17.653 - type: mrr_at_3 value: 15.47 - type: mrr_at_5 value: 16.262 - type: ndcg_at_1 value: 12.407 - type: ndcg_at_10 value: 17.251 - type: ndcg_at_100 value: 21.378 - type: ndcg_at_1000 value: 24.689 - type: ndcg_at_3 value: 14.915000000000001 - type: ndcg_at_5 value: 16.1 - type: precision_at_1 value: 12.407 - type: precision_at_10 value: 2.91 - type: precision_at_100 value: 0.573 - type: precision_at_1000 value: 0.096 - type: precision_at_3 value: 6.779 - type: precision_at_5 value: 4.888 - type: recall_at_1 value: 10.671999999999999 - type: recall_at_10 value: 23.099 - type: recall_at_100 
value: 41.937999999999995 - type: recall_at_1000 value: 66.495 - type: recall_at_3 value: 16.901 - type: recall_at_5 value: 19.807 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 13.364 - type: map_at_10 value: 17.772 - type: map_at_100 value: 18.659 - type: map_at_1000 value: 18.861 - type: map_at_3 value: 16.659 - type: map_at_5 value: 17.174 - type: mrr_at_1 value: 16.996 - type: mrr_at_10 value: 21.687 - type: mrr_at_100 value: 22.313 - type: mrr_at_1000 value: 22.422 - type: mrr_at_3 value: 20.652 - type: mrr_at_5 value: 21.146 - type: ndcg_at_1 value: 16.996 - type: ndcg_at_10 value: 21.067 - type: ndcg_at_100 value: 24.829 - type: ndcg_at_1000 value: 28.866999999999997 - type: ndcg_at_3 value: 19.466 - type: ndcg_at_5 value: 19.993 - type: precision_at_1 value: 16.996 - type: precision_at_10 value: 4.071000000000001 - type: precision_at_100 value: 0.9329999999999999 - type: precision_at_1000 value: 0.183 - type: precision_at_3 value: 9.223 - type: precision_at_5 value: 6.4030000000000005 - type: recall_at_1 value: 13.364 - type: recall_at_10 value: 25.976 - type: recall_at_100 value: 44.134 - type: recall_at_1000 value: 73.181 - type: recall_at_3 value: 20.503 - type: recall_at_5 value: 22.409000000000002 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 5.151 - type: map_at_10 value: 9.155000000000001 - type: map_at_100 value: 9.783999999999999 - type: map_at_1000 value: 9.879 - type: map_at_3 value: 7.825 - type: map_at_5 value: 8.637 - type: mrr_at_1 value: 5.915 - type: mrr_at_10 value: 10.34 - type: mrr_at_100 value: 10.943999999999999 - type: mrr_at_1000 value: 11.033 - type: mrr_at_3 value: 8.934000000000001 - type: mrr_at_5 value: 9.812 - type: ndcg_at_1 value: 5.915 - type: ndcg_at_10 value: 11.561 - 
type: ndcg_at_100 value: 14.971 - type: ndcg_at_1000 value: 17.907999999999998 - type: ndcg_at_3 value: 8.896999999999998 - type: ndcg_at_5 value: 10.313 - type: precision_at_1 value: 5.915 - type: precision_at_10 value: 2.1069999999999998 - type: precision_at_100 value: 0.414 - type: precision_at_1000 value: 0.074 - type: precision_at_3 value: 4.128 - type: precision_at_5 value: 3.327 - type: recall_at_1 value: 5.151 - type: recall_at_10 value: 17.874000000000002 - type: recall_at_100 value: 34.174 - type: recall_at_1000 value: 56.879999999999995 - type: recall_at_3 value: 10.732999999999999 - type: recall_at_5 value: 14.113000000000001 - task: type: Retrieval dataset: type: climate-fever name: MTEB ClimateFEVER config: default split: test revision: None metrics: - type: map_at_1 value: 3.101 - type: map_at_10 value: 5.434 - type: map_at_100 value: 6.267 - type: map_at_1000 value: 6.418 - type: map_at_3 value: 4.377000000000001 - type: map_at_5 value: 4.841 - type: mrr_at_1 value: 7.166 - type: mrr_at_10 value: 12.012 - type: mrr_at_100 value: 13.144 - type: mrr_at_1000 value: 13.229 - type: mrr_at_3 value: 9.826 - type: mrr_at_5 value: 10.921 - type: ndcg_at_1 value: 7.166 - type: ndcg_at_10 value: 8.687000000000001 - type: ndcg_at_100 value: 13.345 - type: ndcg_at_1000 value: 16.915 - type: ndcg_at_3 value: 6.276 - type: ndcg_at_5 value: 7.013 - type: precision_at_1 value: 7.166 - type: precision_at_10 value: 2.9250000000000003 - type: precision_at_100 value: 0.771 - type: precision_at_1000 value: 0.13999999999999999 - type: precision_at_3 value: 4.734 - type: precision_at_5 value: 3.8830000000000005 - type: recall_at_1 value: 3.101 - type: recall_at_10 value: 11.774999999999999 - type: recall_at_100 value: 28.819 - type: recall_at_1000 value: 49.886 - type: recall_at_3 value: 5.783 - type: recall_at_5 value: 7.692 - task: type: Retrieval dataset: type: dbpedia-entity name: MTEB DBPedia config: default split: test revision: None metrics: - type: map_at_1 value: 
2.758 - type: map_at_10 value: 5.507 - type: map_at_100 value: 7.1819999999999995 - type: map_at_1000 value: 7.652 - type: map_at_3 value: 4.131 - type: map_at_5 value: 4.702 - type: mrr_at_1 value: 28.499999999999996 - type: mrr_at_10 value: 37.693 - type: mrr_at_100 value: 38.657000000000004 - type: mrr_at_1000 value: 38.704 - type: mrr_at_3 value: 34.792 - type: mrr_at_5 value: 36.417 - type: ndcg_at_1 value: 20.625 - type: ndcg_at_10 value: 14.771999999999998 - type: ndcg_at_100 value: 16.821 - type: ndcg_at_1000 value: 21.546000000000003 - type: ndcg_at_3 value: 16.528000000000002 - type: ndcg_at_5 value: 15.573 - type: precision_at_1 value: 28.499999999999996 - type: precision_at_10 value: 12.25 - type: precision_at_100 value: 3.7600000000000002 - type: precision_at_1000 value: 0.86 - type: precision_at_3 value: 19.167 - type: precision_at_5 value: 16.25 - type: recall_at_1 value: 2.758 - type: recall_at_10 value: 9.164 - type: recall_at_100 value: 21.022 - type: recall_at_1000 value: 37.053999999999995 - type: recall_at_3 value: 5.112 - type: recall_at_5 value: 6.413 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 28.53554681148413 - type: mrr value: 29.290078704990325 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 76.52926207453477 - type: cos_sim_spearman value: 68.98528351149498 - type: euclidean_pearson value: 73.7744559091218 - type: euclidean_spearman value: 69.03481995814735 - type: manhattan_pearson value: 73.72818267270651 - type: manhattan_spearman value: 69.00576442086793 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 61.71540153163407 
- type: cos_sim_spearman value: 58.502746406116614 - type: euclidean_pearson value: 60.82817999438477 - type: euclidean_spearman value: 58.988494433752756 - type: manhattan_pearson value: 60.87147859170236 - type: manhattan_spearman value: 59.03527382025516 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 72.89990498692094 - type: cos_sim_spearman value: 74.03028513377879 - type: euclidean_pearson value: 73.8252088833803 - type: euclidean_spearman value: 74.15554246478399 - type: manhattan_pearson value: 73.80947397334666 - type: manhattan_spearman value: 74.13117958176566 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 70.67974206005906 - type: cos_sim_spearman value: 66.18263558486296 - type: euclidean_pearson value: 69.5048876024341 - type: euclidean_spearman value: 66.36380457878391 - type: manhattan_pearson value: 69.4895372451589 - type: manhattan_spearman value: 66.36941569935124 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 73.99856913569187 - type: cos_sim_spearman value: 75.54712054246464 - type: euclidean_pearson value: 74.55692573876115 - type: euclidean_spearman value: 75.34499056740096 - type: manhattan_pearson value: 74.59342318869683 - type: manhattan_spearman value: 75.35708317926819 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 72.3343670787494 - type: cos_sim_spearman value: 73.7136650302399 - type: euclidean_pearson value: 73.86004257913046 - type: euclidean_spearman value: 73.9557418048638 - type: 
manhattan_pearson value: 73.78919091538661 - type: manhattan_spearman value: 73.86316425954108 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.08159601556619 - type: cos_sim_spearman value: 80.13910828685532 - type: euclidean_pearson value: 79.39197806617453 - type: euclidean_spearman value: 79.85692277871196 - type: manhattan_pearson value: 79.32452246324705 - type: manhattan_spearman value: 79.70120373587193 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 62.29720207747786 - type: cos_sim_spearman value: 65.65260681394685 - type: euclidean_pearson value: 64.49002165983158 - type: euclidean_spearman value: 65.25917651158736 - type: manhattan_pearson value: 64.49981108236335 - type: manhattan_spearman value: 65.20426825202405 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 71.1871068550574 - type: cos_sim_spearman value: 71.40167034949341 - type: euclidean_pearson value: 72.2373684855404 - type: euclidean_spearman value: 71.90255429812984 - type: manhattan_pearson value: 72.23173532049509 - type: manhattan_spearman value: 71.87843489689064 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 68.65000574464773 - type: mrr value: 88.29363084265044 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 40.76107749144358 
- type: mrr value: 41.03689202953908 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 28.68520527813894 - type: cos_sim_spearman value: 29.017620841627433 - type: dot_pearson value: 29.25380949876322 - type: dot_spearman value: 29.33885250837327 --- # {MODEL_NAME} This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 1024 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('{MODEL_NAME}') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. 
```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('{MODEL_NAME}') model = AutoModel.from_pretrained('{MODEL_NAME}') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ```
luffycodes/vicuna-class-tutor-7b-ep3
luffycodes
"2023-12-15T08:29:08Z"
4,692
0
transformers
[ "transformers", "pytorch", "llama", "text-generation", "arxiv:2305.13272", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-12-15T08:21:28Z"
--- license: llama2 --- If you use this work, please cite: CLASS Meet SPOCK: An Education Tutoring Chatbot based on Learning Science Principles https://arxiv.org/abs/2305.13272 ``` @misc{sonkar2023class, title={CLASS Meet SPOCK: An Education Tutoring Chatbot based on Learning Science Principles}, author={Shashank Sonkar and Lucy Liu and Debshila Basu Mallick and Richard G. Baraniuk}, year={2023}, eprint={2305.13272}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
Mihaiii/Venusaur
Mihaiii
"2024-04-30T02:06:15Z"
4,692
0
sentence-transformers
[ "sentence-transformers", "onnx", "safetensors", "bert", "feature-extraction", "sentence-similarity", "gte", "mteb", "dataset:Mihaiii/qa-assistant", "base_model:Mihaiii/Bulbasaur", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
"2024-04-29T21:30:53Z"
--- base_model: Mihaiii/Bulbasaur license: mit library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - gte - mteb datasets: - Mihaiii/qa-assistant model-index: - name: Venusaur results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 73.17910447761194 - type: ap value: 35.29994612283548 - type: f1 value: 66.87845205993153 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 79.993525 - type: ap value: 74.7042261687233 - type: f1 value: 79.9004149386498 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 39.656000000000006 - type: f1 value: 39.287139345446256 - task: type: Retrieval dataset: type: mteb/arguana name: MTEB ArguAna config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: map_at_1 value: 16.643 - type: map_at_10 value: 28.276 - type: map_at_100 value: 29.543999999999997 - type: map_at_1000 value: 29.595 - type: map_at_20 value: 29.043000000000003 - type: map_at_3 value: 24.739 - type: map_at_5 value: 26.592 - type: mrr_at_1 value: 17.639 - type: mrr_at_10 value: 28.631 - type: mrr_at_100 value: 29.891000000000002 - type: mrr_at_1000 value: 29.942999999999998 - type: mrr_at_20 value: 29.391000000000002 - type: mrr_at_3 value: 25.107000000000003 - type: mrr_at_5 value: 26.942 - type: ndcg_at_1 value: 16.643 - type: ndcg_at_10 value: 34.8 - type: ndcg_at_100 value: 41.179 - type: ndcg_at_1000 value: 42.564 - type: ndcg_at_20 
value: 37.601 - type: ndcg_at_3 value: 27.356 - type: ndcg_at_5 value: 30.725 - type: precision_at_1 value: 16.643 - type: precision_at_10 value: 5.576 - type: precision_at_100 value: 0.861 - type: precision_at_1000 value: 0.097 - type: precision_at_20 value: 3.343 - type: precision_at_3 value: 11.641 - type: precision_at_5 value: 8.634 - type: recall_at_1 value: 16.643 - type: recall_at_10 value: 55.761 - type: recall_at_100 value: 86.06 - type: recall_at_1000 value: 97.013 - type: recall_at_20 value: 66.85600000000001 - type: recall_at_3 value: 34.922 - type: recall_at_5 value: 43.172 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 31.76467048453136 - type: v_measures value: [0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 
0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 
0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 
0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 
0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 
0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 
0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 
0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 
0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 
0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 
0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 
0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 
0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 
0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 
0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 
0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 
0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 
0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 
0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 
0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 
0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 
0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 
0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 
0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 
0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 
0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 
0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 
0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 
1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 
0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 
0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 
0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592, 0.2646936786804572, 0.27790871012280266, 0.29027802989910717, 0.27400555976615254, 0.2823478131745678, 0.25739544436992295, 0.3014171939280134, 0.2862214695233955, 0.2856734533249879, 0.2870107976688266, 0.3709000837926645, 0.3702167780750079, 0.36556393540769305, 0.37650336515785243, 0.3699811227722488, 0.36806220730606526, 0.3696328229784335, 0.3852970338255622, 0.37157613433218695, 0.368267862192135, 0.3715516752706066, 0.26093751350716654, 0.24003989063421033, 0.31112640151573373, 0.2509815194812587, 0.19256512170374224, 0.2638556294764011, 0.08503820346290819, 0.1374194639615466, 1.0, 0.21057893489306592] - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 21.06388933035354 - type: v_measures value: [0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 
0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 
0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 
0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 
0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 
0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 
0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 
0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 
0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 
0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 
0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 
0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 
0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 
0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 
0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 
0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 
0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 
0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 
0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 
0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 
0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 
0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 
0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 
0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 
0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 
0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 
0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 
0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 
0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 
0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 
0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 
0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183, 0.15139426348464108, 0.1723972791290331, 0.17283164578167945, 0.16480634318126675, 0.16569873939027066, 0.1728549819933171, 0.17524195492901368, 0.18366858039747846, 0.16933886504858436, 0.16720515987637327, 0.23635288879364383, 0.23516065130475095, 0.23711945768749756, 0.24435956439029374, 0.24042600701040173, 0.23215638321332788, 0.23458643115209107, 0.24946576681768332, 
0.2350071814521417, 0.23906840961229672, 0.2381730684068399, 0.14161450056618247, 0.16111253325078148, 0.1961351147776721, 0.1410367521003569, 0.14337306941509392, 0.164137728457383, 0.046549912102592315, 0.0965914522844279, 1.0, 0.12194100640248183] - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 53.770982215325056 - type: mrr value: 68.00400123114805 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 77.20301104745533 - type: cos_sim_spearman value: 77.59453912854975 - type: euclidean_pearson value: 74.21678798189272 - type: euclidean_spearman value: 74.9956847311664 - type: manhattan_pearson value: 74.55059214013183 - type: manhattan_spearman value: 75.51557609531613 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 77.9512987012987 - type: f1 value: 77.89256430400536 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 29.83922611010262 - type: v_measures value: [0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 
0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 
0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 
0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 
0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 
0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 
0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 
0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 
0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 
0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 
0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718, 0.29324346631343595, 0.2922357214987931, 0.2950587109611168, 0.2960401478358995, 0.2873870207712407, 0.29649976178620835, 0.3055622039732096, 0.3127947496618221, 0.2974633994658177, 0.307637428742718] - task: type: Clustering dataset: type: mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 18.34253917925029 - type: v_measures value: [0.19663926944608978, 
0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 
0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 
0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 
0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 
0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 
0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 
0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 
0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 
0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 
0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 
0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502, 0.19663926944608978, 0.17549804536847785, 0.1747660797341959, 0.1733985544939657, 0.17204103363489412, 0.18165752579382782, 0.18835786592472062, 0.18837179576029925, 0.19741374109182327, 0.18611000667673502] - task: type: Retrieval dataset: type: mteb/cqadupstack-android name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: f46a197baaae43b4f621051089b82a364682dfeb metrics: - type: map_at_1 value: 19.709 - type: map_at_10 value: 26.522000000000002 - type: map_at_100 value: 27.613 - type: map_at_1000 value: 27.750999999999998 - type: map_at_20 value: 27.033 - type: map_at_3 value: 24.127000000000002 - type: map_at_5 value: 25.319000000000003 - type: mrr_at_1 value: 24.607 - type: mrr_at_10 value: 31.776 - type: mrr_at_100 value: 32.629999999999995 - type: mrr_at_1000 value: 32.699 - type: mrr_at_20 value: 32.23 - type: mrr_at_3 value: 29.423 - type: mrr_at_5 value: 30.703000000000003 - type: ndcg_at_1 value: 24.607 - type: ndcg_at_10 value: 31.311 - type: ndcg_at_100 value: 36.412 - type: ndcg_at_1000 value: 39.428999999999995 - type: ndcg_at_20 value: 32.793 - type: ndcg_at_3 value: 27.388 - type: ndcg_at_5 value: 28.899 - type: precision_at_1 value: 24.607 - type: precision_at_10 value: 5.951 - type: precision_at_100 value: 1.083 - type: precision_at_1000 value: 0.165 - type: precision_at_20 value: 3.5479999999999996 - type: precision_at_3 value: 
12.971 - type: precision_at_5 value: 9.356 - type: recall_at_1 value: 19.709 - type: recall_at_10 value: 40.274 - type: recall_at_100 value: 62.926 - type: recall_at_1000 value: 83.54599999999999 - type: recall_at_20 value: 45.585 - type: recall_at_3 value: 28.587 - type: recall_at_5 value: 32.967999999999996 - task: type: Retrieval dataset: type: mteb/cqadupstack-english name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: ad9991cb51e31e31e430383c75ffb2885547b5f0 metrics: - type: map_at_1 value: 11.749 - type: map_at_10 value: 15.958 - type: map_at_100 value: 16.694 - type: map_at_1000 value: 16.805 - type: map_at_20 value: 16.325 - type: map_at_3 value: 14.469000000000001 - type: map_at_5 value: 15.286 - type: mrr_at_1 value: 14.521999999999998 - type: mrr_at_10 value: 19.076999999999998 - type: mrr_at_100 value: 19.785 - type: mrr_at_1000 value: 19.863 - type: mrr_at_20 value: 19.451999999999998 - type: mrr_at_3 value: 17.419999999999998 - type: mrr_at_5 value: 18.379 - type: ndcg_at_1 value: 14.521999999999998 - type: ndcg_at_10 value: 18.944 - type: ndcg_at_100 value: 22.685 - type: ndcg_at_1000 value: 25.562 - type: ndcg_at_20 value: 20.169999999999998 - type: ndcg_at_3 value: 16.18 - type: ndcg_at_5 value: 17.476 - type: precision_at_1 value: 14.521999999999998 - type: precision_at_10 value: 3.5409999999999995 - type: precision_at_100 value: 0.679 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_20 value: 2.185 - type: precision_at_3 value: 7.495 - type: precision_at_5 value: 5.541 - type: recall_at_1 value: 11.749 - type: recall_at_10 value: 24.759999999999998 - type: recall_at_100 value: 41.54 - type: recall_at_1000 value: 61.836 - type: recall_at_20 value: 29.252 - type: recall_at_3 value: 17.278 - type: recall_at_5 value: 20.57 - task: type: Retrieval dataset: type: mteb/cqadupstack-gaming name: MTEB CQADupstackGamingRetrieval config: default split: test revision: 4885aa143210c98657558c04aaf3dc47cfb54340 
metrics: - type: map_at_1 value: 19.827 - type: map_at_10 value: 27.417 - type: map_at_100 value: 28.383000000000003 - type: map_at_1000 value: 28.483000000000004 - type: map_at_20 value: 27.901999999999997 - type: map_at_3 value: 25.3 - type: map_at_5 value: 26.432 - type: mrr_at_1 value: 22.947 - type: mrr_at_10 value: 30.279 - type: mrr_at_100 value: 31.1 - type: mrr_at_1000 value: 31.171 - type: mrr_at_20 value: 30.714000000000002 - type: mrr_at_3 value: 28.37 - type: mrr_at_5 value: 29.37 - type: ndcg_at_1 value: 22.947 - type: ndcg_at_10 value: 31.793 - type: ndcg_at_100 value: 36.571999999999996 - type: ndcg_at_1000 value: 39.106 - type: ndcg_at_20 value: 33.376 - type: ndcg_at_3 value: 27.872000000000003 - type: ndcg_at_5 value: 29.601 - type: precision_at_1 value: 22.947 - type: precision_at_10 value: 5.3420000000000005 - type: precision_at_100 value: 0.856 - type: precision_at_1000 value: 0.116 - type: precision_at_20 value: 3.107 - type: precision_at_3 value: 12.684999999999999 - type: precision_at_5 value: 8.790000000000001 - type: recall_at_1 value: 19.827 - type: recall_at_10 value: 42.191 - type: recall_at_100 value: 64.307 - type: recall_at_1000 value: 83.161 - type: recall_at_20 value: 48.046 - type: recall_at_3 value: 31.352999999999998 - type: recall_at_5 value: 35.783 - task: type: Retrieval dataset: type: mteb/cqadupstack-gis name: MTEB CQADupstackGisRetrieval config: default split: test revision: 5003b3064772da1887988e05400cf3806fe491f2 metrics: - type: map_at_1 value: 11.802 - type: map_at_10 value: 15.799 - type: map_at_100 value: 16.53 - type: map_at_1000 value: 16.638 - type: map_at_20 value: 16.161 - type: map_at_3 value: 14.495 - type: map_at_5 value: 15.128 - type: mrr_at_1 value: 12.655 - type: mrr_at_10 value: 17.03 - type: mrr_at_100 value: 17.785999999999998 - type: mrr_at_1000 value: 17.88 - type: mrr_at_20 value: 17.416 - type: mrr_at_3 value: 15.65 - type: mrr_at_5 value: 16.305 - type: ndcg_at_1 value: 12.655 - type: ndcg_at_10 
value: 18.411 - type: ndcg_at_100 value: 22.547 - type: ndcg_at_1000 value: 25.685999999999996 - type: ndcg_at_20 value: 19.732 - type: ndcg_at_3 value: 15.713 - type: ndcg_at_5 value: 16.821 - type: precision_at_1 value: 12.655 - type: precision_at_10 value: 2.904 - type: precision_at_100 value: 0.525 - type: precision_at_1000 value: 0.083 - type: precision_at_20 value: 1.7399999999999998 - type: precision_at_3 value: 6.6290000000000004 - type: precision_at_5 value: 4.655 - type: recall_at_1 value: 11.802 - type: recall_at_10 value: 25.373 - type: recall_at_100 value: 45.462 - type: recall_at_1000 value: 69.98299999999999 - type: recall_at_20 value: 30.455 - type: recall_at_3 value: 17.941 - type: recall_at_5 value: 20.61 - task: type: Retrieval dataset: type: mteb/cqadupstack-mathematica name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: 90fceea13679c63fe563ded68f3b6f06e50061de metrics: - type: map_at_1 value: 6.6739999999999995 - type: map_at_10 value: 10.181 - type: map_at_100 value: 11.138 - type: map_at_1000 value: 11.258 - type: map_at_20 value: 10.673 - type: map_at_3 value: 8.997 - type: map_at_5 value: 9.587 - type: mrr_at_1 value: 8.209 - type: mrr_at_10 value: 12.356 - type: mrr_at_100 value: 13.370000000000001 - type: mrr_at_1000 value: 13.466000000000001 - type: mrr_at_20 value: 12.889000000000001 - type: mrr_at_3 value: 10.821 - type: mrr_at_5 value: 11.604000000000001 - type: ndcg_at_1 value: 8.209 - type: ndcg_at_10 value: 12.849 - type: ndcg_at_100 value: 17.916 - type: ndcg_at_1000 value: 21.192 - type: ndcg_at_20 value: 14.643 - type: ndcg_at_3 value: 10.299 - type: ndcg_at_5 value: 11.350999999999999 - type: precision_at_1 value: 8.209 - type: precision_at_10 value: 2.5 - type: precision_at_100 value: 0.577 - type: precision_at_1000 value: 0.099 - type: precision_at_20 value: 1.667 - type: precision_at_3 value: 5.017 - type: precision_at_5 value: 3.7560000000000002 - type: recall_at_1 value: 6.6739999999999995 - 
type: recall_at_10 value: 19.016 - type: recall_at_100 value: 41.806 - type: recall_at_1000 value: 65.605 - type: recall_at_20 value: 25.764 - type: recall_at_3 value: 12.030000000000001 - type: recall_at_5 value: 14.568 - task: type: Retrieval dataset: type: mteb/cqadupstack-physics name: MTEB CQADupstackPhysicsRetrieval config: default split: test revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4 metrics: - type: map_at_1 value: 12.133 - type: map_at_10 value: 17.32 - type: map_at_100 value: 18.294 - type: map_at_1000 value: 18.404 - type: map_at_20 value: 17.804000000000002 - type: map_at_3 value: 15.626000000000001 - type: map_at_5 value: 16.572 - type: mrr_at_1 value: 15.399 - type: mrr_at_10 value: 21.054000000000002 - type: mrr_at_100 value: 21.951999999999998 - type: mrr_at_1000 value: 22.03 - type: mrr_at_20 value: 21.522 - type: mrr_at_3 value: 19.297 - type: mrr_at_5 value: 20.294 - type: ndcg_at_1 value: 15.399 - type: ndcg_at_10 value: 21.02 - type: ndcg_at_100 value: 25.978 - type: ndcg_at_1000 value: 28.803 - type: ndcg_at_20 value: 22.642 - type: ndcg_at_3 value: 17.864 - type: ndcg_at_5 value: 19.335 - type: precision_at_1 value: 15.399 - type: precision_at_10 value: 3.9079999999999995 - type: precision_at_100 value: 0.781 - type: precision_at_1000 value: 0.12 - type: precision_at_20 value: 2.493 - type: precision_at_3 value: 8.502 - type: precision_at_5 value: 6.16 - type: recall_at_1 value: 12.133 - type: recall_at_10 value: 28.753 - type: recall_at_100 value: 50.806 - type: recall_at_1000 value: 70.75399999999999 - type: recall_at_20 value: 34.485 - type: recall_at_3 value: 19.664 - type: recall_at_5 value: 23.566000000000003 - task: type: Retrieval dataset: type: mteb/cqadupstack-programmers name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: 6184bc1440d2dbc7612be22b50686b8826d22b32 metrics: - type: map_at_1 value: 9.555 - type: map_at_10 value: 13.553 - type: map_at_100 value: 14.438 - type: map_at_1000 value: 
14.562 - type: map_at_20 value: 13.977999999999998 - type: map_at_3 value: 12.118 - type: map_at_5 value: 12.811 - type: mrr_at_1 value: 11.872 - type: mrr_at_10 value: 16.613 - type: mrr_at_100 value: 17.512 - type: mrr_at_1000 value: 17.607 - type: mrr_at_20 value: 17.108 - type: mrr_at_3 value: 15.068000000000001 - type: mrr_at_5 value: 15.839 - type: ndcg_at_1 value: 11.872 - type: ndcg_at_10 value: 16.556 - type: ndcg_at_100 value: 21.34 - type: ndcg_at_1000 value: 24.903 - type: ndcg_at_20 value: 18.102 - type: ndcg_at_3 value: 13.844000000000001 - type: ndcg_at_5 value: 14.893999999999998 - type: precision_at_1 value: 11.872 - type: precision_at_10 value: 3.082 - type: precision_at_100 value: 0.658 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_20 value: 1.992 - type: precision_at_3 value: 6.544999999999999 - type: precision_at_5 value: 4.68 - type: recall_at_1 value: 9.555 - type: recall_at_10 value: 22.931 - type: recall_at_100 value: 44.535000000000004 - type: recall_at_1000 value: 70.77799999999999 - type: recall_at_20 value: 28.403 - type: recall_at_3 value: 15.201 - type: recall_at_5 value: 18.145 - task: type: Retrieval dataset: type: mteb/cqadupstack name: MTEB CQADupstackRetrieval config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: map_at_1 value: 11.476083333333333 - type: map_at_10 value: 16.002499999999998 - type: map_at_100 value: 16.875083333333333 - type: map_at_1000 value: 16.991916666666665 - type: map_at_20 value: 16.445416666666667 - type: map_at_3 value: 14.473666666666668 - type: map_at_5 value: 15.269583333333333 - type: mrr_at_1 value: 13.799083333333334 - type: mrr_at_10 value: 18.69941666666667 - type: mrr_at_100 value: 19.54075 - type: mrr_at_1000 value: 19.62791666666667 - type: mrr_at_20 value: 19.15166666666667 - type: mrr_at_3 value: 17.079666666666665 - type: mrr_at_5 value: 17.93583333333333 - type: ndcg_at_1 value: 13.799083333333334 - type: ndcg_at_10 
value: 19.157583333333335 - type: ndcg_at_100 value: 23.675666666666668 - type: ndcg_at_1000 value: 26.761499999999998 - type: ndcg_at_20 value: 20.688416666666665 - type: ndcg_at_3 value: 16.23775 - type: ndcg_at_5 value: 17.494500000000002 - type: precision_at_1 value: 13.799083333333334 - type: precision_at_10 value: 3.449666666666667 - type: precision_at_100 value: 0.6782499999999999 - type: precision_at_1000 value: 0.11108333333333333 - type: precision_at_20 value: 2.1610833333333335 - type: precision_at_3 value: 7.496333333333332 - type: precision_at_5 value: 5.4156666666666675 - type: recall_at_1 value: 11.476083333333333 - type: recall_at_10 value: 26.132916666666667 - type: recall_at_100 value: 46.88099999999999 - type: recall_at_1000 value: 69.47425 - type: recall_at_20 value: 31.838583333333336 - type: recall_at_3 value: 17.943749999999998 - type: recall_at_5 value: 21.176833333333335 - task: type: Retrieval dataset: type: mteb/cqadupstack-stats name: MTEB CQADupstackStatsRetrieval config: default split: test revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a metrics: - type: map_at_1 value: 10.166 - type: map_at_10 value: 13.980999999999998 - type: map_at_100 value: 14.728 - type: map_at_1000 value: 14.812 - type: map_at_20 value: 14.338000000000001 - type: map_at_3 value: 12.5 - type: map_at_5 value: 13.408000000000001 - type: mrr_at_1 value: 11.503 - type: mrr_at_10 value: 15.799 - type: mrr_at_100 value: 16.539 - type: mrr_at_1000 value: 16.614 - type: mrr_at_20 value: 16.155 - type: mrr_at_3 value: 14.213000000000001 - type: mrr_at_5 value: 15.201999999999998 - type: ndcg_at_1 value: 11.503 - type: ndcg_at_10 value: 16.647000000000002 - type: ndcg_at_100 value: 20.84 - type: ndcg_at_1000 value: 23.385 - type: ndcg_at_20 value: 17.93 - type: ndcg_at_3 value: 13.761999999999999 - type: ndcg_at_5 value: 15.311 - type: precision_at_1 value: 11.503 - type: precision_at_10 value: 2.7449999999999997 - type: precision_at_100 value: 0.541 - type: 
precision_at_1000 value: 0.082 - type: precision_at_20 value: 1.6789999999999998 - type: precision_at_3 value: 6.033 - type: precision_at_5 value: 4.5089999999999995 - type: recall_at_1 value: 10.166 - type: recall_at_10 value: 23.284 - type: recall_at_100 value: 43.224000000000004 - type: recall_at_1000 value: 62.856 - type: recall_at_20 value: 28.166000000000004 - type: recall_at_3 value: 15.396 - type: recall_at_5 value: 19.248 - task: type: Retrieval dataset: type: mteb/cqadupstack-tex name: MTEB CQADupstackTexRetrieval config: default split: test revision: 46989137a86843e03a6195de44b09deda022eec7 metrics: - type: map_at_1 value: 6.516 - type: map_at_10 value: 9.185 - type: map_at_100 value: 9.795 - type: map_at_1000 value: 9.902 - type: map_at_20 value: 9.508999999999999 - type: map_at_3 value: 8.245 - type: map_at_5 value: 8.724 - type: mrr_at_1 value: 8.121 - type: mrr_at_10 value: 11.228 - type: mrr_at_100 value: 11.885 - type: mrr_at_1000 value: 11.978 - type: mrr_at_20 value: 11.583 - type: mrr_at_3 value: 10.145999999999999 - type: mrr_at_5 value: 10.688 - type: ndcg_at_1 value: 8.121 - type: ndcg_at_10 value: 11.245 - type: ndcg_at_100 value: 14.524999999999999 - type: ndcg_at_1000 value: 17.62 - type: ndcg_at_20 value: 12.385 - type: ndcg_at_3 value: 9.429 - type: ndcg_at_5 value: 10.181999999999999 - type: precision_at_1 value: 8.121 - type: precision_at_10 value: 2.137 - type: precision_at_100 value: 0.451 - type: precision_at_1000 value: 0.08499999999999999 - type: precision_at_20 value: 1.387 - type: precision_at_3 value: 4.4510000000000005 - type: precision_at_5 value: 3.2620000000000005 - type: recall_at_1 value: 6.516 - type: recall_at_10 value: 15.456 - type: recall_at_100 value: 30.709999999999997 - type: recall_at_1000 value: 53.854 - type: recall_at_20 value: 19.756 - type: recall_at_3 value: 10.41 - type: recall_at_5 value: 12.317 - task: type: Retrieval dataset: type: mteb/cqadupstack-unix name: MTEB CQADupstackUnixRetrieval config: 
default split: test revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53 metrics: - type: map_at_1 value: 10.955 - type: map_at_10 value: 14.689 - type: map_at_100 value: 15.482000000000001 - type: map_at_1000 value: 15.614 - type: map_at_20 value: 15.085 - type: map_at_3 value: 13.318 - type: map_at_5 value: 13.950999999999999 - type: mrr_at_1 value: 13.34 - type: mrr_at_10 value: 17.514 - type: mrr_at_100 value: 18.3 - type: mrr_at_1000 value: 18.406 - type: mrr_at_20 value: 17.924 - type: mrr_at_3 value: 15.920000000000002 - type: mrr_at_5 value: 16.625 - type: ndcg_at_1 value: 13.34 - type: ndcg_at_10 value: 17.574 - type: ndcg_at_100 value: 21.909 - type: ndcg_at_1000 value: 25.402 - type: ndcg_at_20 value: 19.017 - type: ndcg_at_3 value: 14.75 - type: ndcg_at_5 value: 15.787999999999998 - type: precision_at_1 value: 13.34 - type: precision_at_10 value: 3.041 - type: precision_at_100 value: 0.599 - type: precision_at_1000 value: 0.1 - type: precision_at_20 value: 1.908 - type: precision_at_3 value: 6.529999999999999 - type: precision_at_5 value: 4.646 - type: recall_at_1 value: 10.955 - type: recall_at_10 value: 23.831 - type: recall_at_100 value: 43.747 - type: recall_at_1000 value: 69.327 - type: recall_at_20 value: 29.17 - type: recall_at_3 value: 16.165 - type: recall_at_5 value: 18.701 - task: type: Retrieval dataset: type: mteb/cqadupstack-webmasters name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: map_at_1 value: 11.936 - type: map_at_10 value: 16.878 - type: map_at_100 value: 17.921 - type: map_at_1000 value: 18.093 - type: map_at_20 value: 17.468 - type: map_at_3 value: 15.21 - type: map_at_5 value: 16.056 - type: mrr_at_1 value: 15.02 - type: mrr_at_10 value: 20.023 - type: mrr_at_100 value: 20.965 - type: mrr_at_1000 value: 21.060000000000002 - type: mrr_at_20 value: 20.576 - type: mrr_at_3 value: 18.215 - type: mrr_at_5 value: 19.134 - type: ndcg_at_1 value: 
15.02 - type: ndcg_at_10 value: 20.459 - type: ndcg_at_100 value: 25.163999999999998 - type: ndcg_at_1000 value: 28.811999999999998 - type: ndcg_at_20 value: 22.387 - type: ndcg_at_3 value: 17.265 - type: ndcg_at_5 value: 18.605 - type: precision_at_1 value: 15.02 - type: precision_at_10 value: 3.9530000000000003 - type: precision_at_100 value: 0.8659999999999999 - type: precision_at_1000 value: 0.173 - type: precision_at_20 value: 2.619 - type: precision_at_3 value: 8.169 - type: precision_at_5 value: 6.047000000000001 - type: recall_at_1 value: 11.936 - type: recall_at_10 value: 27.694999999999997 - type: recall_at_100 value: 49.159000000000006 - type: recall_at_1000 value: 74.134 - type: recall_at_20 value: 35.258 - type: recall_at_3 value: 18.54 - type: recall_at_5 value: 21.959 - task: type: Retrieval dataset: type: mteb/cqadupstack-wordpress name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: map_at_1 value: 6.691 - type: map_at_10 value: 10.546999999999999 - type: map_at_100 value: 11.485 - type: map_at_1000 value: 11.581 - type: map_at_20 value: 11.068999999999999 - type: map_at_3 value: 9.279 - type: map_at_5 value: 9.961 - type: mrr_at_1 value: 7.394 - type: mrr_at_10 value: 11.644 - type: mrr_at_100 value: 12.665000000000001 - type: mrr_at_1000 value: 12.761 - type: mrr_at_20 value: 12.251 - type: mrr_at_3 value: 10.413 - type: mrr_at_5 value: 11.087 - type: ndcg_at_1 value: 7.394 - type: ndcg_at_10 value: 13.081999999999999 - type: ndcg_at_100 value: 18.22 - type: ndcg_at_1000 value: 21.238 - type: ndcg_at_20 value: 15.084 - type: ndcg_at_3 value: 10.487 - type: ndcg_at_5 value: 11.671 - type: precision_at_1 value: 7.394 - type: precision_at_10 value: 2.292 - type: precision_at_100 value: 0.523 - type: precision_at_1000 value: 0.083 - type: precision_at_20 value: 1.608 - type: precision_at_3 value: 4.929 - type: precision_at_5 value: 3.5860000000000003 - type: 
recall_at_1 value: 6.691 - type: recall_at_10 value: 20.031 - type: recall_at_100 value: 44.35 - type: recall_at_1000 value: 67.857 - type: recall_at_20 value: 27.723 - type: recall_at_3 value: 12.76 - type: recall_at_5 value: 15.687000000000001 - task: type: Retrieval dataset: type: mteb/climate-fever name: MTEB ClimateFEVER config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: map_at_1 value: 3.218 - type: map_at_10 value: 5.554 - type: map_at_100 value: 6.216 - type: map_at_1000 value: 6.338000000000001 - type: map_at_20 value: 5.907 - type: map_at_3 value: 4.707 - type: map_at_5 value: 5.094 - type: mrr_at_1 value: 6.84 - type: mrr_at_10 value: 11.296000000000001 - type: mrr_at_100 value: 12.224 - type: mrr_at_1000 value: 12.31 - type: mrr_at_20 value: 11.791 - type: mrr_at_3 value: 9.609 - type: mrr_at_5 value: 10.404 - type: ndcg_at_1 value: 6.84 - type: ndcg_at_10 value: 8.346 - type: ndcg_at_100 value: 12.06 - type: ndcg_at_1000 value: 15.132000000000001 - type: ndcg_at_20 value: 9.652 - type: ndcg_at_3 value: 6.489000000000001 - type: ndcg_at_5 value: 7.045999999999999 - type: precision_at_1 value: 6.84 - type: precision_at_10 value: 2.658 - type: precision_at_100 value: 0.655 - type: precision_at_1000 value: 0.121 - type: precision_at_20 value: 1.863 - type: precision_at_3 value: 4.691 - type: precision_at_5 value: 3.6479999999999997 - type: recall_at_1 value: 3.218 - type: recall_at_10 value: 10.725 - type: recall_at_100 value: 24.131 - type: recall_at_1000 value: 42.106 - type: recall_at_20 value: 14.539 - type: recall_at_3 value: 6.3020000000000005 - type: recall_at_5 value: 7.763000000000001 - task: type: Retrieval dataset: type: mteb/dbpedia name: MTEB DBPedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: map_at_1 value: 4.506 - type: map_at_10 value: 8.535 - type: map_at_100 value: 11.072 - type: map_at_1000 value: 11.764 - type: map_at_20 value: 9.492 - 
type: map_at_3 value: 6.697 - type: map_at_5 value: 7.452 - type: mrr_at_1 value: 36.75 - type: mrr_at_10 value: 46.35 - type: mrr_at_100 value: 47.034 - type: mrr_at_1000 value: 47.08 - type: mrr_at_20 value: 46.784 - type: mrr_at_3 value: 44.0 - type: mrr_at_5 value: 45.262 - type: ndcg_at_1 value: 29.25 - type: ndcg_at_10 value: 21.318 - type: ndcg_at_100 value: 23.449 - type: ndcg_at_1000 value: 29.267 - type: ndcg_at_20 value: 20.735 - type: ndcg_at_3 value: 24.45 - type: ndcg_at_5 value: 22.637999999999998 - type: precision_at_1 value: 36.75 - type: precision_at_10 value: 16.775000000000002 - type: precision_at_100 value: 5.212 - type: precision_at_1000 value: 1.167 - type: precision_at_20 value: 12.225 - type: precision_at_3 value: 26.917 - type: precision_at_5 value: 22.0 - type: recall_at_1 value: 4.506 - type: recall_at_10 value: 12.341000000000001 - type: recall_at_100 value: 26.723000000000003 - type: recall_at_1000 value: 46.293 - type: recall_at_20 value: 15.903 - type: recall_at_3 value: 7.994999999999999 - type: recall_at_5 value: 9.407 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 44.375 - type: f1 value: 39.487258967288 - task: type: Retrieval dataset: type: mteb/fever name: MTEB FEVER config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: map_at_1 value: 16.572 - type: map_at_10 value: 22.349 - type: map_at_100 value: 23.145 - type: map_at_1000 value: 23.22 - type: map_at_20 value: 22.771 - type: map_at_3 value: 20.326 - type: map_at_5 value: 21.404 - type: mrr_at_1 value: 17.657 - type: mrr_at_10 value: 23.679 - type: mrr_at_100 value: 24.504 - type: mrr_at_1000 value: 24.576999999999998 - type: mrr_at_20 value: 24.122 - type: mrr_at_3 value: 21.557000000000002 - type: mrr_at_5 value: 22.695 - type: ndcg_at_1 value: 17.657 - type: ndcg_at_10 value: 
26.081 - type: ndcg_at_100 value: 30.366 - type: ndcg_at_1000 value: 32.607 - type: ndcg_at_20 value: 27.608 - type: ndcg_at_3 value: 21.85 - type: ndcg_at_5 value: 23.796999999999997 - type: precision_at_1 value: 17.657 - type: precision_at_10 value: 3.968 - type: precision_at_100 value: 0.626 - type: precision_at_1000 value: 0.083 - type: precision_at_20 value: 2.3120000000000003 - type: precision_at_3 value: 8.951 - type: precision_at_5 value: 6.4 - type: recall_at_1 value: 16.572 - type: recall_at_10 value: 36.634 - type: recall_at_100 value: 57.135000000000005 - type: recall_at_1000 value: 74.832 - type: recall_at_20 value: 42.491 - type: recall_at_3 value: 25.087 - type: recall_at_5 value: 29.744999999999997 - task: type: Retrieval dataset: type: mteb/fiqa name: MTEB FiQA2018 config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: map_at_1 value: 4.891 - type: map_at_10 value: 8.346 - type: map_at_100 value: 9.286 - type: map_at_1000 value: 9.465 - type: map_at_20 value: 8.826 - type: map_at_3 value: 7.13 - type: map_at_5 value: 7.643999999999999 - type: mrr_at_1 value: 10.030999999999999 - type: mrr_at_10 value: 14.899000000000001 - type: mrr_at_100 value: 15.82 - type: mrr_at_1000 value: 15.931000000000001 - type: mrr_at_20 value: 15.408 - type: mrr_at_3 value: 13.169 - type: mrr_at_5 value: 13.971 - type: ndcg_at_1 value: 10.030999999999999 - type: ndcg_at_10 value: 11.713 - type: ndcg_at_100 value: 16.436999999999998 - type: ndcg_at_1000 value: 20.971999999999998 - type: ndcg_at_20 value: 13.341 - type: ndcg_at_3 value: 9.879999999999999 - type: ndcg_at_5 value: 10.249 - type: precision_at_1 value: 10.030999999999999 - type: precision_at_10 value: 3.519 - type: precision_at_100 value: 0.8330000000000001 - type: precision_at_1000 value: 0.16 - type: precision_at_20 value: 2.377 - type: precision_at_3 value: 6.687 - type: precision_at_5 value: 5.0 - type: recall_at_1 value: 4.891 - type: recall_at_10 value: 
15.221000000000002 - type: recall_at_100 value: 33.432 - type: recall_at_1000 value: 62.475 - type: recall_at_20 value: 20.467 - type: recall_at_3 value: 9.393 - type: recall_at_5 value: 11.214 - task: type: Retrieval dataset: type: mteb/hotpotqa name: MTEB HotpotQA config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: map_at_1 value: 22.856 - type: map_at_10 value: 30.656 - type: map_at_100 value: 31.447000000000003 - type: map_at_1000 value: 31.545 - type: map_at_20 value: 31.066 - type: map_at_3 value: 28.692 - type: map_at_5 value: 29.817 - type: mrr_at_1 value: 45.712 - type: mrr_at_10 value: 52.481 - type: mrr_at_100 value: 53.049 - type: mrr_at_1000 value: 53.09 - type: mrr_at_20 value: 52.803999999999995 - type: mrr_at_3 value: 50.709 - type: mrr_at_5 value: 51.795 - type: ndcg_at_1 value: 45.712 - type: ndcg_at_10 value: 38.381 - type: ndcg_at_100 value: 41.965 - type: ndcg_at_1000 value: 44.234 - type: ndcg_at_20 value: 39.657 - type: ndcg_at_3 value: 34.776 - type: ndcg_at_5 value: 36.622 - type: precision_at_1 value: 45.712 - type: precision_at_10 value: 8.062999999999999 - type: precision_at_100 value: 1.094 - type: precision_at_1000 value: 0.13999999999999999 - type: precision_at_20 value: 4.443 - type: precision_at_3 value: 21.476 - type: precision_at_5 value: 14.35 - type: recall_at_1 value: 22.856 - type: recall_at_10 value: 40.317 - type: recall_at_100 value: 54.705999999999996 - type: recall_at_1000 value: 69.892 - type: recall_at_20 value: 44.429 - type: recall_at_3 value: 32.214999999999996 - type: recall_at_5 value: 35.874 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 73.02000000000001 - type: ap value: 67.25944041954726 - type: f1 value: 72.8697134997555 - task: type: Retrieval dataset: type: mteb/msmarco name: MTEB MSMARCO config: default split: dev 
revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: map_at_1 value: 8.751000000000001 - type: map_at_10 value: 13.916999999999998 - type: map_at_100 value: 14.684 - type: map_at_1000 value: 14.766000000000002 - type: map_at_20 value: 14.338999999999999 - type: map_at_3 value: 12.197 - type: map_at_5 value: 13.163 - type: mrr_at_1 value: 8.911 - type: mrr_at_10 value: 14.198 - type: mrr_at_100 value: 14.960999999999999 - type: mrr_at_1000 value: 15.040000000000001 - type: mrr_at_20 value: 14.616999999999999 - type: mrr_at_3 value: 12.452 - type: mrr_at_5 value: 13.427 - type: ndcg_at_1 value: 8.911 - type: ndcg_at_10 value: 16.963 - type: ndcg_at_100 value: 21.062 - type: ndcg_at_1000 value: 23.543 - type: ndcg_at_20 value: 18.482000000000003 - type: ndcg_at_3 value: 13.391 - type: ndcg_at_5 value: 15.139 - type: precision_at_1 value: 8.911 - type: precision_at_10 value: 2.741 - type: precision_at_100 value: 0.485 - type: precision_at_1000 value: 0.06999999999999999 - type: precision_at_20 value: 1.683 - type: precision_at_3 value: 5.688 - type: precision_at_5 value: 4.3069999999999995 - type: recall_at_1 value: 8.751000000000001 - type: recall_at_10 value: 26.368000000000002 - type: recall_at_100 value: 46.22 - type: recall_at_1000 value: 66.22 - type: recall_at_20 value: 32.291 - type: recall_at_3 value: 16.595 - type: recall_at_5 value: 20.802 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 89.87232102143183 - type: f1 value: 89.25570902684863 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.02599179206568 - type: f1 value: 52.14883678941826 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB 
MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.74714189643576 - type: f1 value: 65.4738868705899 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.36381977135171 - type: f1 value: 71.5956356866047 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 27.418721421866266 - type: v_measures value: [0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 
0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 
0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 
0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 
0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 
0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 
0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 
0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 
0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 
0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 
0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643, 0.25699019421325164, 0.2551070596948231, 0.2691672146325009, 0.263190709241409, 0.25833683058459567, 0.2969925236078273, 0.2799007926692717, 0.29259126151386433, 0.2840268235473181, 0.2855687324817643] - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 22.40590099674712 - type: v_measures value: [0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 
0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 
0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 
0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 
0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 
0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 
0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 
0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 
0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 
0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 
0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 0.24673502894784635, 0.23967523571775606, 0.23434830352178554, 0.20312599898502812, 0.21028636757346386, 0.2078091337066853, 0.21248714226010795, 0.2051414930300016, 0.2430753205246834, 0.23790607540735365, 
0.24673502894784635, 0.23967523571775606, 0.23434830352178554] - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 29.924796610724826 - type: mrr value: 30.962158101843464 - task: type: Retrieval dataset: type: mteb/nfcorpus name: MTEB NFCorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: map_at_1 value: 1.3379999999999999 - type: map_at_10 value: 3.62 - type: map_at_100 value: 4.891 - type: map_at_1000 value: 5.87 - type: map_at_20 value: 4.164000000000001 - type: map_at_3 value: 2.608 - type: map_at_5 value: 3.1910000000000003 - type: mrr_at_1 value: 18.576 - type: mrr_at_10 value: 26.487 - type: mrr_at_100 value: 27.736 - type: mrr_at_1000 value: 27.828000000000003 - type: mrr_at_20 value: 27.319 - type: mrr_at_3 value: 23.891000000000002 - type: mrr_at_5 value: 25.501 - type: ndcg_at_1 value: 17.957 - type: ndcg_at_10 value: 14.021 - type: ndcg_at_100 value: 14.41 - type: ndcg_at_1000 value: 24.197 - type: ndcg_at_20 value: 13.883000000000001 - type: ndcg_at_3 value: 15.913 - type: ndcg_at_5 value: 15.120000000000001 - type: precision_at_1 value: 18.576 - type: precision_at_10 value: 10.402000000000001 - type: precision_at_100 value: 4.334 - type: precision_at_1000 value: 1.661 - type: precision_at_20 value: 8.731 - type: precision_at_3 value: 15.067 - type: precision_at_5 value: 12.940999999999999 - type: recall_at_1 value: 1.3379999999999999 - type: recall_at_10 value: 6.711 - type: recall_at_100 value: 16.862 - type: recall_at_1000 value: 52.537 - type: recall_at_20 value: 9.89 - type: recall_at_3 value: 3.614 - type: recall_at_5 value: 5.428999999999999 - task: type: Retrieval dataset: type: mteb/nq name: MTEB NQ config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: map_at_1 value: 10.187 - type: map_at_10 value: 16.61 - type: 
map_at_100 value: 17.599 - type: map_at_1000 value: 17.689 - type: map_at_20 value: 17.141000000000002 - type: map_at_3 value: 14.405000000000001 - type: map_at_5 value: 15.543000000000001 - type: mrr_at_1 value: 11.327 - type: mrr_at_10 value: 18.184 - type: mrr_at_100 value: 19.137 - type: mrr_at_1000 value: 19.215 - type: mrr_at_20 value: 18.717 - type: mrr_at_3 value: 15.918 - type: mrr_at_5 value: 17.052 - type: ndcg_at_1 value: 11.327 - type: ndcg_at_10 value: 20.744 - type: ndcg_at_100 value: 25.865 - type: ndcg_at_1000 value: 28.419 - type: ndcg_at_20 value: 22.648 - type: ndcg_at_3 value: 16.147 - type: ndcg_at_5 value: 18.168 - type: precision_at_1 value: 11.327 - type: precision_at_10 value: 3.7220000000000004 - type: precision_at_100 value: 0.658 - type: precision_at_1000 value: 0.091 - type: precision_at_20 value: 2.294 - type: precision_at_3 value: 7.503 - type: precision_at_5 value: 5.608 - type: recall_at_1 value: 10.187 - type: recall_at_10 value: 32.051 - type: recall_at_100 value: 56.016 - type: recall_at_1000 value: 75.649 - type: recall_at_20 value: 39.267 - type: recall_at_3 value: 19.689 - type: recall_at_5 value: 24.445 - task: type: Retrieval dataset: type: mteb/quora name: MTEB QuoraRetrieval config: default split: test revision: e4e08e0b7dbe3c8700f0daef558ff32256715259 metrics: - type: map_at_1 value: 58.404 - type: map_at_10 value: 70.125 - type: map_at_100 value: 70.923 - type: map_at_1000 value: 70.968 - type: map_at_20 value: 70.60300000000001 - type: map_at_3 value: 67.342 - type: map_at_5 value: 68.97999999999999 - type: mrr_at_1 value: 67.29 - type: mrr_at_10 value: 74.773 - type: mrr_at_100 value: 75.093 - type: mrr_at_1000 value: 75.106 - type: mrr_at_20 value: 74.973 - type: mrr_at_3 value: 73.188 - type: mrr_at_5 value: 74.165 - type: ndcg_at_1 value: 67.33 - type: ndcg_at_10 value: 74.936 - type: ndcg_at_100 value: 77.479 - type: ndcg_at_1000 value: 78.147 - type: ndcg_at_20 value: 76.048 - type: ndcg_at_3 value: 
71.30499999999999 - type: ndcg_at_5 value: 73.09400000000001 - type: precision_at_1 value: 67.33 - type: precision_at_10 value: 11.335 - type: precision_at_100 value: 1.385 - type: precision_at_1000 value: 0.151 - type: precision_at_20 value: 6.116 - type: precision_at_3 value: 30.833 - type: precision_at_5 value: 20.384 - type: recall_at_1 value: 58.404 - type: recall_at_10 value: 84.138 - type: recall_at_100 value: 94.32000000000001 - type: recall_at_1000 value: 98.51299999999999 - type: recall_at_20 value: 87.996 - type: recall_at_3 value: 73.68400000000001 - type: recall_at_5 value: 78.681 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 26.713463922652704 - type: v_measures value: [0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 
0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 
0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 
0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 
0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 
0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 
0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 
0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 
0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 
0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 
0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 
0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 
0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 
0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 
0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 
0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 
0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 
0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 
0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 
0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 
0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 
0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 
0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 
0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 
0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 
0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468, 0.356358075769195, 0.3011200622167429, 0.22467375312763427, 0.2394109956052364, 0.2899555542978596, 0.21406581833340438, 
0.326841157469233, 0.20064055405544595, 0.2089858781934912, 0.22835715928471212, 0.24742539971848806, 0.36899923991825895, 0.24701463701714044, 0.2560178333573794, 0.3552016140245526, 0.23774804137045452, 0.27017447263584743, 0.37586623336347835, 0.2564531409603795, 0.2262824317679402, 0.21248869632976208, 0.22661416857784017, 0.35027209205919524, 0.23589310962174836, 0.22150586158775468] - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 385e3cb46b4cfa89021f56c4380204149d0efe33 metrics: - type: v_measure value: 44.135854520709856 - type: v_measures value: [0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 
0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 
0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 
0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 
0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 
0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 
0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 
0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 
0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 
0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 
0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252, 0.4992205891430278, 0.5024470494091208, 0.525745119896455, 0.30230336838014243, 0.4915802304493441, 0.4481785980399149, 0.18082183331189022, 0.5004539942242847, 0.4503725957205808, 0.5124620734962252] - task: type: Retrieval dataset: type: mteb/scidocs name: MTEB SCIDOCS config: default split: test revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88 metrics: - type: map_at_1 value: 2.1350000000000002 - type: map_at_10 value: 5.118 - type: map_at_100 value: 6.08 - type: map_at_1000 value: 6.308 - type: map_at_20 value: 5.562 - type: map_at_3 value: 3.804 - type: map_at_5 value: 4.468 - type: mrr_at_1 value: 10.5 - type: mrr_at_10 value: 17.278 - type: mrr_at_100 value: 18.418 - type: mrr_at_1000 value: 18.526 - type: mrr_at_20 value: 17.876 - type: mrr_at_3 value: 14.832999999999998 - type: mrr_at_5 value: 16.317999999999998 - type: ndcg_at_1 value: 10.5 - type: ndcg_at_10 value: 9.39 - type: ndcg_at_100 value: 14.362 - type: ndcg_at_1000 value: 19.524 - type: ndcg_at_20 value: 10.949 - type: ndcg_at_3 value: 8.794 - type: ndcg_at_5 value: 7.789 - type: precision_at_1 value: 10.5 - type: precision_at_10 value: 4.91 - type: precision_at_100 value: 1.221 - type: precision_at_1000 value: 0.247 - type: precision_at_20 value: 3.36 - type: precision_at_3 value: 8.233 - type: precision_at_5 value: 6.9 - type: recall_at_1 value: 
2.1350000000000002 - type: recall_at_10 value: 9.955 - type: recall_at_100 value: 24.778 - type: recall_at_1000 value: 50.222 - type: recall_at_20 value: 13.63 - type: recall_at_3 value: 5.01 - type: recall_at_5 value: 6.995 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: 20a6d6f312dd54037fe07a32d58e5e168867909d metrics: - type: cos_sim_pearson value: 78.43659263950201 - type: cos_sim_spearman value: 74.68461406509039 - type: euclidean_pearson value: 76.31168073146695 - type: euclidean_spearman value: 75.13681406263804 - type: manhattan_pearson value: 76.2960985430519 - type: manhattan_spearman value: 75.03513932091352 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 55.096195345864295 - type: cos_sim_spearman value: 54.34570729554049 - type: euclidean_pearson value: 64.79488422312815 - type: euclidean_spearman value: 61.19116930098903 - type: manhattan_pearson value: 65.04388378143294 - type: manhattan_spearman value: 61.33457037020176 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 71.40902040706975 - type: cos_sim_spearman value: 74.24315395719762 - type: euclidean_pearson value: 75.94675003130055 - type: euclidean_spearman value: 76.18445285168187 - type: manhattan_pearson value: 75.88786726620313 - type: manhattan_spearman value: 76.1188105671321 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 71.9514442512574 - type: cos_sim_spearman value: 69.99484176761607 - type: euclidean_pearson value: 75.02706002860513 - type: euclidean_spearman value: 72.9036480559019 - type: manhattan_pearson value: 
75.03815961673163 - type: manhattan_spearman value: 72.92353672671821 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 72.80522195974591 - type: cos_sim_spearman value: 75.73762657362906 - type: euclidean_pearson value: 80.1521753666007 - type: euclidean_spearman value: 80.25738481137047 - type: manhattan_pearson value: 80.19317991797196 - type: manhattan_spearman value: 80.31866668763018 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 69.45092072084951 - type: cos_sim_spearman value: 73.6472761328024 - type: euclidean_pearson value: 74.95031941602217 - type: euclidean_spearman value: 75.37029502504294 - type: manhattan_pearson value: 74.7846441654404 - type: manhattan_spearman value: 75.19664481480419 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 82.66021611621103 - type: cos_sim_spearman value: 84.81452353756737 - type: euclidean_pearson value: 85.32338150846037 - type: euclidean_spearman value: 85.46672916577448 - type: manhattan_pearson value: 84.86427674633184 - type: manhattan_spearman value: 85.098246631915 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 56.880105002604566 - type: cos_sim_spearman value: 62.56487199261157 - type: euclidean_pearson value: 57.49369653074593 - type: euclidean_spearman value: 61.038143206328854 - type: manhattan_pearson value: 57.85496348413732 - type: manhattan_spearman value: 61.22736674852764 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB 
STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 75.41209102908195 - type: cos_sim_spearman value: 76.72196352753278 - type: euclidean_pearson value: 79.97933288080695 - type: euclidean_spearman value: 79.36350387100728 - type: manhattan_pearson value: 79.89865614781017 - type: manhattan_spearman value: 79.36099141428603 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 70.81824436527221 - type: mrr value: 90.04096937920467 - task: type: Retrieval dataset: type: mteb/scifact name: MTEB SciFact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: map_at_1 value: 33.567 - type: map_at_10 value: 41.409 - type: map_at_100 value: 42.281 - type: map_at_1000 value: 42.358000000000004 - type: map_at_20 value: 41.916 - type: map_at_3 value: 38.784 - type: map_at_5 value: 40.355999999999995 - type: mrr_at_1 value: 35.667 - type: mrr_at_10 value: 43.189 - type: mrr_at_100 value: 43.885000000000005 - type: mrr_at_1000 value: 43.95 - type: mrr_at_20 value: 43.584 - type: mrr_at_3 value: 41.0 - type: mrr_at_5 value: 42.266999999999996 - type: ndcg_at_1 value: 35.667 - type: ndcg_at_10 value: 45.999 - type: ndcg_at_100 value: 50.153000000000006 - type: ndcg_at_1000 value: 52.161 - type: ndcg_at_20 value: 47.662 - type: ndcg_at_3 value: 41.178 - type: ndcg_at_5 value: 43.59 - type: precision_at_1 value: 35.667 - type: precision_at_10 value: 6.6000000000000005 - type: precision_at_100 value: 0.89 - type: precision_at_1000 value: 0.106 - type: precision_at_20 value: 3.6830000000000003 - type: precision_at_3 value: 16.556 - type: precision_at_5 value: 11.466999999999999 - type: recall_at_1 value: 33.567 - type: recall_at_10 value: 58.599999999999994 - type: recall_at_100 value: 77.9 - type: recall_at_1000 value: 93.667 - 
type: recall_at_20 value: 64.878 - type: recall_at_3 value: 45.483000000000004 - type: recall_at_5 value: 51.4 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.6930693069307 - type: cos_sim_ap value: 89.25594498972691 - type: cos_sim_f1 value: 83.84499245093104 - type: cos_sim_precision value: 84.39716312056737 - type: cos_sim_recall value: 83.3 - type: dot_accuracy value: 99.48514851485149 - type: dot_ap value: 75.92127370670867 - type: dot_f1 value: 71.16104868913857 - type: dot_precision value: 76.52474108170311 - type: dot_recall value: 66.5 - type: euclidean_accuracy value: 99.6891089108911 - type: euclidean_ap value: 89.2180446358921 - type: euclidean_f1 value: 83.57142857142857 - type: euclidean_precision value: 85.3125 - type: euclidean_recall value: 81.89999999999999 - type: manhattan_accuracy value: 99.6980198019802 - type: manhattan_ap value: 89.43047814044381 - type: manhattan_f1 value: 84.07445708376422 - type: manhattan_precision value: 87.04496788008565 - type: manhattan_recall value: 81.3 - type: max_accuracy value: 99.6980198019802 - type: max_ap value: 89.43047814044381 - type: max_f1 value: 84.07445708376422 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 32.83904946173562 - type: v_measures value: [0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 
0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 
0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 
0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 
0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 
0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 
0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 
0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 
0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 
0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 
0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 
0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 
0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 
0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 
0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 
0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 
0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 
0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 
0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 
0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 
0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 
0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 
0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 
0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 
0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 
0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 
0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118, 0.30110380679104903, 0.3953932981762184, 0.24615493206657874, 0.36457921033081425, 0.37818468307341996, 0.2458717382277342, 0.24597349476879382, 0.355495518705052, 0.32617546899939204, 0.3316784933295811, 0.4879686282712542, 0.4493952612804797, 0.4289659003483834, 0.25736076606300134, 0.31347948561233624, 0.32945691057021553, 0.2802921851023466, 0.30108517991402206, 0.2906340312531131, 0.3176973104574197, 0.32121506900305036, 0.27178906328240593, 0.2736797450244378, 0.3448789501821934, 0.3512532346006118] - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 27.476810145753827 - type: v_measures value: [0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 
0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 
0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 
0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 
0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 
0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 
0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 
0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 
0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 
0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 
0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773, 0.262007031213021, 0.2603632068581035, 0.25388262071363726, 0.25745089384059566, 0.257990103854705, 0.29704373180003885, 0.28480533084783555, 0.286509500865553, 0.2947033679639156, 0.2929252266179773] - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 43.14055223869571 - type: mrr value: 43.506533295136244 - task: type: Summarization dataset: type: mteb/summeval name: MTEB SummEval config: 
default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.24218821701958 - type: cos_sim_spearman value: 29.907749825179124 - type: dot_pearson value: 27.348198725124227 - type: dot_spearman value: 25.950835375041038 - task: type: Retrieval dataset: type: mteb/trec-covid name: MTEB TRECCOVID config: default split: test revision: bb9466bac8153a0349341eb1b22e06409e78ef4e metrics: - type: map_at_1 value: 0.1 - type: map_at_10 value: 0.505 - type: map_at_100 value: 2.207 - type: map_at_1000 value: 6.0600000000000005 - type: map_at_20 value: 0.814 - type: map_at_3 value: 0.218 - type: map_at_5 value: 0.329 - type: mrr_at_1 value: 44.0 - type: mrr_at_10 value: 54.763 - type: mrr_at_100 value: 55.345 - type: mrr_at_1000 value: 55.349000000000004 - type: mrr_at_20 value: 55.035000000000004 - type: mrr_at_3 value: 51.333 - type: mrr_at_5 value: 52.632999999999996 - type: ndcg_at_1 value: 39.0 - type: ndcg_at_10 value: 30.272 - type: ndcg_at_100 value: 21.906 - type: ndcg_at_1000 value: 22.439 - type: ndcg_at_20 value: 28.316000000000003 - type: ndcg_at_3 value: 35.235 - type: ndcg_at_5 value: 33.843 - type: precision_at_1 value: 44.0 - type: precision_at_10 value: 32.0 - type: precision_at_100 value: 22.5 - type: precision_at_1000 value: 10.9 - type: precision_at_20 value: 29.7 - type: precision_at_3 value: 38.0 - type: precision_at_5 value: 36.0 - type: recall_at_1 value: 0.1 - type: recall_at_10 value: 0.719 - type: recall_at_100 value: 4.7620000000000005 - type: recall_at_1000 value: 22.285 - type: recall_at_20 value: 1.277 - type: recall_at_3 value: 0.244 - type: recall_at_5 value: 0.40299999999999997 - task: type: Retrieval dataset: type: mteb/touche2020 name: MTEB Touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: map_at_1 value: 0.865 - type: map_at_10 value: 2.962 - type: map_at_100 value: 5.713 - type: map_at_1000 value: 6.719 - type: map_at_20 value: 
3.939 - type: map_at_3 value: 1.582 - type: map_at_5 value: 2.215 - type: mrr_at_1 value: 14.285999999999998 - type: mrr_at_10 value: 24.844 - type: mrr_at_100 value: 26.861 - type: mrr_at_1000 value: 26.904 - type: mrr_at_20 value: 26.375999999999998 - type: mrr_at_3 value: 20.068 - type: mrr_at_5 value: 22.619 - type: ndcg_at_1 value: 12.245000000000001 - type: ndcg_at_10 value: 10.508000000000001 - type: ndcg_at_100 value: 18.935 - type: ndcg_at_1000 value: 29.747 - type: ndcg_at_20 value: 11.701 - type: ndcg_at_3 value: 10.381 - type: ndcg_at_5 value: 11.339 - type: precision_at_1 value: 14.285999999999998 - type: precision_at_10 value: 10.612 - type: precision_at_100 value: 4.531000000000001 - type: precision_at_1000 value: 1.133 - type: precision_at_20 value: 8.98 - type: precision_at_3 value: 11.565 - type: precision_at_5 value: 12.653 - type: recall_at_1 value: 0.865 - type: recall_at_10 value: 6.493 - type: recall_at_100 value: 28.16 - type: recall_at_1000 value: 61.026 - type: recall_at_20 value: 11.726 - type: recall_at_3 value: 2.221 - type: recall_at_5 value: 3.849 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de metrics: - type: accuracy value: 64.4091796875 - type: ap value: 11.076947197887051 - type: f1 value: 49.07978901357373 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 59.663271080928126 - type: f1 value: 59.99492026885337 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 26.09282097093625 - type: v_measures value: [0.26849676299945785, 
0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 
0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 
0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 
0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 
0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 
0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 
0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 
0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 
0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 
0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 
0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487, 0.26849676299945785, 0.2669514566616348, 0.2891149570883449, 0.24392859342532378, 0.22545659657952322, 0.27033814887951974, 0.25403361548721237, 0.27404718032226466, 0.23497638522536846, 0.28193840042497487] - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 84.88406747332658 - type: cos_sim_ap value: 69.26105491403395 - type: cos_sim_f1 value: 65.52488910793494 - type: cos_sim_precision value: 61.465557096625055 - type: cos_sim_recall value: 70.15831134564644 - type: dot_accuracy value: 82.16606067830959 - type: dot_ap value: 61.09102948421686 - type: dot_f1 value: 57.59054713588492 - type: dot_precision value: 56.106106106106104 - type: dot_recall value: 59.155672823219 - type: euclidean_accuracy value: 84.85426476724086 - type: euclidean_ap value: 69.32917418684202 - type: euclidean_f1 value: 65.59770252482949 - type: euclidean_precision value: 60.01751696956427 - type: euclidean_recall value: 72.32189973614776 - type: manhattan_accuracy value: 84.83638314358943 - type: manhattan_ap value: 69.13012845791405 - type: manhattan_f1 value: 65.35336124107363 - type: manhattan_precision value: 61.26500461680517 - type: manhattan_recall value: 70.0263852242744 - type: max_accuracy value: 
84.88406747332658 - type: max_ap value: 69.32917418684202 - type: max_f1 value: 65.59770252482949 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 87.81387045445726 - type: cos_sim_ap value: 83.19376576098023 - type: cos_sim_f1 value: 75.85641331494391 - type: cos_sim_precision value: 73.52409856203484 - type: cos_sim_recall value: 78.34154604250077 - type: dot_accuracy value: 85.33007334963325 - type: dot_ap value: 75.69925817222503 - type: dot_f1 value: 70.44983722994968 - type: dot_precision value: 67.80119624038736 - type: dot_recall value: 73.31382814906067 - type: euclidean_accuracy value: 87.78864439011139 - type: euclidean_ap value: 83.33289584854239 - type: euclidean_f1 value: 75.70217471433837 - type: euclidean_precision value: 72.61349172677131 - type: euclidean_recall value: 79.06529103788112 - type: manhattan_accuracy value: 87.73819226141964 - type: manhattan_ap value: 83.29254385989515 - type: manhattan_f1 value: 75.70975618644992 - type: manhattan_precision value: 71.8773787281157 - type: manhattan_recall value: 79.97382198952879 - type: max_accuracy value: 87.81387045445726 - type: max_ap value: 83.33289584854239 - type: max_f1 value: 75.85641331494391 --- # Venusaur This is a distill of [Bulbasaur](https://huggingface.co/Mihaiii/Bulbasaur) using [qa-assistant](https://huggingface.co/datasets/Mihaiii/qa-assistant). 
## Intended purpose <span style="color:blue">This model is designed for use in semantic-autocomplete ([click here for demo](https://mihaiii.github.io/semantic-autocomplete/)).</span> ## Usage (Sentence-Transformers) (same as [gte-tiny](https://huggingface.co/TaylorAI/gte-tiny)) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Mihaiii/Venusaur') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) (same as [gte-tiny](https://huggingface.co/TaylorAI/gte-tiny)) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. 
```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('Mihaiii/Venusaur') model = AutoModel.from_pretrained('Mihaiii/Venusaur') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, mean pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ### Limitation (same as [gte-small](https://huggingface.co/thenlper/gte-small)) This model exclusively caters to English texts, and any lengthy texts will be truncated to a maximum of 512 tokens.
mradermacher/MopeyMule-Blackroot-8B-i1-GGUF
mradermacher
"2024-06-14T09:24:48Z"
4,692
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:Casual-Autopsy/MopeyMule-Blackroot-8B", "endpoints_compatible", "region:us" ]
null
"2024-06-14T05:25:28Z"
--- base_model: Casual-Autopsy/MopeyMule-Blackroot-8B language: - en library_name: transformers quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/Casual-Autopsy/MopeyMule-Blackroot-8B <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q2_K.gguf) | i1-Q2_K 
| 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | 
[GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/MopeyMule-Blackroot-8B-i1-GGUF/resolve/main/MopeyMule-Blackroot-8B.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
timm/vit_base_patch32_224.augreg_in21k_ft_in1k
timm
"2023-05-06T00:03:27Z"
4,691
0
timm
[ "timm", "pytorch", "safetensors", "image-classification", "dataset:imagenet-1k", "dataset:imagenet-21k", "arxiv:2106.10270", "arxiv:2010.11929", "license:apache-2.0", "region:us" ]
image-classification
"2022-12-22T07:33:47Z"
--- tags: - image-classification - timm library_name: timm license: apache-2.0 datasets: - imagenet-1k - imagenet-21k --- # Model card for vit_base_patch32_224.augreg_in21k_ft_in1k A Vision Transformer (ViT) image classification model. Trained on ImageNet-21k and fine-tuned on ImageNet-1k (with additional augmentation and regularization) in JAX by paper authors, ported to PyTorch by Ross Wightman. ## Model Details - **Model Type:** Image classification / feature backbone - **Model Stats:** - Params (M): 88.2 - GMACs: 4.4 - Activations (M): 4.2 - Image size: 224 x 224 - **Papers:** - How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers: https://arxiv.org/abs/2106.10270 - An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale: https://arxiv.org/abs/2010.11929v2 - **Dataset:** ImageNet-1k - **Pretrain Dataset:** ImageNet-21k - **Original:** https://github.com/google-research/vision_transformer ## Model Usage ### Image Classification ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model('vit_base_patch32_224.augreg_in21k_ft_in1k', pretrained=True) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # unsqueeze single image into batch of 1 top5_probabilities, top5_class_indices = torch.topk(output.softmax(dim=1) * 100, k=5) ``` ### Image Embeddings ```python from urllib.request import urlopen from PIL import Image import timm img = Image.open(urlopen( 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/beignets-task-guide.png' )) model = timm.create_model( 'vit_base_patch32_224.augreg_in21k_ft_in1k', 
pretrained=True, num_classes=0, # remove classifier nn.Linear ) model = model.eval() # get model specific transforms (normalization, resize) data_config = timm.data.resolve_model_data_config(model) transforms = timm.data.create_transform(**data_config, is_training=False) output = model(transforms(img).unsqueeze(0)) # output is (batch_size, num_features) shaped tensor # or equivalently (without needing to set num_classes=0) output = model.forward_features(transforms(img).unsqueeze(0)) # output is unpooled, a (1, 50, 768) shaped tensor output = model.forward_head(output, pre_logits=True) # output is a (1, num_features) shaped tensor ``` ## Model Comparison Explore the dataset and runtime metrics of this model in timm [model results](https://github.com/huggingface/pytorch-image-models/tree/main/results). ## Citation ```bibtex @article{steiner2021augreg, title={How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers}, author={Steiner, Andreas and Kolesnikov, Alexander and and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob and Beyer, Lucas}, journal={arXiv preprint arXiv:2106.10270}, year={2021} } ``` ```bibtex @article{dosovitskiy2020vit, title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale}, author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil}, journal={ICLR}, year={2021} } ``` ```bibtex @misc{rw2019timm, author = {Ross Wightman}, title = {PyTorch Image Models}, year = {2019}, publisher = {GitHub}, journal = {GitHub repository}, doi = {10.5281/zenodo.4414861}, howpublished = {\url{https://github.com/huggingface/pytorch-image-models}} } ```
mradermacher/openchat-3.5-0106-i1-GGUF
mradermacher
"2024-06-08T23:54:24Z"
4,690
0
transformers
[ "transformers", "gguf", "openchat", "mistral", "C-RLFT", "en", "base_model:openchat/openchat-3.5-0106", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2024-06-08T19:43:22Z"
--- base_model: openchat/openchat-3.5-0106 language: - en library_name: transformers license: apache-2.0 quantized_by: mradermacher tags: - openchat - mistral - C-RLFT --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/openchat/openchat-3.5-0106 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/openchat-3.5-0106-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ1_S.gguf) | i1-IQ1_S | 1.7 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ1_M.gguf) | i1-IQ1_M | 1.9 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.1 | | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.3 | | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ2_S.gguf) | i1-IQ2_S | 2.4 | | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ2_M.gguf) | i1-IQ2_M | 2.6 | | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q2_K.gguf) | i1-Q2_K | 2.8 | IQ3_XXS probably better | | 
[GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 2.9 | lower quality | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.1 | | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.3 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ3_S.gguf) | i1-IQ3_S | 3.3 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ3_M.gguf) | i1-IQ3_M | 3.4 | | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q3_K_M.gguf) | i1-Q3_K_M | 3.6 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q3_K_L.gguf) | i1-Q3_K_L | 3.9 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.0 | | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q4_0.gguf) | i1-Q4_0 | 4.2 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.2 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q4_K_M.gguf) | i1-Q4_K_M | 4.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.1 | | | [GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.2 | | | 
[GGUF](https://huggingface.co/mradermacher/openchat-3.5-0106-i1-GGUF/resolve/main/openchat-3.5-0106.i1-Q6_K.gguf) | i1-Q6_K | 6.0 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF
mradermacher
"2024-06-10T20:29:15Z"
4,690
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:WesPro/F1-Chimera-Hybrid-LimaRP-8B", "endpoints_compatible", "region:us" ]
null
"2024-06-10T15:55:35Z"
--- base_model: WesPro/F1-Chimera-Hybrid-LimaRP-8B language: - en library_name: transformers quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/WesPro/F1-Chimera-Hybrid-LimaRP-8B <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | 
[GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | 
[GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/F1-Chimera-Hybrid-LimaRP-8B-i1-GGUF/resolve/main/F1-Chimera-Hybrid-LimaRP-8B.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
QuantFactory/Llama-3SOME-8B-v2-GGUF
QuantFactory
"2024-06-24T10:19:28Z"
4,690
0
null
[ "gguf", "region:us" ]
null
"2024-06-24T10:01:34Z"
Entry not found
neuralmagic/bge-small-en-v1.5-sparse
neuralmagic
"2023-11-13T18:23:24Z"
4,687
3
transformers
[ "transformers", "onnx", "bert", "feature-extraction", "mteb", "sparse sparsity quantized onnx embeddings int8", "en", "license:mit", "model-index", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
feature-extraction
"2023-09-21T13:21:02Z"
--- tags: - mteb - sparse sparsity quantized onnx embeddings int8 model-index: - name: bge-small-en-v1.5-sparse results: - task: type: Classification dataset: type: mteb/amazon_counterfactual name: MTEB AmazonCounterfactualClassification (en) config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 70.71641791044776 - type: ap value: 32.850850647310004 - type: f1 value: 64.48101916414805 - task: type: Classification dataset: type: mteb/amazon_polarity name: MTEB AmazonPolarityClassification config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 83.33962500000001 - type: ap value: 78.28706349240106 - type: f1 value: 83.27426715603062 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (en) config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.988 - type: f1 value: 40.776679545648506 - task: type: Retrieval dataset: type: arguana name: MTEB ArguAna config: default split: test revision: None metrics: - type: map_at_1 value: 26.101999999999997 - type: map_at_10 value: 40.754000000000005 - type: map_at_100 value: 41.83 - type: map_at_1000 value: 41.845 - type: map_at_3 value: 36.178 - type: map_at_5 value: 38.646 - type: mrr_at_1 value: 26.6 - type: mrr_at_10 value: 40.934 - type: mrr_at_100 value: 42.015 - type: mrr_at_1000 value: 42.03 - type: mrr_at_3 value: 36.344 - type: mrr_at_5 value: 38.848 - type: ndcg_at_1 value: 26.101999999999997 - type: ndcg_at_10 value: 49.126999999999995 - type: ndcg_at_100 value: 53.815999999999995 - type: ndcg_at_1000 value: 54.178000000000004 - type: ndcg_at_3 value: 39.607 - type: ndcg_at_5 value: 44.086999999999996 - type: precision_at_1 value: 26.101999999999997 - type: precision_at_10 value: 7.596 - type: precision_at_100 value: 0.967 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 16.524 - 
type: precision_at_5 value: 12.105 - type: recall_at_1 value: 26.101999999999997 - type: recall_at_10 value: 75.96000000000001 - type: recall_at_100 value: 96.65700000000001 - type: recall_at_1000 value: 99.431 - type: recall_at_3 value: 49.573 - type: recall_at_5 value: 60.526 - task: type: Clustering dataset: type: mteb/arxiv-clustering-p2p name: MTEB ArxivClusteringP2P config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 43.10651535441929 - task: type: Clustering dataset: type: mteb/arxiv-clustering-s2s name: MTEB ArxivClusteringS2S config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 34.41095293826606 - task: type: Reranking dataset: type: mteb/askubuntudupquestions-reranking name: MTEB AskUbuntuDupQuestions config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 56.96575970919239 - type: mrr value: 69.92503187794047 - task: type: STS dataset: type: mteb/biosses-sts name: MTEB BIOSSES config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 79.64892774481326 - type: cos_sim_spearman value: 78.953003817029 - type: euclidean_pearson value: 78.92456838230683 - type: euclidean_spearman value: 78.56504316985354 - type: manhattan_pearson value: 79.21436359014227 - type: manhattan_spearman value: 78.66263575501259 - task: type: Classification dataset: type: mteb/banking77 name: MTEB Banking77Classification config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 81.25 - type: f1 value: 81.20841448916138 - task: type: Clustering dataset: type: mteb/biorxiv-clustering-p2p name: MTEB BiorxivClusteringP2P config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 34.69545244587236 - task: type: Clustering dataset: type: 
mteb/biorxiv-clustering-s2s name: MTEB BiorxivClusteringS2S config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 28.84301739171936 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackAndroidRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 23.401 - type: map_at_10 value: 32.451 - type: map_at_100 value: 33.891 - type: map_at_1000 value: 34.01 - type: map_at_3 value: 29.365999999999996 - type: map_at_5 value: 31.240000000000002 - type: mrr_at_1 value: 29.9 - type: mrr_at_10 value: 38.590999999999994 - type: mrr_at_100 value: 39.587 - type: mrr_at_1000 value: 39.637 - type: mrr_at_3 value: 36.028 - type: mrr_at_5 value: 37.673 - type: ndcg_at_1 value: 29.9 - type: ndcg_at_10 value: 38.251000000000005 - type: ndcg_at_100 value: 44.354 - type: ndcg_at_1000 value: 46.642 - type: ndcg_at_3 value: 33.581 - type: ndcg_at_5 value: 35.96 - type: precision_at_1 value: 29.9 - type: precision_at_10 value: 7.439 - type: precision_at_100 value: 1.28 - type: precision_at_1000 value: 0.17700000000000002 - type: precision_at_3 value: 16.404 - type: precision_at_5 value: 12.046 - type: recall_at_1 value: 23.401 - type: recall_at_10 value: 49.305 - type: recall_at_100 value: 75.885 - type: recall_at_1000 value: 90.885 - type: recall_at_3 value: 35.341 - type: recall_at_5 value: 42.275 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackEnglishRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 22.103 - type: map_at_10 value: 29.271 - type: map_at_100 value: 30.151 - type: map_at_1000 value: 30.276999999999997 - type: map_at_3 value: 27.289 - type: map_at_5 value: 28.236 - type: mrr_at_1 value: 26.943 - type: mrr_at_10 value: 33.782000000000004 - type: mrr_at_100 value: 34.459 - type: mrr_at_1000 value: 34.525 - type: mrr_at_3 value: 31.985000000000003 - type: mrr_at_5 value: 32.909 - type: ndcg_at_1 value: 
26.943 - type: ndcg_at_10 value: 33.616 - type: ndcg_at_100 value: 37.669000000000004 - type: ndcg_at_1000 value: 40.247 - type: ndcg_at_3 value: 30.482 - type: ndcg_at_5 value: 31.615 - type: precision_at_1 value: 26.943 - type: precision_at_10 value: 6.146 - type: precision_at_100 value: 1.038 - type: precision_at_1000 value: 0.151 - type: precision_at_3 value: 14.521999999999998 - type: precision_at_5 value: 10.038 - type: recall_at_1 value: 22.103 - type: recall_at_10 value: 41.754999999999995 - type: recall_at_100 value: 59.636 - type: recall_at_1000 value: 76.801 - type: recall_at_3 value: 32.285000000000004 - type: recall_at_5 value: 35.684 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGamingRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 32.565 - type: map_at_10 value: 43.07 - type: map_at_100 value: 44.102999999999994 - type: map_at_1000 value: 44.175 - type: map_at_3 value: 40.245 - type: map_at_5 value: 41.71 - type: mrr_at_1 value: 37.429 - type: mrr_at_10 value: 46.358 - type: mrr_at_100 value: 47.146 - type: mrr_at_1000 value: 47.187 - type: mrr_at_3 value: 44.086 - type: mrr_at_5 value: 45.318000000000005 - type: ndcg_at_1 value: 37.429 - type: ndcg_at_10 value: 48.398 - type: ndcg_at_100 value: 52.90899999999999 - type: ndcg_at_1000 value: 54.478 - type: ndcg_at_3 value: 43.418 - type: ndcg_at_5 value: 45.578 - type: precision_at_1 value: 37.429 - type: precision_at_10 value: 7.856000000000001 - type: precision_at_100 value: 1.093 - type: precision_at_1000 value: 0.129 - type: precision_at_3 value: 19.331 - type: precision_at_5 value: 13.191 - type: recall_at_1 value: 32.565 - type: recall_at_10 value: 61.021 - type: recall_at_100 value: 81.105 - type: recall_at_1000 value: 92.251 - type: recall_at_3 value: 47.637 - type: recall_at_5 value: 52.871 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackGisRetrieval config: default split: test revision: None 
metrics: - type: map_at_1 value: 18.108 - type: map_at_10 value: 24.613 - type: map_at_100 value: 25.624000000000002 - type: map_at_1000 value: 25.721 - type: map_at_3 value: 22.271 - type: map_at_5 value: 23.681 - type: mrr_at_1 value: 19.435 - type: mrr_at_10 value: 26.124000000000002 - type: mrr_at_100 value: 27.07 - type: mrr_at_1000 value: 27.145999999999997 - type: mrr_at_3 value: 23.748 - type: mrr_at_5 value: 25.239 - type: ndcg_at_1 value: 19.435 - type: ndcg_at_10 value: 28.632 - type: ndcg_at_100 value: 33.988 - type: ndcg_at_1000 value: 36.551 - type: ndcg_at_3 value: 24.035999999999998 - type: ndcg_at_5 value: 26.525 - type: precision_at_1 value: 19.435 - type: precision_at_10 value: 4.565 - type: precision_at_100 value: 0.771 - type: precision_at_1000 value: 0.10200000000000001 - type: precision_at_3 value: 10.169 - type: precision_at_5 value: 7.571 - type: recall_at_1 value: 18.108 - type: recall_at_10 value: 39.533 - type: recall_at_100 value: 64.854 - type: recall_at_1000 value: 84.421 - type: recall_at_3 value: 27.500000000000004 - type: recall_at_5 value: 33.314 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackMathematicaRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 11.087 - type: map_at_10 value: 17.323 - type: map_at_100 value: 18.569 - type: map_at_1000 value: 18.694 - type: map_at_3 value: 15.370000000000001 - type: map_at_5 value: 16.538 - type: mrr_at_1 value: 13.557 - type: mrr_at_10 value: 21.041 - type: mrr_at_100 value: 22.134 - type: mrr_at_1000 value: 22.207 - type: mrr_at_3 value: 18.843 - type: mrr_at_5 value: 20.236 - type: ndcg_at_1 value: 13.557 - type: ndcg_at_10 value: 21.571 - type: ndcg_at_100 value: 27.678000000000004 - type: ndcg_at_1000 value: 30.8 - type: ndcg_at_3 value: 17.922 - type: ndcg_at_5 value: 19.826 - type: precision_at_1 value: 13.557 - type: precision_at_10 value: 4.1290000000000004 - type: precision_at_100 value: 0.8370000000000001 - type: 
precision_at_1000 value: 0.125 - type: precision_at_3 value: 8.914 - type: precision_at_5 value: 6.691999999999999 - type: recall_at_1 value: 11.087 - type: recall_at_10 value: 30.94 - type: recall_at_100 value: 57.833999999999996 - type: recall_at_1000 value: 80.365 - type: recall_at_3 value: 20.854 - type: recall_at_5 value: 25.695 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackPhysicsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 21.708 - type: map_at_10 value: 30.422 - type: map_at_100 value: 31.713 - type: map_at_1000 value: 31.842 - type: map_at_3 value: 27.424 - type: map_at_5 value: 29.17 - type: mrr_at_1 value: 26.756 - type: mrr_at_10 value: 35.304 - type: mrr_at_100 value: 36.296 - type: mrr_at_1000 value: 36.359 - type: mrr_at_3 value: 32.692 - type: mrr_at_5 value: 34.288999999999994 - type: ndcg_at_1 value: 26.756 - type: ndcg_at_10 value: 35.876000000000005 - type: ndcg_at_100 value: 41.708 - type: ndcg_at_1000 value: 44.359 - type: ndcg_at_3 value: 30.946 - type: ndcg_at_5 value: 33.404 - type: precision_at_1 value: 26.756 - type: precision_at_10 value: 6.795 - type: precision_at_100 value: 1.138 - type: precision_at_1000 value: 0.155 - type: precision_at_3 value: 15.046999999999999 - type: precision_at_5 value: 10.972 - type: recall_at_1 value: 21.708 - type: recall_at_10 value: 47.315000000000005 - type: recall_at_100 value: 72.313 - type: recall_at_1000 value: 90.199 - type: recall_at_3 value: 33.528999999999996 - type: recall_at_5 value: 39.985 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackProgrammersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 18.902 - type: map_at_10 value: 26.166 - type: map_at_100 value: 27.368 - type: map_at_1000 value: 27.493000000000002 - type: map_at_3 value: 23.505000000000003 - type: map_at_5 value: 25.019000000000002 - type: mrr_at_1 value: 23.402 - type: mrr_at_10 value: 
30.787 - type: mrr_at_100 value: 31.735000000000003 - type: mrr_at_1000 value: 31.806 - type: mrr_at_3 value: 28.33 - type: mrr_at_5 value: 29.711 - type: ndcg_at_1 value: 23.402 - type: ndcg_at_10 value: 30.971 - type: ndcg_at_100 value: 36.61 - type: ndcg_at_1000 value: 39.507999999999996 - type: ndcg_at_3 value: 26.352999999999998 - type: ndcg_at_5 value: 28.488000000000003 - type: precision_at_1 value: 23.402 - type: precision_at_10 value: 5.799 - type: precision_at_100 value: 1.0 - type: precision_at_1000 value: 0.14100000000000001 - type: precision_at_3 value: 12.633 - type: precision_at_5 value: 9.269 - type: recall_at_1 value: 18.902 - type: recall_at_10 value: 40.929 - type: recall_at_100 value: 65.594 - type: recall_at_1000 value: 85.961 - type: recall_at_3 value: 28.121000000000002 - type: recall_at_5 value: 33.638 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackStatsRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 19.168 - type: map_at_10 value: 25.142999999999997 - type: map_at_100 value: 25.993 - type: map_at_1000 value: 26.076 - type: map_at_3 value: 23.179 - type: map_at_5 value: 24.322 - type: mrr_at_1 value: 21.933 - type: mrr_at_10 value: 27.72 - type: mrr_at_100 value: 28.518 - type: mrr_at_1000 value: 28.582 - type: mrr_at_3 value: 25.791999999999998 - type: mrr_at_5 value: 26.958 - type: ndcg_at_1 value: 21.933 - type: ndcg_at_10 value: 28.866999999999997 - type: ndcg_at_100 value: 33.285 - type: ndcg_at_1000 value: 35.591 - type: ndcg_at_3 value: 25.202999999999996 - type: ndcg_at_5 value: 27.045 - type: precision_at_1 value: 21.933 - type: precision_at_10 value: 4.632 - type: precision_at_100 value: 0.733 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 10.992 - type: precision_at_5 value: 7.853000000000001 - type: recall_at_1 value: 19.168 - type: recall_at_10 value: 37.899 - type: recall_at_100 value: 58.54899999999999 - type: recall_at_1000 value: 
75.666 - type: recall_at_3 value: 27.831 - type: recall_at_5 value: 32.336 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackTexRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 12.764000000000001 - type: map_at_10 value: 17.757 - type: map_at_100 value: 18.677 - type: map_at_1000 value: 18.813 - type: map_at_3 value: 16.151 - type: map_at_5 value: 16.946 - type: mrr_at_1 value: 15.726 - type: mrr_at_10 value: 21.019 - type: mrr_at_100 value: 21.856 - type: mrr_at_1000 value: 21.954 - type: mrr_at_3 value: 19.282 - type: mrr_at_5 value: 20.189 - type: ndcg_at_1 value: 15.726 - type: ndcg_at_10 value: 21.259 - type: ndcg_at_100 value: 25.868999999999996 - type: ndcg_at_1000 value: 29.425 - type: ndcg_at_3 value: 18.204 - type: ndcg_at_5 value: 19.434 - type: precision_at_1 value: 15.726 - type: precision_at_10 value: 3.8920000000000003 - type: precision_at_100 value: 0.741 - type: precision_at_1000 value: 0.121 - type: precision_at_3 value: 8.58 - type: precision_at_5 value: 6.132 - type: recall_at_1 value: 12.764000000000001 - type: recall_at_10 value: 28.639 - type: recall_at_100 value: 49.639 - type: recall_at_1000 value: 75.725 - type: recall_at_3 value: 19.883 - type: recall_at_5 value: 23.141000000000002 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackUnixRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 18.98 - type: map_at_10 value: 25.2 - type: map_at_100 value: 26.279000000000003 - type: map_at_1000 value: 26.399 - type: map_at_3 value: 23.399 - type: map_at_5 value: 24.284 - type: mrr_at_1 value: 22.015 - type: mrr_at_10 value: 28.555000000000003 - type: mrr_at_100 value: 29.497 - type: mrr_at_1000 value: 29.574 - type: mrr_at_3 value: 26.788 - type: mrr_at_5 value: 27.576 - type: ndcg_at_1 value: 22.015 - type: ndcg_at_10 value: 29.266 - type: ndcg_at_100 value: 34.721000000000004 - type: ndcg_at_1000 value: 37.659 - type: 
ndcg_at_3 value: 25.741000000000003 - type: ndcg_at_5 value: 27.044 - type: precision_at_1 value: 22.015 - type: precision_at_10 value: 4.897 - type: precision_at_100 value: 0.8540000000000001 - type: precision_at_1000 value: 0.122 - type: precision_at_3 value: 11.567 - type: precision_at_5 value: 7.9479999999999995 - type: recall_at_1 value: 18.98 - type: recall_at_10 value: 38.411 - type: recall_at_100 value: 63.164 - type: recall_at_1000 value: 84.292 - type: recall_at_3 value: 28.576 - type: recall_at_5 value: 31.789 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWebmastersRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 20.372 - type: map_at_10 value: 27.161 - type: map_at_100 value: 28.364 - type: map_at_1000 value: 28.554000000000002 - type: map_at_3 value: 25.135 - type: map_at_5 value: 26.200000000000003 - type: mrr_at_1 value: 24.704 - type: mrr_at_10 value: 31.219 - type: mrr_at_100 value: 32.092 - type: mrr_at_1000 value: 32.181 - type: mrr_at_3 value: 29.282000000000004 - type: mrr_at_5 value: 30.359 - type: ndcg_at_1 value: 24.704 - type: ndcg_at_10 value: 31.622 - type: ndcg_at_100 value: 36.917 - type: ndcg_at_1000 value: 40.357 - type: ndcg_at_3 value: 28.398 - type: ndcg_at_5 value: 29.764000000000003 - type: precision_at_1 value: 24.704 - type: precision_at_10 value: 5.81 - type: precision_at_100 value: 1.208 - type: precision_at_1000 value: 0.209 - type: precision_at_3 value: 13.241 - type: precision_at_5 value: 9.407 - type: recall_at_1 value: 20.372 - type: recall_at_10 value: 40.053 - type: recall_at_100 value: 64.71000000000001 - type: recall_at_1000 value: 87.607 - type: recall_at_3 value: 29.961 - type: recall_at_5 value: 34.058 - task: type: Retrieval dataset: type: BeIR/cqadupstack name: MTEB CQADupstackWordpressRetrieval config: default split: test revision: None metrics: - type: map_at_1 value: 14.424000000000001 - type: map_at_10 value: 20.541999999999998 - type: 
map_at_100 value: 21.495 - type: map_at_1000 value: 21.604 - type: map_at_3 value: 18.608 - type: map_at_5 value: 19.783 - type: mrr_at_1 value: 15.895999999999999 - type: mrr_at_10 value: 22.484 - type: mrr_at_100 value: 23.376 - type: mrr_at_1000 value: 23.467 - type: mrr_at_3 value: 20.548 - type: mrr_at_5 value: 21.731 - type: ndcg_at_1 value: 15.895999999999999 - type: ndcg_at_10 value: 24.343 - type: ndcg_at_100 value: 29.181 - type: ndcg_at_1000 value: 32.330999999999996 - type: ndcg_at_3 value: 20.518 - type: ndcg_at_5 value: 22.561999999999998 - type: precision_at_1 value: 15.895999999999999 - type: precision_at_10 value: 3.9739999999999998 - type: precision_at_100 value: 0.6799999999999999 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 9.057 - type: precision_at_5 value: 6.654 - type: recall_at_1 value: 14.424000000000001 - type: recall_at_10 value: 34.079 - type: recall_at_100 value: 56.728 - type: recall_at_1000 value: 80.765 - type: recall_at_3 value: 23.993000000000002 - type: recall_at_5 value: 28.838 - task: type: Classification dataset: type: mteb/emotion name: MTEB EmotionClassification config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 41.665 - type: f1 value: 37.601137843331244 - task: type: Classification dataset: type: mteb/imdb name: MTEB ImdbClassification config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 74.8052 - type: ap value: 68.92588517572685 - type: f1 value: 74.66801685854456 - task: type: Classification dataset: type: mteb/mtop_domain name: MTEB MTOPDomainClassification (en) config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 91.2220702234382 - type: f1 value: 90.81687856852439 - task: type: Classification dataset: type: mteb/mtop_intent name: MTEB MTOPIntentClassification (en) config: en split: test revision: 
ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 69.39124487004105 - type: f1 value: 51.8350043424968 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (en) config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.80497646267652 - type: f1 value: 67.34213899244814 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (en) config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.54270342972428 - type: f1 value: 74.02802500235784 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-p2p name: MTEB MedrxivClusteringP2P config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 30.488580544269002 - task: type: Clustering dataset: type: mteb/medrxiv-clustering-s2s name: MTEB MedrxivClusteringS2S config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 28.80426879476371 - task: type: Reranking dataset: type: mteb/mind_small name: MTEB MindSmallReranking config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 31.37970068676043 - type: mrr value: 32.48523694064166 - task: type: Clustering dataset: type: mteb/reddit-clustering name: MTEB RedditClustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 42.862710845031565 - task: type: Clustering dataset: type: mteb/reddit-clustering-p2p name: MTEB RedditClusteringP2P config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 54.270000736385626 - task: type: STS dataset: type: mteb/sickr-sts name: MTEB SICK-R config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: 
- type: cos_sim_pearson value: 80.89215288990194 - type: cos_sim_spearman value: 74.386413188675 - type: euclidean_pearson value: 78.83679563989534 - type: euclidean_spearman value: 74.29328198771996 - type: manhattan_pearson value: 78.77968796707641 - type: manhattan_spearman value: 74.20887429784696 - task: type: STS dataset: type: mteb/sts12-sts name: MTEB STS12 config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 78.31858821914498 - type: cos_sim_spearman value: 72.2217008523832 - type: euclidean_pearson value: 75.38901061978429 - type: euclidean_spearman value: 71.81255767675184 - type: manhattan_pearson value: 75.49472202181288 - type: manhattan_spearman value: 71.96322588726144 - task: type: STS dataset: type: mteb/sts13-sts name: MTEB STS13 config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 79.48334648997455 - type: cos_sim_spearman value: 80.99654029572798 - type: euclidean_pearson value: 80.46546523970035 - type: euclidean_spearman value: 80.90646216980744 - type: manhattan_pearson value: 80.35474057857608 - type: manhattan_spearman value: 80.8141299909659 - task: type: STS dataset: type: mteb/sts14-sts name: MTEB STS14 config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 79.73826970784727 - type: cos_sim_spearman value: 76.9926870133034 - type: euclidean_pearson value: 79.6386542120984 - type: euclidean_spearman value: 77.05041986942253 - type: manhattan_pearson value: 79.61799508502459 - type: manhattan_spearman value: 77.07169617647067 - task: type: STS dataset: type: mteb/sts15-sts name: MTEB STS15 config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 83.93999019426069 - type: cos_sim_spearman value: 85.21166521594695 - type: euclidean_pearson value: 84.97207676326357 - type: 
euclidean_spearman value: 85.40726578482739 - type: manhattan_pearson value: 85.0386693192183 - type: manhattan_spearman value: 85.49230945586409 - task: type: STS dataset: type: mteb/sts16-sts name: MTEB STS16 config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 80.8133974034008 - type: cos_sim_spearman value: 82.82919022688844 - type: euclidean_pearson value: 81.92587923760179 - type: euclidean_spearman value: 82.86629450518863 - type: manhattan_pearson value: 81.98232365999253 - type: manhattan_spearman value: 82.94313939920296 - task: type: STS dataset: type: mteb/sts17-crosslingual-sts name: MTEB STS17 (en-en) config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 86.12872422642363 - type: cos_sim_spearman value: 87.77672179979807 - type: euclidean_pearson value: 87.76172961705947 - type: euclidean_spearman value: 87.9891393339215 - type: manhattan_pearson value: 87.78863663568221 - type: manhattan_spearman value: 88.08297053203866 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (en) config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 58.82824030232733 - type: cos_sim_spearman value: 64.17079382633538 - type: euclidean_pearson value: 61.31505225602925 - type: euclidean_spearman value: 64.05080034530694 - type: manhattan_pearson value: 61.77095758943306 - type: manhattan_spearman value: 64.14475973774933 - task: type: STS dataset: type: mteb/stsbenchmark-sts name: MTEB STSBenchmark config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 81.39239803497064 - type: cos_sim_spearman value: 81.76637354520439 - type: euclidean_pearson value: 82.98008209033587 - type: euclidean_spearman value: 82.18662536188657 - type: manhattan_pearson value: 82.9630328314908 - type: manhattan_spearman 
value: 82.13726553603003 - task: type: Reranking dataset: type: mteb/scidocs-reranking name: MTEB SciDocsRR config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 79.45753132898741 - type: mrr value: 93.84029822755313 - task: type: PairClassification dataset: type: mteb/sprintduplicatequestions-pairclassification name: MTEB SprintDuplicateQuestions config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.8019801980198 - type: cos_sim_ap value: 94.58629018512772 - type: cos_sim_f1 value: 89.84771573604061 - type: cos_sim_precision value: 91.23711340206185 - type: cos_sim_recall value: 88.5 - type: dot_accuracy value: 99.74950495049505 - type: dot_ap value: 92.5761214576951 - type: dot_f1 value: 87.09841917389087 - type: dot_precision value: 88.86576482830385 - type: dot_recall value: 85.39999999999999 - type: euclidean_accuracy value: 99.80495049504951 - type: euclidean_ap value: 94.56231673602272 - type: euclidean_f1 value: 90.02531645569621 - type: euclidean_precision value: 91.17948717948718 - type: euclidean_recall value: 88.9 - type: manhattan_accuracy value: 99.8009900990099 - type: manhattan_ap value: 94.5775591647447 - type: manhattan_f1 value: 89.86384266263238 - type: manhattan_precision value: 90.64089521871821 - type: manhattan_recall value: 89.1 - type: max_accuracy value: 99.80495049504951 - type: max_ap value: 94.58629018512772 - type: max_f1 value: 90.02531645569621 - task: type: Clustering dataset: type: mteb/stackexchange-clustering name: MTEB StackExchangeClustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 53.088941385715735 - task: type: Clustering dataset: type: mteb/stackexchange-clustering-p2p name: MTEB StackExchangeClusteringP2P config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 
33.146129414825744 - task: type: Reranking dataset: type: mteb/stackoverflowdupquestions-reranking name: MTEB StackOverflowDupQuestions config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 48.7511362739003 - type: mrr value: 49.61682210763093 - task: type: Classification dataset: type: mteb/toxic_conversations_50k name: MTEB ToxicConversationsClassification config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 67.43820000000001 - type: ap value: 12.899489312331003 - type: f1 value: 52.03468121072981 - task: type: Classification dataset: type: mteb/tweet_sentiment_extraction name: MTEB TweetSentimentExtractionClassification config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 57.475947934352 - type: f1 value: 57.77676730676238 - task: type: Clustering dataset: type: mteb/twentynewsgroups-clustering name: MTEB TwentyNewsgroupsClustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 38.3463456299738 - task: type: PairClassification dataset: type: mteb/twittersemeval2015-pairclassification name: MTEB TwitterSemEval2015 config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 83.94230196101806 - type: cos_sim_ap value: 67.00916556336148 - type: cos_sim_f1 value: 63.046014257939085 - type: cos_sim_precision value: 61.961783439490446 - type: cos_sim_recall value: 64.16886543535621 - type: dot_accuracy value: 83.18531322644095 - type: dot_ap value: 63.112896030267066 - type: dot_f1 value: 59.06565656565657 - type: dot_precision value: 56.63438256658596 - type: dot_recall value: 61.715039577836414 - type: euclidean_accuracy value: 83.94230196101806 - type: euclidean_ap value: 67.19856676674463 - type: euclidean_f1 value: 63.08428413691571 - type: euclidean_precision value: 
58.9543682641596 - type: euclidean_recall value: 67.83641160949868 - type: manhattan_accuracy value: 83.91845979614949 - type: manhattan_ap value: 66.9845327263072 - type: manhattan_f1 value: 62.693323274236135 - type: manhattan_precision value: 59.884698534710544 - type: manhattan_recall value: 65.77836411609499 - type: max_accuracy value: 83.94230196101806 - type: max_ap value: 67.19856676674463 - type: max_f1 value: 63.08428413691571 - task: type: PairClassification dataset: type: mteb/twitterurlcorpus-pairclassification name: MTEB TwitterURLCorpus config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.0777738968448 - type: cos_sim_ap value: 84.19747786536 - type: cos_sim_f1 value: 75.91830995817077 - type: cos_sim_precision value: 69.84671107949033 - type: cos_sim_recall value: 83.14598090545118 - type: dot_accuracy value: 87.14246904955951 - type: dot_ap value: 82.37528804640529 - type: dot_f1 value: 74.40963166732163 - type: dot_precision value: 69.4127841098447 - type: dot_recall value: 80.18170619032954 - type: euclidean_accuracy value: 88.08359529630924 - type: euclidean_ap value: 84.22633217661986 - type: euclidean_f1 value: 76.09190339866403 - type: euclidean_precision value: 72.70304390517605 - type: euclidean_recall value: 79.81213427779488 - type: manhattan_accuracy value: 88.08359529630924 - type: manhattan_ap value: 84.18362004611083 - type: manhattan_f1 value: 76.08789625360231 - type: manhattan_precision value: 71.49336582724072 - type: manhattan_recall value: 81.3135201724669 - type: max_accuracy value: 88.08359529630924 - type: max_ap value: 84.22633217661986 - type: max_f1 value: 76.09190339866403 license: mit language: - en --- # bge-small-en-v1.5-sparse ## Usage This is the sparse ONNX variant of the [bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) embeddings model accelerated with [Sparsify](https://github.com/neuralmagic/sparsify) for quantization/pruning 
and [DeepSparseSentenceTransformers](https://github.com/neuralmagic/deepsparse/tree/main/src/deepsparse/sentence_transformers) for inference. ```bash pip install -U deepsparse-nightly[sentence_transformers] ``` ```python from deepsparse.sentence_transformers import DeepSparseSentenceTransformer model = DeepSparseSentenceTransformer('neuralmagic/bge-small-en-v1.5-sparse', export=False) # Our sentences we like to encode sentences = ['This framework generates embeddings for each input sentence', 'Sentences are passed as a list of string.', 'The quick brown fox jumps over the lazy dog.'] # Sentences are encoded by calling model.encode() embeddings = model.encode(sentences) # Print the embeddings for sentence, embedding in zip(sentences, embeddings): print("Sentence:", sentence) print("Embedding:", embedding.shape) print("") ``` For general questions on these models and sparsification methods, reach out to the engineering team on our [community Slack](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ).
backyardai/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B-GGUF
backyardai
"2024-06-07T01:30:49Z"
4,686
1
transformers
[ "transformers", "gguf", "mergekit", "merge", "base_model:backyardai/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B", "endpoints_compatible", "region:us" ]
null
"2024-06-03T00:41:43Z"
--- library_name: transformers tags: - mergekit - merge base_model: backyardai/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B model_name: Fimbulvetr-Holodeck-Erebus-Westlake-10.7B-GGUF parameter_count: 10731524096 quantized_by: brooketh --- <img src="BackyardAI_Banner.png" alt="Backyard.ai" style="height: 90px; min-width: 32px; display: block; margin: auto;"> **<p style="text-align: center;">The official library of GGUF format models for use in the local AI chat app, Backyard AI.</p>** <p style="text-align: center;"><a href="https://backyard.ai/">Download Backyard AI here to get started.</a></p> <p style="text-align: center;"><a href="https://www.reddit.com/r/LLM_Quants/">Request Additional models at r/LLM_Quants.</a></p> *** # Fimbulvetr Holodeck Erebus Westlake 10.7B - **Creator:** [PJMixers](https://huggingface.co/PJMixers/) - **Original:** <u>Fimbulvetr Holodeck Erebus Westlake 10.7B</u> (merge recipe [here](https://huggingface.co/PJMixers/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B-GGUF/blob/main/Fimbulvetr-Holodeck-Erebus-Westlake-10.7B.yaml)) - **Date Created:** 2024-03-30 - **Trained Context:** 32768 tokens - **Description:** Frankenmerge of four different base models, combining their strengths or weaknesses into one very experimental new model. Reported to be excellent for roleplaying. *** ## What is a GGUF? GGUF is a large language model (LLM) format that can be split between CPU and GPU. GGUFs are compatible with applications based on llama.cpp, such as Backyard AI. Where other model formats require higher end GPUs with ample VRAM, GGUFs can be efficiently run on a wider variety of hardware. GGUF models are quantized to reduce resource usage, with a tradeoff of reduced coherence at lower quantizations. Quantization reduces the precision of the model weights by changing the number of bits used for each weight. 
*** <img src="BackyardAI_Logo.png" alt="Backyard.ai" style="height: 75px; min-width: 32px; display: block; horizontal align: left;"> ## Backyard AI - Free, local AI chat application. - One-click installation on Mac and PC. - Automatically use GPU for maximum speed. - Built-in model manager. - High-quality character hub. - Zero-config desktop-to-mobile tethering. Backyard AI makes it easy to start chatting with AI using your own characters or one of the many found in the built-in character hub. The model manager helps you find the latest and greatest models without worrying about whether it's the correct format. Backyard AI supports advanced features such as lorebooks, author's note, text formatting, custom context size, sampler settings, grammars, local TTS, cloud inference, and tethering, all implemented in a way that is straightforward and reliable. **Join us on [Discord](https://discord.gg/SyNN2vC9tQ)** ***
shunk031/aesthetics-predictor-v2-sac-logos-ava1-l14-linearMSE
shunk031
"2024-06-14T10:55:36Z"
4,681
6
transformers
[ "transformers", "pytorch", "aesthetics_predictor", "feature-extraction", "custom_code", "region:us" ]
feature-extraction
"2023-07-05T14:32:53Z"
Entry not found
SteelQuants/L3-Aethora-15B-V2-Q6_K-GGUF
SteelQuants
"2024-06-27T04:34:24Z"
4,677
1
transformers
[ "transformers", "gguf", "llama-cpp", "gguf-my-repo", "en", "dataset:TheSkullery/Aether-Lite-v1.8.1", "base_model:ZeusLabs/L3-Aethora-15B-V2", "license:cc-by-sa-4.0", "endpoints_compatible", "region:us" ]
null
"2024-06-27T04:33:25Z"
--- base_model: ZeusLabs/L3-Aethora-15B-V2 datasets: - TheSkullery/Aether-Lite-v1.8.1 language: - en library_name: transformers license: cc-by-sa-4.0 tags: - llama-cpp - gguf-my-repo --- # Steelskull/L3-Aethora-15B-V2-Q6_K-GGUF This model was converted to GGUF format from [`ZeusLabs/L3-Aethora-15B-V2`](https://huggingface.co/ZeusLabs/L3-Aethora-15B-V2) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/ZeusLabs/L3-Aethora-15B-V2) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo Steelskull/L3-Aethora-15B-V2-Q6_K-GGUF --hf-file l3-aethora-15b-v2-q6_k.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo Steelskull/L3-Aethora-15B-V2-Q6_K-GGUF --hf-file l3-aethora-15b-v2-q6_k.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo Steelskull/L3-Aethora-15B-V2-Q6_K-GGUF --hf-file l3-aethora-15b-v2-q6_k.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo Steelskull/L3-Aethora-15B-V2-Q6_K-GGUF --hf-file l3-aethora-15b-v2-q6_k.gguf -c 2048 ```
hustvl/yolos-base
hustvl
"2024-05-08T07:48:46Z"
4,671
23
transformers
[ "transformers", "pytorch", "yolos", "object-detection", "vision", "dataset:coco", "arxiv:2106.00666", "license:apache-2.0", "endpoints_compatible", "region:us" ]
object-detection
"2022-04-26T09:30:39Z"
--- license: apache-2.0 tags: - object-detection - vision datasets: - coco widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/savanna.jpg example_title: Savanna - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/football-match.jpg example_title: Football Match - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/airport.jpg example_title: Airport --- # YOLOS (base-sized) model YOLOS model fine-tuned on COCO 2017 object detection (118k annotated images). It was introduced in the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Fang et al. and first released in [this repository](https://github.com/hustvl/YOLOS). Disclaimer: The team releasing YOLOS did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description YOLOS is a Vision Transformer (ViT) trained using the DETR loss. Despite its simplicity, a base-sized YOLOS model is able to achieve 42 AP on COCO validation 2017 (similar to DETR and more complex frameworks such as Faster R-CNN). The model is trained using a "bipartite matching loss": one compares the predicted classes + bounding boxes of each of the N = 100 object queries to the ground truth annotations, padded up to the same length N (so if an image only contains 4 objects, 96 annotations will just have a "no object" as class and "no bounding box" as bounding box). The Hungarian matching algorithm is used to create an optimal one-to-one mapping between each of the N queries and each of the N annotations. Next, standard cross-entropy (for the classes) and a linear combination of the L1 and generalized IoU loss (for the bounding boxes) are used to optimize the parameters of the model. ## Intended uses & limitations You can use the raw model for object detection. 
See the [model hub](https://huggingface.co/models?search=hustvl/yolos) to look for all available YOLOS models. ### How to use Here is how to use this model: ```python from transformers import YolosFeatureExtractor, YolosForObjectDetection from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) feature_extractor = YolosFeatureExtractor.from_pretrained('hustvl/yolos-base') model = YolosForObjectDetection.from_pretrained('hustvl/yolos-base') inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) # model predicts bounding boxes and corresponding COCO classes logits = outputs.logits bboxes = outputs.pred_boxes ``` Currently, both the feature extractor and model support PyTorch. ## Training data The YOLOS model was pre-trained on [ImageNet-1k](https://huggingface.co/datasets/imagenet2012) and fine-tuned on [COCO 2017 object detection](https://cocodataset.org/#download), a dataset consisting of 118k/5k annotated images for training/validation respectively. ### Training The model was pre-trained for 1000 epochs on ImageNet-1k and fine-tuned for 150 epochs on COCO. ## Evaluation results This model achieves an AP (average precision) of **42.0** on COCO 2017 validation. For more details regarding evaluation results, we refer to the original paper. 
### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2106-00666, author = {Yuxin Fang and Bencheng Liao and Xinggang Wang and Jiemin Fang and Jiyang Qi and Rui Wu and Jianwei Niu and Wenyu Liu}, title = {You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection}, journal = {CoRR}, volume = {abs/2106.00666}, year = {2021}, url = {https://arxiv.org/abs/2106.00666}, eprinttype = {arXiv}, eprint = {2106.00666}, timestamp = {Fri, 29 Apr 2022 19:49:16 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-2106-00666.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf
RichardErkhov
"2024-06-03T21:49:43Z"
4,671
0
null
[ "gguf", "region:us" ]
null
"2024-06-03T17:12:08Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) zephyr-7b-dpo-full-beta-0.2 - GGUF - Model creator: https://huggingface.co/tianlinliu0121/ - Original model: https://huggingface.co/tianlinliu0121/zephyr-7b-dpo-full-beta-0.2/ | Name | Quant method | Size | | ---- | ---- | ---- | | [zephyr-7b-dpo-full-beta-0.2.Q2_K.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q2_K.gguf) | Q2_K | 2.53GB | | [zephyr-7b-dpo-full-beta-0.2.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.IQ3_XS.gguf) | IQ3_XS | 2.81GB | | [zephyr-7b-dpo-full-beta-0.2.IQ3_S.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.IQ3_S.gguf) | IQ3_S | 2.35GB | | [zephyr-7b-dpo-full-beta-0.2.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q3_K_S.gguf) | Q3_K_S | 2.95GB | | [zephyr-7b-dpo-full-beta-0.2.IQ3_M.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.IQ3_M.gguf) | IQ3_M | 3.06GB | | [zephyr-7b-dpo-full-beta-0.2.Q3_K.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q3_K.gguf) | Q3_K | 3.28GB | | [zephyr-7b-dpo-full-beta-0.2.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q3_K_M.gguf) | Q3_K_M | 3.28GB | | [zephyr-7b-dpo-full-beta-0.2.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q3_K_L.gguf) 
| Q3_K_L | 3.56GB | | [zephyr-7b-dpo-full-beta-0.2.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.IQ4_XS.gguf) | IQ4_XS | 3.67GB | | [zephyr-7b-dpo-full-beta-0.2.Q4_0.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q4_0.gguf) | Q4_0 | 3.83GB | | [zephyr-7b-dpo-full-beta-0.2.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.IQ4_NL.gguf) | IQ4_NL | 3.87GB | | [zephyr-7b-dpo-full-beta-0.2.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q4_K_S.gguf) | Q4_K_S | 3.86GB | | [zephyr-7b-dpo-full-beta-0.2.Q4_K.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q4_K.gguf) | Q4_K | 4.07GB | | [zephyr-7b-dpo-full-beta-0.2.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q4_K_M.gguf) | Q4_K_M | 4.07GB | | [zephyr-7b-dpo-full-beta-0.2.Q4_1.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q4_1.gguf) | Q4_1 | 4.24GB | | [zephyr-7b-dpo-full-beta-0.2.Q5_0.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q5_0.gguf) | Q5_0 | 4.65GB | | [zephyr-7b-dpo-full-beta-0.2.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q5_K_S.gguf) | Q5_K_S | 4.65GB | | [zephyr-7b-dpo-full-beta-0.2.Q5_K.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q5_K.gguf) | Q5_K 
| 4.78GB | | [zephyr-7b-dpo-full-beta-0.2.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q5_K_M.gguf) | Q5_K_M | 4.78GB | | [zephyr-7b-dpo-full-beta-0.2.Q5_1.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q5_1.gguf) | Q5_1 | 5.07GB | | [zephyr-7b-dpo-full-beta-0.2.Q6_K.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q6_K.gguf) | Q6_K | 5.53GB | | [zephyr-7b-dpo-full-beta-0.2.Q8_0.gguf](https://huggingface.co/RichardErkhov/tianlinliu0121_-_zephyr-7b-dpo-full-beta-0.2-gguf/blob/main/zephyr-7b-dpo-full-beta-0.2.Q8_0.gguf) | Q8_0 | 7.17GB | Original model description: --- license: mit base_model: HuggingFaceH4/mistral-7b-sft-beta tags: - generated_from_trainer model-index: - name: zephyr-7b-dpo-full-beta-0.2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # zephyr-7b-dpo-full-beta-0.2 This model is a fine-tuned version of [HuggingFaceH4/mistral-7b-sft-beta](https://huggingface.co/HuggingFaceH4/mistral-7b-sft-beta) on the None dataset. 
It achieves the following results on the evaluation set: - Loss: 0.7903 - Rewards/chosen: -3.2220 - Rewards/rejected: -7.3367 - Rewards/accuracies: 0.7659 - Rewards/margins: 4.1147 - Logps/rejected: -282.6258 - Logps/chosen: -314.5996 - Logits/rejected: -2.6943 - Logits/chosen: -2.6970 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 4 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - total_train_batch_size: 32 - total_eval_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:| | 0.5631 | 0.26 | 500 | 0.5260 | 0.0288 | -1.2082 | 0.75 | 1.2371 | -251.9833 | -298.3453 | -2.9467 | -2.9577 | | 0.5432 | 0.52 | 1000 | 0.5888 | -0.0335 | -1.8482 | 0.7540 | 1.8147 | -255.1831 | -298.6568 | -2.8465 | -2.8476 | | 0.5368 | 0.77 | 1500 | 0.5860 | -0.4836 | -2.3300 | 0.7619 | 1.8464 | -257.5920 | -300.9073 | -2.8455 | -2.8445 | | 0.0615 | 1.03 | 2000 | 0.6024 | -0.5971 | -2.6919 | 0.7778 | 2.0948 | -259.4018 | -301.4749 | -2.8687 | -2.8639 | | 0.0817 | 1.29 | 2500 | 0.6655 | -1.3554 | -3.8426 | 0.7738 | 2.4872 | -265.1552 | -305.2667 | -2.8257 | -2.8254 | | 0.0617 | 1.55 | 3000 | 0.6421 | -1.2552 | -3.7613 | 0.75 | 2.5062 | -264.7488 | -304.7651 | -2.7744 | -2.7683 | | 0.0765 | 1.81 | 3500 | 0.6582 | 
-1.1492 | -4.0394 | 0.7659 | 2.8902 | -266.1391 | -304.2354 | -2.7403 | -2.7389 | | 0.0178 | 2.07 | 4000 | 0.6797 | -1.8485 | -5.2549 | 0.7619 | 3.4064 | -272.2166 | -307.7317 | -2.7310 | -2.7273 | | 0.0165 | 2.32 | 4500 | 0.7359 | -2.2096 | -6.0498 | 0.7817 | 3.8401 | -276.1910 | -309.5376 | -2.7006 | -2.7001 | | 0.0094 | 2.58 | 5000 | 0.7864 | -2.8828 | -6.8542 | 0.7738 | 3.9713 | -280.2130 | -312.9036 | -2.7185 | -2.7196 | | 0.0094 | 2.84 | 5500 | 0.7953 | -3.1897 | -7.3009 | 0.7579 | 4.1112 | -282.4464 | -314.4378 | -2.6987 | -2.7012 | ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
davidkim205/komt-mistral-7b-v1
davidkim205
"2023-10-24T04:41:07Z"
4,669
32
transformers
[ "transformers", "pytorch", "mistral", "text-generation", "finetuned", "conversational", "en", "ko", "arxiv:2308.06502", "arxiv:2308.06259", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-10-24T01:58:52Z"
--- language: - en - ko pipeline_tag: text-generation tags: - finetuned --- # komt : korean multi task instruction tuning model ![multi task instruction tuning.jpg](https://github.com/davidkim205/komt/assets/16680469/c7f6ade7-247e-4b62-a94f-47e19abea68e) Recently, due to the success of ChatGPT, numerous large language models have emerged in an attempt to catch up with ChatGPT's capabilities. However, when it comes to Korean language performance, it has been observed that many models still struggle to provide accurate answers or generate Korean text effectively. This study addresses these challenges by introducing a multi-task instruction technique that leverages supervised datasets from various tasks to create training data for Large Language Models (LLMs). ## Model Details * **Model Developers** : davidkim(changyeon kim) * **Repository** : https://github.com/davidkim205/komt * **Model Architecture** : The komt-mistral-7b-v1 is is a fine-tuned version of the Mistral-7B-Instruct-v0.1. ## Dataset korean multi-task instruction dataset ## Hardware and Software - nvidia driver : 535.54.03 - CUDA Version: 12.2 ## Training Refer https://github.com/davidkim205/komt ## Prompt template: Mistral ``` <s>[INST] {prompt} [/INST]</s> ``` ## Usage ``` from transformers import AutoTokenizer, AutoModelForCausalLM from transformers import TextStreamer, GenerationConfig model_name='davidkim205/komt-mistral-7b-v1' model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto") tokenizer = AutoTokenizer.from_pretrained(model_name) streamer = TextStreamer(tokenizer) def gen(x): generation_config = GenerationConfig( temperature=0.8, top_p=0.8, top_k=100, max_new_tokens=1024, early_stopping=True, do_sample=True, ) q = f"[INST]{x} [/INST]" gened = model.generate( **tokenizer( q, return_tensors='pt', return_token_type_ids=False ).to('cuda'), generation_config=generation_config, pad_token_id=tokenizer.eos_token_id, eos_token_id=tokenizer.eos_token_id, streamer=streamer, ) 
result_str = tokenizer.decode(gened[0]) start_tag = f"\n\n### Response: " start_index = result_str.find(start_tag) if start_index != -1: result_str = result_str[start_index + len(start_tag):].strip() return result_str print(gen('제주도를 1박2일로 혼자 여행하려고 하는데 여행 코스를 만들어줘')) ``` output ``` <s> [INST]제주도를 1박2일로 혼자 여행하려고 하는데 여행 코스를 만들어줘 [/INST]1박 2일 여행 코스 제주 여행 코스는 다음과 같습니다: 첫째 날: * 아침: 제주시 신라로 이동하여 한라산 국립공원에 도착합니다. 오른쪽 발자국 광장에서 눈에 띄는 맥주숍을 찾아 한 병의 맥주를 마시고 계단으로 올라가 한라산 정상에 도착합니다. * 점심: 정상에서 계단을 내려와 중도에 위치한 골목에서 음식을 즐깁니다. 젓갈, 곰탕, 전복 등 맛있는 제주 특산물을 맛보며 힐링을 즐깁니다. * 오후: 중도에서 남하하여 남서쪽 제주로 이동합니다. 남서쪽 제주는 섬의 자연 경관과 백사장, 아름다운 바다 등 아름다운 풍광이 있는 곳입니다. 숙소에 숙박합니다. 둘째 날: * 아침: 남서쪽 제주에서 북서쪽 제주로 이동합니다. 이 지역은 푸른 잔디 해변과 투명한 바다가 펼쳐져 있는 아름다운 풍광입니다. 숙소에서 아침을 먹고 바다로 향하여 해변에서 휴식을 취합니다. * 점심: 북서쪽 제주의 앞바다에서 시원한 바다를 보며 한 끼의 해산물을 맛보고 계절에 따라 해산물 요리를 즐깁니다. * 오후: 남서쪽 제주에서 이동하여 서쪽 제주로 이동합니다. 서쪽 제주는 절벽과 암석 해변, 수수께끼의 돌 정원 등 독특한 경관이 있는 곳입니다. 이곳에서는 서쪽 제주의 대표적인 명소인 청량사를 방문하고 서쪽 제주의 아름다운 풍광을 감상합니다. * 저녁: 서쪽 제주에서 제주 시로 이동하여 한 끼의 제주 특산물을 맛보고 도착한 제주 도심에서 저녁을 즐깁니다. * 야간: 제주 시의 도심에서 야간 활동을 즐기며 1박 2일의 여행을 마무리합니다. 이렇게 제주를 1박 2일로 혼자 여행하면 제주의 아름다운 풍광, 푸른 잔디 해변, 투명한 바다 등을 경험할 수 있습니다. ``` ## Evaluation For objective model evaluation, we initially used EleutherAI's lm-evaluation-harness but obtained unsatisfactory results. Consequently, we conducted evaluations using ChatGPT, a widely used model, as described in [Self-Alignment with Instruction Backtranslation](https://arxiv.org/pdf/2308.06502.pdf) and [Three Ways of Using Large Language Models to Evaluate Chat](https://arxiv.org/pdf/2308.06259.pdf) . 
| model | score | average(0~5) | percentage | | --------------------------------------- |---------| ------------ | ---------- | | gpt-3.5-turbo(close) | 147 | 3.97 | 79.45% | | naver Cue(close) | 140 | 3.78 | 75.67% | | clova X(close) | 136 | 3.67 | 73.51% | | WizardLM-13B-V1.2(open) | 96 | 2.59 | 51.89% | | Llama-2-7b-chat-hf(open) | 67 | 1.81 | 36.21% | | Llama-2-13b-chat-hf(open) | 73 | 1.91 | 38.37% | | nlpai-lab/kullm-polyglot-12.8b-v2(open) | 70 | 1.89 | 37.83% | | kfkas/Llama-2-ko-7b-Chat(open) | 96 | 2.59 | 51.89% | | beomi/KoAlpaca-Polyglot-12.8B(open) | 100 | 2.70 | 54.05% | | **komt-llama2-7b-v1 (open)(ours)** | **117** | **3.16** | **63.24%** | | **komt-llama2-13b-v1 (open)(ours)** | **129** | **3.48** | **69.72%** | | **komt-llama-30b-v1 (open)(ours)** | **129** | **3.16** | **63.24%** | | **komt-mistral-7b-v1 (open)(ours)** | **131** | **3.54** | **70.81%** |
mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF
mradermacher
"2024-06-08T09:32:10Z"
4,668
0
transformers
[ "transformers", "gguf", "en", "base_model:neovalle/ArmoniosaAnthea_V.2.1", "endpoints_compatible", "region:us" ]
null
"2024-06-07T19:32:28Z"
--- base_model: neovalle/ArmoniosaAnthea_V.2.1 language: - en library_name: transformers quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/neovalle/ArmoniosaAnthea_V.2.1 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ1_S.gguf) | i1-IQ1_S | 1.7 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ1_M.gguf) | i1-IQ1_M | 1.9 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.1 | | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.3 | | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ2_S.gguf) | i1-IQ2_S | 2.4 | | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ2_M.gguf) | i1-IQ2_M | 2.6 | | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q2_K.gguf) | i1-Q2_K | 2.8 | IQ3_XXS probably better | | 
[GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 2.9 | lower quality | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.1 | | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.3 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ3_S.gguf) | i1-IQ3_S | 3.3 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ3_M.gguf) | i1-IQ3_M | 3.4 | | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q3_K_M.gguf) | i1-Q3_K_M | 3.6 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q3_K_L.gguf) | i1-Q3_K_L | 3.9 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.0 | | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q4_0.gguf) | i1-Q4_0 | 4.2 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.2 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q4_K_M.gguf) | i1-Q4_K_M | 4.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.1 | | | 
[GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/ArmoniosaAnthea_V.2.1-i1-GGUF/resolve/main/ArmoniosaAnthea_V.2.1.i1-Q6_K.gguf) | i1-Q6_K | 6.0 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
stablediffusionapi/protovision-xl-high-fidel
stablediffusionapi
"2024-05-08T06:18:39Z"
4,666
3
diffusers
[ "diffusers", "safetensors", "modelslab.com", "stable-diffusion-api", "text-to-image", "ultra-realistic", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionXLPipeline", "region:us" ]
text-to-image
"2024-01-05T01:24:54Z"
--- license: creativeml-openrail-m tags: - modelslab.com - stable-diffusion-api - text-to-image - ultra-realistic pinned: true --- # ProtoVision XL - High Fidelity 3D / Photorealism / Anime / hyperrealism - No Refiner Needed API Inference ![generated from modelslab.com](https://pub-3626123a908346a7a8be8d9295f44e26.r2.dev/generations/10098574191704417798.png) ## Get API Key Get API key from [ModelsLab API](http://modelslab.com), No Payment needed. Replace Key in below code, change **model_id** to "protovision-xl-high-fidel" Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://modelslab.com/docs) Try model for free: [Generate Images](https://modelslab.com/models/protovision-xl-high-fidel) Model link: [View model](https://modelslab.com/models/protovision-xl-high-fidel) View all models: [View Models](https://modelslab.com/models) import requests import json url = "https://modelslab.com/api/v6/images/text2img" payload = json.dumps({ "key": "your_api_key", "model_id": "protovision-xl-high-fidel", "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K", "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime", "width": "512", "height": "512", "samples": "1", "num_inference_steps": "30", "safety_checker": "no", "enhance_prompt": "yes", "seed": None, "guidance_scale": 7.5, "multi_lingual": "no", "panorama": "no", "self_attention": "no", "upscale": "no", "embeddings": "embeddings_model_id", "lora": "lora_model_id", "webhook": None, "track_id": 
None }) headers = { 'Content-Type': 'application/json' } response = requests.request("POST", url, headers=headers, data=payload) print(response.text) > Use this coupon code to get 25% off **DMGG0RBN**
Yntec/EpicDiffusion
Yntec
"2024-05-06T20:46:06Z"
4,664
1
diffusers
[ "diffusers", "safetensors", "johnslegers", "Base Model", "General Purpose", "Everything", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "en", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2024-05-06T17:07:09Z"
--- language: - en library_name: diffusers pipeline_tag: text-to-image tags: - johnslegers - Base Model - General Purpose - Everything - stable-diffusion - stable-diffusion-diffusers - diffusers - text-to-image inference: true license: creativeml-openrail-m --- # Epic Diffusion 1.1 768x768 version of this model for the inference API. Samples and prompts: ![Free online image generator Epic Diffusion](https://cdn-uploads.huggingface.co/production/uploads/63239b8370edc53f51cd5d42/iiHrUEy-4ZE8bp0cVFIjD.png) (Click for larger) Top left: high resolution concept art of an apartment living room overlooking a large futuristic city with floor to ceiling windows and mid century modern furniture cinematic lighting cgsociety Top right: cute 70s little girl in tricycle, Screenshot of an surreal analog round jean minimalist architecture, pretty eyes, Sharp still light, 35mm from a sci fi blockbuster color movie made in 2023, beautiful portrait, Dorothy, set in 1860, in front of a spaceship that has just landed on an alien planet, are all wearing, a robot stands nearby Bottom left: analog style 70s color Closeup photograph of young Tom Cruise as James Bond, golden gun behind the scenes Bottom right: planet base, windows, night, ground level, no man's sky, digital art, highly detailed, intricate, sharp focus, Trending on Artstation HQ, deviantart, unreal engine 5, 4K UHD image Original page: https://huggingface.co/johnslegers/epic-diffusion-v1.1
gligen/gligen-generation-text-image-box
gligen
"2023-03-04T17:43:36Z"
4,663
1
diffusers
[ "diffusers", "region:us" ]
null
"2023-03-04T17:36:52Z"
Entry not found
dmis-lab/biobert-large-cased-v1.1
dmis-lab
"2020-10-14T06:19:39Z"
4,662
5
transformers
[ "transformers", "pytorch", "endpoints_compatible", "region:us" ]
null
"2022-03-02T23:29:05Z"
Entry not found
Zigeng/SlimSAM-uniform-50
Zigeng
"2024-01-11T19:22:58Z"
4,660
8
transformers
[ "transformers", "safetensors", "sam", "mask-generation", "slimsam", "vision", "arxiv:2312.05284", "license:apache-2.0", "endpoints_compatible", "region:us" ]
mask-generation
"2024-01-08T12:31:54Z"
--- license: apache-2.0 tags: - slimsam - vision --- # SlimSAM: 0.1% Data Makes Segment Anything Slim <div align="center"> <img src="images/paper/intro.PNG" width="66%"> <img src="images/paper/everything.PNG" width="100%"> </div> > **0.1% Data Makes Segment Anything Slim** > [Zigeng Chen](https://github.com/czg1225), [Gongfan Fang](https://fangggf.github.io/), [Xinyin Ma](https://horseee.github.io/), [Xinchao Wang](https://sites.google.com/site/sitexinchaowang/) > [Learning and Vision Lab](http://lv-nus.org/), National University of Singapore > Paper: [[Arxiv]](https://arxiv.org/abs/2312.05284) ## Introduction <div align="center"> <img src="images/paper/process.PNG" width="100%"> </div> **SlimSAM** is a novel SAM compression method, which efficiently reuses pre-trained SAMs without the necessity for extensive retraining. This is achieved by the efficient reuse of pre-trained SAMs through a unified pruning-distillation framework. To enhance knowledge inheritance from the original SAM, we employ an innovative alternate slimming strategy that partitions the compression process into a progressive procedure. Diverging from prior pruning techniques, we meticulously prune and distill decoupled model structures in an alternating fashion. Furthermore, a novel label-free pruning criterion is also proposed to align the pruning objective with the optimization target, thereby boosting the post-distillation after pruning. ![Frame](images/paper/frame.PNG?raw=true) SlimSAM achieves approaching performance while reducing the parameter counts to **0.9\% (5.7M)**, MACs to **0.8\% (21G)**, and requiring mere **0.1\% (10k)** of the training data when compared to the original SAM-H. Extensive experiments demonstrate that our method realize significant superior performance while utilizing over **10 times** less training data when compared to other SAM compression methods. 
## Visualization Results Qualitative comparison of results obtained using point prompts, box prompts, and segment everything prompts are shown in the following section. ### Segment Everything Prompts <div align="center"> <img src="images/paper/everything2.PNG" width="100%"> </div> ### Box Prompts and Point Prompts <div align="center"> <img src="images/paper/prompt.PNG" width="100%"> </div> ## Quantitative Results We conducted a comprehensive comparison encompassing performance, efficiency, and training costs with other SAM compression methods and structural pruning methods. ### Comparing with other SAM compression methods. <div align="center"> <img src="images/paper/compare_tab1.PNG" width="100%"> </div> ### Comparing with other structural pruning methods. <div align="center"> <img src="images/paper/compare_tab2.PNG" width="50%"> </div> ## <a name="Models"></a>Model Using Fast state_dict loading for local uniform pruning SlimSAM-50 model: ``` python model = SamModel.from_pretrained("Zigeng/SlimSAM-uniform-50").to("cuda") processor = SamProcessor.from_pretrained("Zigeng/SlimSAM-uniform-50") img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB") input_points = [[[450, 600]]] # 2D localization of a window inputs = processor(raw_image, input_points=input_points, return_tensors="pt").to("cuda") outputs = model(**inputs) masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()) scores = outputs.iou_scores ``` ## BibTex of our SlimSAM If you use SlimSAM in your research, please use the following BibTeX entry. Thank you! 
```bibtex @misc{chen202301, title={0.1% Data Makes Segment Anything Slim}, author={Zigeng Chen and Gongfan Fang and Xinyin Ma and Xinchao Wang}, year={2023}, eprint={2312.05284}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` ## Acknowledgement <details> <summary> <a href="https://github.com/facebookresearch/segment-anything">SAM</a> (Segment Anything) [<b>bib</b>] </summary> ```bibtex @article{kirillov2023segany, title={Segment Anything}, author={Kirillov, Alexander and Mintun, Eric and Ravi, Nikhila and Mao, Hanzi and Rolland, Chloe and Gustafson, Laura and Xiao, Tete and Whitehead, Spencer and Berg, Alexander C. and Lo, Wan-Yen and Doll{\'a}r, Piotr and Girshick, Ross}, journal={arXiv:2304.02643}, year={2023} } ``` </details> <details> <summary> <a href="https://github.com/VainF/Torch-Pruning">Torch Pruning</a> (DepGraph: Towards Any Structural Pruning) [<b>bib</b>] </summary> ```bibtex @inproceedings{fang2023depgraph, title={Depgraph: Towards any structural pruning}, author={Fang, Gongfan and Ma, Xinyin and Song, Mingli and Mi, Michael Bi and Wang, Xinchao}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={16091--16101}, year={2023} } ``` </details>
speakleash/Bielik-7B-Instruct-v0.1-GPTQ
speakleash
"2024-04-04T12:55:57Z"
4,659
5
transformers
[ "transformers", "safetensors", "mistral", "text-generation", "finetuned", "autoquant", "gptq", "conversational", "pl", "license:cc-by-nc-4.0", "autotrain_compatible", "text-generation-inference", "4-bit", "region:us" ]
text-generation
"2024-04-03T13:34:12Z"
--- language: - pl license: cc-by-nc-4.0 library_name: transformers tags: - finetuned - autoquant - gptq inference: false --- <p align="center"> <img src="https://huggingface.co/speakleash/Bielik-7B-Instruct-v0.1/raw/main/speakleash_cyfronet.png"> </p> # Bielik-7B-Instruct-v0.1-GPTQ This repo contains GPTQ format model files for [SpeakLeash](https://speakleash.org/)'s [Bielik-7B-Instruct-v0.1](https://huggingface.co/speakleash/Bielik-7B-Instruct-v0.1). <b><u>DISCLAIMER: Be aware that quantised models show reduced response quality and possible hallucinations!</u></b><br> Simple Colab notebook for testing: https://colab.research.google.com/drive/1Al9glPVCuOXbtDsks8cMcuzkuu8YDzpg?usp=sharing ### Model description: * **Developed by:** [SpeakLeash](https://speakleash.org/) * **Language:** Polish * **Model type:** causal decoder-only * **Quant from:** [Bielik-7B-Instruct-v0.1](https://huggingface.co/speakleash/Bielik-7B-Instruct-v0.1) * **Finetuned from:** [Bielik-7B-v0.1](https://huggingface.co/speakleash/Bielik-7B-v0.1) * **License:** CC BY NC 4.0 (non-commercial use) * **Model ref:** speakleash:e38140bea0d48f1218540800bbc67e89 ## Contact Us If you have any questions or suggestions, please use the discussion tab. If you want to contact us directly, join our [Discord SpeakLeash](https://discord.gg/3G9DVM39).
RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf
RichardErkhov
"2024-06-06T08:30:05Z"
4,657
0
null
[ "gguf", "region:us" ]
null
"2024-05-03T03:15:48Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) gpt4all-falcon - GGUF - Model creator: https://huggingface.co/nomic-ai/ - Original model: https://huggingface.co/nomic-ai/gpt4all-falcon/ | Name | Quant method | Size | | ---- | ---- | ---- | | [gpt4all-falcon.Q2_K.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q2_K.gguf) | Q2_K | 3.59GB | | [gpt4all-falcon.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.IQ3_XS.gguf) | IQ3_XS | 3.59GB | | [gpt4all-falcon.IQ3_S.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.IQ3_S.gguf) | IQ3_S | 3.59GB | | [gpt4all-falcon.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q3_K_S.gguf) | Q3_K_S | 3.59GB | | [gpt4all-falcon.IQ3_M.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.IQ3_M.gguf) | IQ3_M | 3.71GB | | [gpt4all-falcon.Q3_K.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q3_K.gguf) | Q3_K | 3.86GB | | [gpt4all-falcon.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q3_K_M.gguf) | Q3_K_M | 3.86GB | | [gpt4all-falcon.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q3_K_L.gguf) | Q3_K_L | 4.08GB | | [gpt4all-falcon.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.IQ4_XS.gguf) | IQ4_XS | 3.89GB | | [gpt4all-falcon.Q4_0.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q4_0.gguf) | Q4_0 | 3.92GB | | 
[gpt4all-falcon.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.IQ4_NL.gguf) | IQ4_NL | 3.96GB | | [gpt4all-falcon.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q4_K_S.gguf) | Q4_K_S | 4.42GB | | [gpt4all-falcon.Q4_K.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q4_K.gguf) | Q4_K | 4.63GB | | [gpt4all-falcon.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q4_K_M.gguf) | Q4_K_M | 4.63GB | | [gpt4all-falcon.Q4_1.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q4_1.gguf) | Q4_1 | 4.32GB | | [gpt4all-falcon.Q5_0.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q5_0.gguf) | Q5_0 | 4.73GB | | [gpt4all-falcon.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q5_K_S.gguf) | Q5_K_S | 4.98GB | | [gpt4all-falcon.Q5_K.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q5_K.gguf) | Q5_K | 5.34GB | | [gpt4all-falcon.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q5_K_M.gguf) | Q5_K_M | 5.34GB | | [gpt4all-falcon.Q5_1.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q5_1.gguf) | Q5_1 | 5.13GB | | [gpt4all-falcon.Q6_K.gguf](https://huggingface.co/RichardErkhov/nomic-ai_-_gpt4all-falcon-gguf/blob/main/gpt4all-falcon.Q6_K.gguf) | Q6_K | 6.55GB | Original model description: --- license: apache-2.0 datasets: - nomic-ai/gpt4all-j-prompt-generations language: - en pipeline_tag: text-generation --- # Model Card for GPT4All-Falcon An Apache-2 licensed chatbot trained over a massive curated corpus of assistant interactions including word problems, 
multi-turn dialogue, code, poems, songs, and stories. ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This model has been finetuned from [Falcon](https://huggingface.co/tiiuae/falcon-7b) - **Developed by:** [Nomic AI](https://home.nomic.ai) - **Model Type:** A finetuned Falcon 7B model on assistant style interaction data - **Language(s) (NLP):** English - **License:** Apache-2 - **Finetuned from model [optional]:** [Falcon](https://huggingface.co/tiiuae/falcon-7b) To download a model with a specific revision run ```python from transformers import AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("nomic-ai/gpt4all-falcon", trust_remote_code=True) ``` Downloading without specifying `revision` defaults to `main`/`v1.0`. To use it for inference with Cuda, run ```python from transformers import AutoTokenizer, pipeline import transformers import torch tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False) model.to("cuda:0") prompt = "Describe a painting of a falcon in a very detailed way." # Change this to your prompt prompt_template = f"### Instruction: {prompt}\n### Response:" tokens = tokenizer(prompt_template, return_tensors="pt").input_ids.to("cuda:0") output = model.generate(input_ids=tokens, max_new_tokens=256, do_sample=True, temperature=0.8) # Print the generated text print(tokenizer.decode(output[0])) ``` ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [https://github.com/nomic-ai/gpt4all](https://github.com/nomic-ai/gpt4all) - **Base Model Repository:** [https://huggingface.co/tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) - **Demo [optional]:** [https://gpt4all.io/](https://gpt4all.io/) ### Training Procedure GPT4All is made possible by our compute partner [Paperspace](https://www.paperspace.com/). Trained on a DGX cluster with 8 A100 80GB GPUs for ~12 hours. 
Using Deepspeed + Accelerate, we use a global batch size of 256 with a learning rate of 2e-5. More information can be found in the repo. ### Results Results on common sense reasoning benchmarks | Model | BoolQ | PIQA | HellaSwag | WinoGrande | ARC-e | ARC-c | OBQA | Avg. | |:--------------------------|:--------:|:--------:|:---------:|:----------:|:--------:|:--------:|:--------:|:--------:| | GPT4All-J 6B v1.0 | 73.4 | 74.8 | 63.4 | 64.7 | 54.9 | 36.0 | 40.2 | 58.2 | | GPT4All-J v1.1-breezy | 74.0 | 75.1 | 63.2 | 63.6 | 55.4 | 34.9 | 38.4 | 57.8 | | GPT4All-J v1.2-jazzy | 74.8 | 74.9 | 63.6 | 63.8 | 56.6 | 35.3 | 41.0 | 58.6 | | GPT4All-J v1.3-groovy | 73.6 | 74.3 | 63.8 | 63.5 | 57.7 | 35.0 | 38.8 | 58.1 | | GPT4All-J Lora 6B | 68.6 | 75.8 | 66.2 | 63.5 | 56.4 | 35.7 | 40.2 | 58.1 | | GPT4All LLaMa Lora 7B | 73.1 | 77.6 | 72.1 | 67.8 | 51.1 | 40.4 | 40.2 | 60.3 | | GPT4All 13B snoozy | **83.3** | 79.2 | 75.0 | **71.3** | 60.9 | 44.2 | 43.4 | 65.3 | | GPT4All Falcon | 77.6 | 79.8 | 74.9 | 70.1 | 67.9 | 43.4 | 42.6 | 65.2 | | Dolly 6B | 68.8 | 77.3 | 67.6 | 63.9 | 62.9 | 38.7 | 41.2 | 60.1 | | Dolly 12B | 56.7 | 75.4 | 71.0 | 62.2 | 64.6 | 38.5 | 40.4 | 58.4 | | Alpaca 7B | 73.9 | 77.2 | 73.9 | 66.1 | 59.8 | 43.3 | 43.4 | 62.4 | | Alpaca Lora 7B | 74.3 | 79.3 | 74.0 | 68.8 | 56.6 | 43.9 | 42.6 | 62.8 | | GPT-J 6.7B | 65.4 | 76.2 | 66.2 | 64.1 | 62.2 | 36.6 | 38.2 | 58.4 | | LLama 7B | 73.1 | 77.4 | 73.0 | 66.9 | 52.5 | 41.4 | 42.4 | 61.0 | | LLama 13B | 68.5 | 79.1 | 76.2 | 70.1 | 60.0 | **44.6** | 42.2 | 63.0 | | Pythia 6.7B | 63.5 | 76.3 | 64.0 | 61.1 | 61.3 | 35.2 | 37.2 | 57.0 | | Pythia 12B | 67.7 | 76.6 | 67.3 | 63.8 | 63.9 | 34.8 | 38 | 58.9 | | Fastchat T5 | 81.5 | 64.6 | 46.3 | 61.8 | 49.3 | 33.3 | 39.4 | 53.7 | | Fastchat Vicuña 7B | 76.6 | 77.2 | 70.7 | 67.3 | 53.5 | 41.2 | 40.8 | 61.0 | | Fastchat Vicuña 13B | 81.5 | 76.8 | 73.3 | 66.7 | 57.4 | 42.7 | 43.6 | 63.1 | | StableVicuña RLHF | 82.3 | 78.6 | 74.1 | 70.9 | 61.0 | 43.5 | **44.4** | 65.0 | | 
StableLM Tuned | 62.5 | 71.2 | 53.6 | 54.8 | 52.4 | 31.1 | 33.4 | 51.3 | | StableLM Base | 60.1 | 67.4 | 41.2 | 50.1 | 44.9 | 27.0 | 32.0 | 42.2 | | Koala 13B | 76.5 | 77.9 | 72.6 | 68.8 | 54.3 | 41.0 | 42.8 | 62.0 | | Open Assistant Pythia 12B | 67.9 | 78.0 | 68.1 | 65.0 | 64.2 | 40.4 | 43.2 | 61.0 | | Mosaic MPT7B | 74.8 | 79.3 | 76.3 | 68.6 | 70.0 | 42.2 | 42.6 | 64.8 | | Mosaic mpt-instruct | 74.3 | 80.4 | **77.2** | 67.8 | **72.2** | **44.6** | 43.0 | **65.6** | | Mosaic mpt-chat | 77.1 | 78.2 | 74.5 | 67.5 | 69.4 | 43.3 | 44.2 | 64.9 | | Wizard 7B | 78.4 | 77.2 | 69.9 | 66.5 | 56.8 | 40.5 | 42.6 | 61.7 | | Wizard 7B Uncensored | 77.7 | 74.2 | 68.0 | 65.2 | 53.5 | 38.7 | 41.6 | 59.8 | | Wizard 13B Uncensored | 78.4 | 75.5 | 72.1 | 69.5 | 57.5 | 40.4 | 44.0 | 62.5 | | GPT4-x-Vicuna-13b | 81.3 | 75.0 | 75.2 | 65.0 | 58.7 | 43.9 | 43.6 | 62.2 | | Falcon 7b | 73.6 | **80.7** | 76.3 | 67.3 | 71.0 | 43.3 | 44.4 | 65.2 | | text-davinci-003 | 88.1 | 83.8 | 83.4 | 75.8 | 83.9 | 63.9 | 51.0 | 75.7 |
Helsinki-NLP/opus-mt-tc-big-tr-en
Helsinki-NLP
"2023-11-28T09:30:38Z"
4,656
10
transformers
[ "transformers", "pytorch", "tf", "safetensors", "marian", "text2text-generation", "translation", "opus-mt-tc", "en", "tr", "license:cc-by-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
translation
"2022-04-13T17:02:58Z"
--- language: - en - tr tags: - translation - opus-mt-tc license: cc-by-4.0 model-index: - name: opus-mt-tc-big-tr-en results: - task: name: Translation tur-eng type: translation args: tur-eng dataset: name: flores101-devtest type: flores_101 args: tur eng devtest metrics: - name: BLEU type: bleu value: 37.6 - task: name: Translation tur-eng type: translation args: tur-eng dataset: name: newsdev2016 type: newsdev2016 args: tur-eng metrics: - name: BLEU type: bleu value: 32.1 - task: name: Translation tur-eng type: translation args: tur-eng dataset: name: tatoeba-test-v2021-08-07 type: tatoeba_mt args: tur-eng metrics: - name: BLEU type: bleu value: 57.6 - task: name: Translation tur-eng type: translation args: tur-eng dataset: name: newstest2016 type: wmt-2016-news args: tur-eng metrics: - name: BLEU type: bleu value: 29.3 - task: name: Translation tur-eng type: translation args: tur-eng dataset: name: newstest2017 type: wmt-2017-news args: tur-eng metrics: - name: BLEU type: bleu value: 29.7 - task: name: Translation tur-eng type: translation args: tur-eng dataset: name: newstest2018 type: wmt-2018-news args: tur-eng metrics: - name: BLEU type: bleu value: 30.7 --- # opus-mt-tc-big-tr-en Neural machine translation model for translating from Turkish (tr) to English (en). This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to pyTorch using the transformers library by huggingface. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train). 
* Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please, cite if you use this model.) ``` @inproceedings{tiedemann-thottingal-2020-opus, title = "{OPUS}-{MT} {--} Building open translation services for the World", author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh}, booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", month = nov, year = "2020", address = "Lisboa, Portugal", publisher = "European Association for Machine Translation", url = "https://aclanthology.org/2020.eamt-1.61", pages = "479--480", } @inproceedings{tiedemann-2020-tatoeba, title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}", author = {Tiedemann, J{\"o}rg}, booktitle = "Proceedings of the Fifth Conference on Machine Translation", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2020.wmt-1.139", pages = "1174--1182", } ``` ## Model info * Release: 2022-03-17 * source language(s): tur * target language(s): eng * model: transformer-big * data: opusTCv20210807+bt ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge)) * tokenization: SentencePiece (spm32k,spm32k) * original model: [opusTCv20210807+bt_transformer-big_2022-03-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/tur-eng/opusTCv20210807+bt_transformer-big_2022-03-17.zip) * more information released models: [OPUS-MT tur-eng README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/tur-eng/README.md) ## Usage A short example code: ```python from transformers import MarianMTModel, MarianTokenizer src_text = [ "Allahsızlığı Yayma Kürsüsü başkanıydı.", "Tom'a ne olduğunu öğrenin." 
] model_name = "pytorch-models/opus-mt-tc-big-tr-en" tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) for t in translated: print( tokenizer.decode(t, skip_special_tokens=True) ) # expected output: # He was the president of the Curse of Spreading Godlessness. # Find out what happened to Tom. ``` You can also use OPUS-MT models with the transformers pipelines, for example: ```python from transformers import pipeline pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-tr-en") print(pipe("Allahsızlığı Yayma Kürsüsü başkanıydı.")) # expected output: He was the president of the Curse of Spreading Godlessness. ``` ## Benchmarks * test set translations: [opusTCv20210807+bt_transformer-big_2022-03-17.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/tur-eng/opusTCv20210807+bt_transformer-big_2022-03-17.test.txt) * test set scores: [opusTCv20210807+bt_transformer-big_2022-03-17.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/tur-eng/opusTCv20210807+bt_transformer-big_2022-03-17.eval.txt) * benchmark results: [benchmark_results.txt](benchmark_results.txt) * benchmark output: [benchmark_translations.zip](benchmark_translations.zip) | langpair | testset | chr-F | BLEU | #sent | #words | |----------|---------|-------|-------|-------|--------| | tur-eng | tatoeba-test-v2021-08-07 | 0.71895 | 57.6 | 13907 | 109231 | | tur-eng | flores101-devtest | 0.64152 | 37.6 | 1012 | 24721 | | tur-eng | newsdev2016 | 0.58658 | 32.1 | 1001 | 21988 | | tur-eng | newstest2016 | 0.56960 | 29.3 | 3000 | 66175 | | tur-eng | newstest2017 | 0.57455 | 29.7 | 3007 | 67703 | | tur-eng | newstest2018 | 0.58488 | 30.7 | 3000 | 68725 | ## Acknowledgements The work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 
2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union’s Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland. ## Model conversion info * transformers version: 4.16.2 * OPUS-MT git hash: 3405783 * port time: Wed Apr 13 20:02:48 EEST 2022 * port machine: LM0-400-22516.local
VietAI/vit5-base
VietAI
"2022-09-27T18:09:26Z"
4,655
8
transformers
[ "transformers", "pytorch", "tf", "jax", "t5", "text2text-generation", "summarization", "translation", "question-answering", "vi", "dataset:cc100", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
question-answering
"2022-03-14T16:36:06Z"
--- language: vi datasets: - cc100 tags: - summarization - translation - question-answering license: mit --- # ViT5-base State-of-the-art pretrained Transformer-based encoder-decoder model for Vietnamese. ## How to use For more details, do check out [our Github repo](https://github.com/vietai/ViT5). [Finetunning Example can be found here](https://github.com/vietai/ViT5/tree/main/finetunning_huggingface). ```python from transformers import AutoTokenizer, AutoModelForSeq2SeqLM ​ tokenizer = AutoTokenizer.from_pretrained("VietAI/vit5-base") model = AutoModelForSeq2SeqLM.from_pretrained("VietAI/vit5-base") model.cuda() ``` ## Citation ``` @inproceedings{phan-etal-2022-vit5, title = "{V}i{T}5: Pretrained Text-to-Text Transformer for {V}ietnamese Language Generation", author = "Phan, Long and Tran, Hieu and Nguyen, Hieu and Trinh, Trieu H.", booktitle = "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Student Research Workshop", year = "2022", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2022.naacl-srw.18", pages = "136--142", } ```
Mizuiro-sakura/luke-japanese-base-finetuned-ner
Mizuiro-sakura
"2023-05-12T00:36:17Z"
4,645
9
transformers
[ "transformers", "pytorch", "safetensors", "luke", "token-classification", "ner", "固有表現抽出", "named entity recognition", "named-entity-recognition", "ja", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
"2023-01-17T23:36:52Z"
--- license: mit language: ja tags: - luke - pytorch - transformers - ner - 固有表現抽出 - named entity recognition - named-entity-recognition --- # このモデルはluke-japanese-baseをファインチューニングして、固有表現抽出(NER)に用いれるようにしたものです。 このモデルはluke-japanese-baseを Wikipediaを用いた日本語の固有表現抽出データセット(ストックマーク社、https://github.com/stockmarkteam/ner-wikipedia-dataset )を用いてファインチューニングしたものです。 固有表現抽出(NER)タスクに用いることができます。 # This model is fine-tuned model for Named-Entity-Recognition(NER) which is based on luke-japanese-base This model is fine-tuned by using Wikipedia dataset. You could use this model for NER tasks. # モデルの精度 accuracy of model || precision |recall | f1-score | support| |---|----|----|----|----| |その他の組織名 | 0.76 | 0.77 | 0.77 | 238| |イベント名  |0.83 |0.90 | 0.87 |215| |人名   |0.88 |0.91 | 0.90 | 546| |地名 | 0.84 | 0.83 |0.83 | 440| |政治的組織名 | 0.80 |0.84 | 0.82 | 263| |施設名 | 0.78 | 0.83 | 0.80 | 241| |法人名 | 0.88 | 0.90 | 0.89 | 487| |製品名 | 0.74 | 0.80 |0.77 | 252| |micro avg |0.83 |0.86 | 0.84 | 2682| |macro avg | 0.81 | 0.85 | 0.83 | 2682| |weighted avg |0.83 | 0.86 | 0.84 | 2682| # How to use 使い方 sentencepieceとtransformersをインストールして (pip install sentencepiece , pip install transformers) 以下のコードを実行することで、NERタスクを解かせることができます。 please execute this code. ```python from transformers import MLukeTokenizer,pipeline, LukeForTokenClassification tokenizer = MLukeTokenizer.from_pretrained('Mizuiro-sakura/luke-japanese-base-finetuned-ner') model=LukeForTokenClassification.from_pretrained('Mizuiro-sakura/luke-japanese-base-finetuned-ner') # 学習済みモデルの読み込み text=('昨日は東京で買い物をした') ner=pipeline('ner', model=model, tokenizer=tokenizer) result=ner(text) print(result) ``` # what is Luke? Lukeとは?[1] LUKE (Language Understanding with Knowledge-based Embeddings) is a new pre-trained contextualized representation of words and entities based on transformer. LUKE treats words and entities in a given text as independent tokens, and outputs contextualized representations of them. 
LUKE adopts an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the transformer, and considers the types of tokens (words or entities) when computing attention scores. LUKE achieves state-of-the-art results on five popular NLP benchmarks including SQuAD v1.1 (extractive question answering), CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), TACRED (relation classification), and Open Entity (entity typing). luke-japaneseは、単語とエンティティの知識拡張型訓練済み Transformer モデルLUKEの日本語版です。LUKE は単語とエンティティを独立したトークンとして扱い、これらの文脈を考慮した表現を出力します。 # Acknowledgments 謝辞 Lukeの開発者である山田先生とStudio ousiaさんには感謝いたします。 I would like to thank Mr.Yamada @ikuyamada and Studio ousia @StudioOusia. # Citation [1]@inproceedings{yamada2020luke, title={LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention}, author={Ikuya Yamada and Akari Asai and Hiroyuki Shindo and Hideaki Takeda and Yuji Matsumoto}, booktitle={EMNLP}, year={2020} }
Fjoralb1/multilingual-e5-small-nli-matryoshka-128
Fjoralb1
"2024-03-12T11:37:15Z"
4,645
0
sentence-transformers
[ "sentence-transformers", "safetensors", "bert", "feature-extraction", "sentence-similarity", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
"2024-03-12T11:36:50Z"
--- library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity --- # Fjoralb1/multilingual-e5-small-nli-matryoshka-128 This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 128 dimensional dense vector space and can be used for tasks like clustering or semantic search. <!--- Describe your model here --> ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('Fjoralb1/multilingual-e5-small-nli-matryoshka-128') embeddings = model.encode(sentences) print(embeddings) ``` ## Evaluation Results <!--- Describe how your model was evaluated --> For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=Fjoralb1/multilingual-e5-small-nli-matryoshka-128) ## Training The model was trained with the parameters: **DataLoader**: `sentence_transformers.datasets.NoDuplicatesDataLoader.NoDuplicatesDataLoader` of length 2201 with parameters: ``` {'batch_size': 256} ``` **Loss**: `sentence_transformers.losses.MatryoshkaLoss.MatryoshkaLoss` with parameters: ``` {'loss': 'MultipleNegativesRankingLoss', 'matryoshka_dims': [256, 128, 64, 32, 16], 'matryoshka_weights': [1, 1, 1, 1, 1], 'n_dims_per_step': -1} ``` Parameters of the fit()-Method: ``` { "epochs": 1, "evaluation_steps": 220, "evaluator": "sentence_transformers.evaluation.EmbeddingSimilarityEvaluator.EmbeddingSimilarityEvaluator", "max_grad_norm": 1, "optimizer_class": "<class 'torch.optim.adamw.AdamW'>", "optimizer_params": { "lr": 2e-05 }, "scheduler": "WarmupLinear", "steps_per_epoch": null, 
"warmup_steps": 221, "weight_decay": 0.01 } ``` ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 75, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Dense({'in_features': 384, 'out_features': 128, 'bias': True, 'activation_function': 'torch.nn.modules.activation.Tanh'}) ) ``` ## Citing & Authors <!--- Describe where people can find more information -->
Yukang/LongAlpaca-70B
Yukang
"2023-11-01T12:57:26Z"
4,642
18
transformers
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "arxiv:2309.12307", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-10-09T02:53:36Z"
# LongLoRA and LongAlpaca for Long-context LLMs [![Huggingface Models](https://img.shields.io/badge/Models-Huggingface%20Models-bron)](https://huggingface.co/Yukang) [![Github](https://img.shields.io/badge/Github-Repo-cyan)](https://github.com/dvlab-research/LongLoRA) [![Data](https://img.shields.io/badge/Data-LongAlpaca%2012k-light)](https://huggingface.co/datasets/Yukang/LongAlpaca-12k) [![Paper](https://img.shields.io/badge/Paper-Arvix-blue)](https://arxiv.org/abs/2309.12307) [![Code License](https://img.shields.io/badge/Code%20License-Apache_2.0-yellow.svg)](https://github.com/dvlab-research/LongLoRA/blob/main/LICENSE) [![Data License](https://img.shields.io/badge/Data%20License-CC%20By%20NC%204.0-orange.svg)](https://github.com/dvlab-research/LongLoRA/blob/main/DATA_LICENSE) [![Weight License](https://img.shields.io/badge/Weight%20License-CC%20By%20NC%204.0-red)](https://github.com/dvlab-research/LongLoRA/blob/main/WEIGHT_LICENSE) For detailed usage and codes, please visit the [Github project](https://github.com/dvlab-research/LongLoRA). ## TABLE OF CONTENTS 1. [News](#news) 2. [Examples](#examples) 3. [Highlights](#highlights) 4. [How to contribute](#how-to-contribute) 5. [Requirements](#usage-requirements) 6. [Installation and quick guide](#installation-and-quick-guide) 7. [LongAlpaca Data](#longalpaca-data) 8. [Models](#models) 9. [Training](#training) 10. [Evaluation](#evaluation) 11. [Demo](#demo) 12. [Data Generation via Pdf2Text](#data-generation-via-pdf2text) 13. [Citation](#citation) 14. [Acknowledgement](#acknowledgement) 15. [License](#license) ## News - [x] [2023.10.8] **We release the long instruction-following dataset**, [LongAlpaca-12k](https://huggingface.co/datasets/Yukang/LongAlpaca-12k) and **the corresponding models**, [LongAlpaca-7B](https://huggingface.co/Yukang/LongAlpaca-7B), [LongAlpaca-13B](https://huggingface.co/Yukang/LongAlpaca-13B), and [LongAlpaca-70B](https://huggingface.co/Yukang/LongAlpaca-70B). 
- (*The previous sft models*, [Llama-2-13b-chat-longlora-32k-sft](https://huggingface.co/Yukang/Llama-2-13b-chat-longlora-32k-sft) and [Llama-2-70b-chat-longlora-32k-sft](https://huggingface.co/Yukang/Llama-2-70b-chat-longlora-32k-sft), *have been depreciated*.) - [x] [2023.10.3] We add support GPTNeoX models. Please refer to this [PR](https://github.com/dvlab-research/LongLoRA/pull/32) for usage. Thanks for @naubull2 for this contribution. - [x] [2023.9.22] We release all our fine-tuned [models](https://huggingface.co/Yukang), including **70B-32k models**, [LLaMA2-LongLoRA-70B-32k](https://huggingface.co/Yukang/Llama-2-70b-longlora-32k), [LLaMA2-LongLoRA-7B-100k](https://huggingface.co/Yukang/Llama-2-7b-longlora-100k-ft). Welcome to check them out! - [x] [2023.9.22] We release [Paper](http://arxiv.org/abs/2309.12307) and this GitHub repo, including training and evaluation code. **LongLoRA: Efficient Fine-tuning of Long-Context Large Language Models [[Paper](http://arxiv.org/abs/2309.12307)]** <br /> [Yukang Chen](https://scholar.google.com/citations?user=6p0ygKUAAAAJ&hl=en), [Shengju Qian](https://scholar.google.com/citations?user=QNnWmasAAAAJ), [Haotian Tang](https://scholar.google.com/citations?user=WxL13BAAAAAJ&hl), [Xin Lai](https://scholar.google.com/citations?user=tqNDPA4AAAAJ&hl=zh-CN), [Zhijian Liu](https://scholar.google.com/citations?user=3coYSTUAAAAJ&hl=en), [Song Han](https://scholar.google.com/citations?user=E0iCaa4AAAAJ&hl=zh-CN), [Jiaya Jia](https://scholar.google.com/citations?user=XPAkzTEAAAAJ&hl=en)<br /> ## Highlights 1. In LongLoRA approach, The proposed shifted short attention is easy to implement, compatible with Flash-Attention, and is not required during inference. 2. 
We released all our models, including models from 7B to 70B, context length from 8k to 100k, including [LLaMA2-LongLoRA-7B-100k](https://huggingface.co/Yukang/Llama-2-7b-longlora-100k-ft), [LLaMA2-LongLoRA-13B-64k](https://huggingface.co/Yukang/Llama-2-13b-longlora-64k), and [LLaMA2-LongLoRA-70B-32k](https://huggingface.co/Yukang/Llama-2-70b-longlora-32k). 3. We built up a long-context instruction-following dataset, [LongAlpaca-12k](#longalpaca-data). We released the corresponding [LongAlpaca-7B](https://huggingface.co/Yukang/LongAlpaca-7B), [LongAlpaca-13B](https://huggingface.co/Yukang/LongAlpaca-13B) and [LongAlpaca-70B](https://huggingface.co/Yukang/LongAlpaca-70B) models. To our best knowledge, this is the first open-sourced long-context 70B model. ## How to Contribute - Make sure to have git installed. - Create your own [fork](https://github.com/dvlab-research/LongLoRA/fork) of the project. - Clone the repository on your local machine, using git clone and pasting the url of this project. - Read both the `Requirements` and `Installation and Quick Guide` sections below. - Commit and push your changes. - Make a pull request when finished modifying the project. ## Usage Requirements To download and use the [pre-trained weights](#pre-trained-weights) you will need: 1. Hugging Face (HF) account with valid email. Note, the email used for HF must alse be used for the license agreement. 2. Accept the Meta [license and acceptable use policy](https://ai.meta.com/resources/models-and-libraries/llama-downloads/) ## Installation and Quick Guide To install and run the application: 1. [Fork this repo](https://github.com/dvlab-research/LongLoRA/fork) on github 2. Clone the repository on your local machine, using git clone and pasting the url of this project. 3. Run the following code: ``` pip install -r requirements.txt pip install flash-attn --no-build-isolation ``` 4. 
Use either a [Released model](#released-models) or [Fine tune](#fine-tuning) a model to fit your preferences. 5. Test your model by chat. 6. Deploy your own demo. ## LongAlpaca Data LongAlpaca-12k contains 9k long QA data that we collected and 3k short QA sampled from the original [Alpaca data](https://github.com/tatsu-lab/stanford_alpaca/blob/main/alpaca_data.json). This is to avoid the case that the model might degrade at short instruction following. The data we collect contains various types and amounts as the following figure. | Data | Short QA | Long QA | Total | Download | |:---------------|----------|----------|----------|----------| | LongAlpaca-12k | 3k | 9k | 12k | [Link](https://huggingface.co/datasets/Yukang/LongAlpaca-12k) | Following the original Alpaca format, our Long QA data uses the following prompts for fine-tuning: - `instruction`: `str`, describes the task the model should perform. For example, to answer a question after reading a book section or paper. We vary the contents and questions to make instructions diverse. - `output`: `str`, the answer to the instruction. We did not use the `input` format in the Alpaca format for simplicity. 
## Models ### Models with supervised fine-tuning | Model | Size | Context | Train | Link | |:---------------|------|---------|---------|-----------------------------------------------------------------------------------------------------------------------| | LongAlpaca-7B | 7B | 32768 | Full FT | [Model](https://huggingface.co/Yukang/LongAlpaca-7B) | | LongAlpaca-13B | 13B | 32768 | Full FT | [Model](https://huggingface.co/Yukang/LongAlpaca-13B) | | LongAlpaca-70B | 70B | 32768 | LoRA+ | [Model](https://huggingface.co/Yukang/LongAlpaca-70B) [(LoRA-weight)](https://huggingface.co/Yukang/LongAlpaca-70B-lora) | ### Models with context extension via fully fine-tuning | Model | Size | Context | Train | Link | |:----------------------------|------|---------|-------|-------------------------------------------------------------------| | Llama-2-7b-longlora-8k-ft | 7B | 8192 | Full FT | [Model](https://huggingface.co/Yukang/Llama-2-7b-longlora-8k-ft) | | Llama-2-7b-longlora-16k-ft | 7B | 16384 | Full FT | [Model](https://huggingface.co/Yukang/Llama-2-7b-longlora-16k-ft) | | Llama-2-7b-longlora-32k-ft | 7B | 32768 | Full FT | [Model](https://huggingface.co/Yukang/Llama-2-7b-longlora-32k-ft) | | Llama-2-7b-longlora-100k-ft | 7B | 100000 | Full FT | [Model](https://huggingface.co/Yukang/Llama-2-7b-longlora-100k-ft) | | Llama-2-13b-longlora-8k-ft | 13B | 8192 | Full FT | [Model](https://huggingface.co/Yukang/Llama-2-13b-longlora-8k-ft) | | Llama-2-13b-longlora-16k-ft | 13B | 16384 | Full FT | [Model](https://huggingface.co/Yukang/Llama-2-13b-longlora-16k-ft) | | Llama-2-13b-longlora-32k-ft | 13B | 32768 | Full FT | [Model](https://huggingface.co/Yukang/Llama-2-13b-longlora-32k-ft) | ### Models with context extension via improved LoRA fine-tuning | Model | Size | Context | Train | Link | |:----------------------------|------|---------|-------|---------------------------------------------------------------------| | Llama-2-7b-longlora-8k | 7B | 8192 | LoRA+ | 
[LoRA-weight](https://huggingface.co/Yukang/Llama-2-7b-longlora-8k) | | Llama-2-7b-longlora-16k | 7B | 16384 | LoRA+ | [LoRA-weight](https://huggingface.co/Yukang/Llama-2-7b-longlora-16k) | | Llama-2-7b-longlora-32k | 7B | 32768 | LoRA+ | [LoRA-weight](https://huggingface.co/Yukang/Llama-2-7b-longlora-32k) | | Llama-2-13b-longlora-8k | 13B | 8192 | LoRA+ | [LoRA-weight](https://huggingface.co/Yukang/Llama-2-13b-longlora-8k) | | Llama-2-13b-longlora-16k | 13B | 16384 | LoRA+ | [LoRA-weight](https://huggingface.co/Yukang/Llama-2-13b-longlora-16k) | | Llama-2-13b-longlora-32k | 13B | 32768 | LoRA+ | [LoRA-weight](https://huggingface.co/Yukang/Llama-2-13b-longlora-32k) | | Llama-2-13b-longlora-64k | 13B | 65536 | LoRA+ | [LoRA-weight](https://huggingface.co/Yukang/Llama-2-13b-longlora-64k) | | Llama-2-70b-longlora-32k | 70B | 32768 | LoRA+ | [LoRA-weight](https://huggingface.co/Yukang/Llama-2-70b-longlora-32k) | | Llama-2-70b-chat-longlora-32k | 70B | 32768 | LoRA+ | [LoRA-weight](https://huggingface.co/Yukang/Llama-2-70b-chat-longlora-32k) | ## Training ### Pre-trained weights We use LLaMA2 models as the pre-trained weights and fine-tune them to long context window sizes. Download based on your choices. | Pre-trained weights | |:-------------------------------------------------------------------------------------| | [Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) | |[Llama-2-13b-hf](https://huggingface.co/meta-llama/Llama-2-13b-hf) | | [Llama-2-70b-hf](https://huggingface.co/meta-llama/Llama-2-70b-hf) | | [Llama-2-7b-chat-hf](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf) | | [Llama-2-13b-chat-hf](https://huggingface.co/meta-llama/Llama-2-13b-chat-hf) | | [Llama-2-70b-chat-hf](https://huggingface.co/meta-llama/Llama-2-70b-chat-hf) | This project also supports GPTNeoX models as the base model architecture. 
Some candidate pre-trained weights may include [GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b), [Polyglot-ko-12.8B](https://huggingface.co/EleutherAI/polyglot-ko-12.8b) and other variants. ### Fine-tuning ``` torchrun --nproc_per_node=8 fine-tune.py \ --model_name_or_path path_to/Llama-2-7b-hf \ --bf16 True \ --output_dir path_to_saving_checkpoints \ --cache_dir path_to_cache \ --model_max_length 8192 \ --use_flash_attn True \ --low_rank_training False \ --num_train_epochs 1 \ --per_device_train_batch_size 1 \ --per_device_eval_batch_size 2 \ --gradient_accumulation_steps 8 \ --evaluation_strategy "no" \ --save_strategy "steps" \ --save_steps 1000 \ --save_total_limit 2 \ --learning_rate 2e-5 \ --weight_decay 0.0 \ --warmup_steps 20 \ --lr_scheduler_type "constant_with_warmup" \ --logging_steps 1 \ --deepspeed "ds_configs/stage2.json" \ --tf32 True \ --max_steps 1000 ``` - Please remember to change `path_to/Llama-2-7b-hf`, `path_to_saving_checkpoints`, `path_to_cache` to your own directory. - Note that you can change `model_max_length` to other values. - You could change `ds_configs/stage2.json` to `ds_configs/stage3.json` if you want. - Please set `use_flash_attn` as `False` if you use V100 machines or do not install flash attention. - You can set `low_rank_training` as `False` if you want to use fully fine-tuning. It will cost more GPU memory and slower, but the performance will be a bit better. - When training is finished, to get the full model weight: ``` cd path_to_saving_checkpoints && python zero_to_fp32.py . 
pytorch_model.bin ``` ### Supervised Fine-tuning ``` torchrun --nproc_per_node=8 supervised-fine-tune.py \ --model_name_or_path path_to_Llama2_chat_models \ --bf16 True \ --output_dir path_to_saving_checkpoints \ --model_max_length 32768 \ --use_flash_attn True \ --data_path LongAlpaca-12k.json \ --low_rank_training True \ --num_train_epochs 3 \ --per_device_train_batch_size 1 \ --per_device_eval_batch_size 2 \ --gradient_accumulation_steps 1 \ --evaluation_strategy "no" \ --save_strategy "steps" \ --save_steps 1000 \ --save_total_limit 2 \ --learning_rate 2e-5 \ --weight_decay 0.0 \ --warmup_steps 20 \ --lr_scheduler_type "constant_with_warmup" \ --logging_steps 1 \ --deepspeed "ds_configs/stage2.json" \ --tf32 True ``` - There is no need to make supervised fine-tuning upon the fine-tuned context extended models. It is all right to directly use base model as Llama2-chat models, as the amount of long instruction following data is enough for SFT. - Our long instruction following data can be found in [LongAlpaca-12k.json](https://huggingface.co/datasets/Yukang/LongAlpaca-12k). ### Get trainable weights in low-rank training In low-rank training, we set embedding and normalization layers as trainable. 
Please use the following line to extract the trainable weights `trainable_params.bin` from `pytorch_model.bin` ``` python3 get_trainable_weights.py --checkpoint_path path_to_saving_checkpoints --trainable_params "embed,norm" ``` ### Merge LoRA Weight Merge the LoRA weights of `pytorch_model.bin` and trainable parameters `trainable_params.bin`, save the resulting model into your desired path in the Hugging Face format: ``` python3 merge_lora_weights_and_save_hf_model.py \ --base_model path_to/Llama-2-7b-hf \ --peft_model path_to_saving_checkpoints \ --context_size 8192 \ --save_path path_to_saving_merged_model ``` For example, ``` python3 merge_lora_weights_and_save_hf_model.py \ --base_model /dataset/pretrained-models/Llama-2-7b-hf \ --peft_model /dataset/yukangchen/hf_models/lora-models/Llama-2-7b-longlora-8k \ --context_size 8192 \ --save_path /dataset/yukangchen/models/Llama-2-7b-longlora-8k-merged ``` ## Evaluation ### Perplexity Validation To evaluate a model that is trained in the low-rank setting, please set both `base_model` and `peft_model`. `base_model` is the pre-trained weight. `peft_model` is the path to the saved checkpoint, which should contain `trainable_params.bin`, `adapter_model.bin` and `adapter_config.json`. For example, ``` python3 eval.py --seq_len 8192 --context_size 8192 --batch_size 1 --base_model path_to/Llama-2-7b-hf --peft_model path_to_saving_checkpoints --data_path pg19/test.bin ``` To evaluate a model that is fully fine-tuned, you only need to set `base_model` as the path to the saved checkpoint, which should contain `pytorch_model.bin` and `config.json`. `peft_model` should be ignored. ``` python3 eval.py --seq_len 8192 --context_size 8192 --batch_size 1 --base_model path_to_saving_checkpoints --data_path pg19/test.bin ``` - Note that `--seq_len` is to set the sequence length for evaluation. `--context_size` is to set the context length of the model during fine-tuning. `--seq_len` should not be larger than `--context_size`. 
- We have already tokenized the validation and test splits of PG19 and proof-pile dataset into `pg19/validation.bin`, `pg19/test.bin`, and `proof-pile/test_sampled_data.bin`, with the tokenizer of LLaMA. `proof-pile/test_sampled_data.bin` contains 128 documents that are randomly sampled from the total proof-pile test split. For each document, it has at least 32768 tokens. We also release the sampled ids in [proof-pile/test_sampled_ids.bin](https://drive.google.com/file/d/1cnzWODLRQYAd7HeugzLCIhaqzaLZv7J5/view?usp=share_link). You can download them from the links below. | Dataset | Split | Link | |:-----------|------------|--------------------------------------------------------------------------------------------------------------| | PG19 | validation | [pg19/validation.bin](https://drive.google.com/file/d/1rbJvb0qRIf2mQoN2ON7S93TbTzMnlrN6/view?usp=share_link) | | PG19 | test | [pg19/test.bin](https://drive.google.com/file/d/1QANDMdctpacPAYgS04adDXqByGEq-Ret/view?usp=share_link) | | Proof-pile | test | [proof-pile/test_sampled_data.bin](https://drive.google.com/file/d/1bUI5lPDvrqzY_XXJJ2sSuvZx0Y9AZClE/view?usp=share_link) | ### Passkey Retrieval We provide a manner to test the passkey retrieval accuracy. For example, ``` python3 passkey_retrivial.py \ --context_size 32768 \ --base_model path_to/Llama-2-7b-longlora-32k \ --max_tokens 32768 \ --interval 1000 ``` - Note that the `context_size` is the context length during fine-tuning. - `max_tokens` is maximum length for the document in passkey retrieval evaluation. - `interval` is the interval during the document length increasing. It is a rough number because the document increases by sentences. 
## Demo ### Local Inference To chat with [Llama-2-13b-chat-longlora-32k-sft](https://huggingface.co/Yukang/Llama-2-13b-chat-longlora-32k-sft) or [Llama-2-70b-chat-longlora-32k-sft](https://huggingface.co/Yukang/Llama-2-70b-chat-longlora-32k-sft), you need to run `merge_lora_weights_and_save_hf_model.py` first, and then: ``` python3 inference.py \ --base_model path_to_model \ --question $question \ --context_size $context_length \ --max_gen_len $max_gen_len \ --flash_attn True \ --material $material_content \ --material_type $material_type \ --material_title $material_title ``` To ask a question related to a book: ``` python3 inference.py \ --base_model /data/models/Llama-2-13b-chat-longlora-32k-sft \ --question "Why doesn't Professor Snape seem to like Harry?" \ --context_size 32768 \ --max_gen_len 512 \ --flash_attn True \ --material "materials/Harry Potter and the Philosophers Stone_section2.txt" \ --material_type "book" \ --material_title "Harry Potter and the Philosophers Stone" ``` Note that you can ignore `material_type` or `material_title`. To ask a question related to a paper: ``` python3 inference.py \ --base_model /data/models/Llama-2-13b-chat-longlora-32k-sft \ --question "What are the main contributions and novelties of this work?" \ --context_size 32768 \ --max_gen_len 512 \ --flash_attn True \ --material "materials/paper1.txt" \ --material_type "paper" ``` ### Online Demo To deploy your own demo run ``` python3 demo.py \ --base_model path_to_model \ --context_size $context_size \ --max_gen_len $max_gen_len \ --flash_attn True ``` Example ``` python3 demo.py \ --base_model /data/models/Llama-2-13b-chat-longlora-32k-sft \ --context_size 32768 \ --max_gen_len 512 \ --flash_attn True ``` - Note that `flash_attn=True` will make the generation slow but save much GPU memory. ## Data Generation via Pdf2text During our dataset collection, we convert paper and books from pdf to text. The conversion quality has a large influence on the final model quality. 
We think that this step is non-trivial. We release the tool for the pdf2txt conversion, in the folder `pdf2txt`. It is built upon `pdf2image`, `easyocr`, `ditod` and `detectron2`. Please refer to the [README.md](pdf2txt/README.md) in `pdf2txt` for more details. ## Citation If you find this project useful in your research, please consider citing: ``` @article{longlora, title={LongLoRA: Efficient Fine-tuning of Long-Context Large Language Models}, author={Yukang Chen and Shengju Qian and Haotian Tang and Xin Lai and Zhijian Liu and Song Han and Jiaya Jia}, journal={arXiv:2309.12307}, year={2023} } ``` ``` @misc{long-alpaca, author = {Yukang Chen and Shaozuo Yu and Shengju Qian and Haotian Tang and Xin Lai and Zhijian Liu and Song Han and Jiaya Jia}, title = {Long Alpaca: Long-context Instruction-following models}, year = {2023}, publisher = {GitHub}, journal = {GitHub repository}, howpublished = {\url{https://github.com/dvlab-research/LongLoRA}}, } ``` ## Acknowledgement - This work is built upon the [LLaMA2](https://ai.meta.com/llama) as the pre-trained models. - This work can also be built upon the [GPTNeoX-HF](https://huggingface.co/docs/transformers/model_doc/gpt_neox) which is based upon [EleutherAI/GPTNeoX](https://github.com/EleutherAI/gpt-neox) as the pre-trained model architecture. - This work is based on [DeepSpeed](https://github.com/microsoft/DeepSpeed), [peft](https://github.com/huggingface/peft), and [Flash-Attention2](https://github.com/Dao-AILab/flash-attention) for acceleration. - Some evaluation code is modified upon [Landmark Attention](https://github.com/epfml/landmark-attention). - We use [LongChat](https://github.com/DachengLi1/LongChat) for the retrieval evaluation. ## License - LongLoRA is licensed under the Apache License 2.0. This means that it requires the preservation of copyright and license notices. - Data and weights are under CC-BY-NC 4.0 License. They are licensed for research use only, and allowed only non-commercial. 
Models trained using the dataset should not be used outside of research purposes.
digiplay/Acorn_Photo_v1
digiplay
"2024-06-01T20:20:49Z"
4,639
2
diffusers
[ "diffusers", "safetensors", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2023-08-13T00:09:30Z"
--- license: other --- Model info : https://civitai.com/models/112013?modelVersionId=124592 file name: acornIsSpinning_photo.safetensors Original Author's DEMO image : ![008 Acorn Photo (2).jpeg](https://cdn-uploads.huggingface.co/production/uploads/646c83c871d0c8a6e4455854/nt6D6dKcMvzuSfOsxHlTY.jpeg)
elyza/ELYZA-japanese-Llama-2-13b-instruct
elyza
"2023-12-27T01:41:15Z"
4,638
39
transformers
[ "transformers", "pytorch", "llama", "text-generation", "ja", "en", "arxiv:2307.09288", "license:llama2", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-12-25T16:10:32Z"
--- license: llama2 language: - ja - en --- ## ELYZA-japanese-Llama-2-13b ![ELYZA-Japanese-Llama2-image](./key_visual.png) ### Model Description **ELYZA-japanese-Llama-2-13b** は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 詳細は [Blog記事](https://note.com/elyza/n/n5d42686b60b7) を参照してください。 ### Usage ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer B_INST, E_INST = "[INST]", "[/INST]" B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" DEFAULT_SYSTEM_PROMPT = "あなたは誠実で優秀な日本人のアシスタントです。" text = "仕事の熱意を取り戻すためのアイデアを5つ挙げてください。" model_name = "elyza/ELYZA-japanese-Llama-2-13b-instruct" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForCausalLM.from_pretrained( model_name, torch_dtype=torch.bfloat16, use_cache=True, device_map="auto", low_cpu_mem_usage=True, ) model.eval() prompt = "{bos_token}{b_inst} {system}{prompt} {e_inst} ".format( bos_token=tokenizer.bos_token, b_inst=B_INST, system=f"{B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}", prompt=text, e_inst=E_INST, ) token_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt") with torch.no_grad(): output_ids = model.generate( token_ids.to(model.device), max_new_tokens=256, pad_token_id=tokenizer.pad_token_id, eos_token_id=tokenizer.eos_token_id, ) output = tokenizer.decode(output_ids.tolist()[0][token_ids.size(1) :], skip_special_tokens=True) print(output) ``` ### ELYZA-japanese-Llama-2-13b Models | Model Name | Vocab Size | #Params | |:---------------------------------------------|:----------:|:-------:| |[elyza/ELYZA-japanese-Llama-2-13b](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b)| 32000 | 13.02B | |[elyza/ELYZA-japanese-Llama-2-13b-instruct](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b-instruct)| 32000 | 13.02B | |[elyza/ELYZA-japanese-Llama-2-13b-fast](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b-fast)| 44581 | 13.14B | 
|[elyza/ELYZA-japanese-Llama-2-13b-fast-instruct](https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b-fast-instruct)| 44581 | 13.14B | ### Developers 以下アルファベット順 - [Akira Sasaki](https://huggingface.co/akirasasaki) - [Masato Hirakawa](https://huggingface.co/m-hirakawa) - [Shintaro Horie](https://huggingface.co/e-mon) - [Tomoaki Nakamura](https://huggingface.co/tyoyo) - [Sam Passaglia](https://huggingface.co/passaglia) - [Daisuke Oba](https://huggingface.co/daisuk30ba) (intern) ### Licence Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved. ### How to Cite ```tex @misc{elyzallama2023, title={ELYZA-japanese-Llama-2-13b}, url={https://huggingface.co/elyza/ELYZA-japanese-Llama-2-13b}, author={Akira Sasaki and Masato Hirakawa and Shintaro Horie and Tomoaki Nakamura and Sam Passaglia and Daisuke Oba}, year={2023}, } ``` ### Citations ```tex @misc{touvron2023llama, title={Llama 2: Open Foundation and Fine-Tuned Chat Models}, author={Hugo Touvron and Louis Martin and Kevin Stone and Peter Albert and Amjad Almahairi and Yasmine Babaei and Nikolay Bashlykov and Soumya Batra and Prajjwal Bhargava and Shruti Bhosale and Dan Bikel and Lukas Blecher and Cristian Canton Ferrer and Moya Chen and Guillem Cucurull and David Esiobu and Jude Fernandes and Jeremy Fu and Wenyin Fu and Brian Fuller and Cynthia Gao and Vedanuj Goswami and Naman Goyal and Anthony Hartshorn and Saghar Hosseini and Rui Hou and Hakan Inan and Marcin Kardas and Viktor Kerkez and Madian Khabsa and Isabel Kloumann and Artem Korenev and Punit Singh Koura and Marie-Anne Lachaux and Thibaut Lavril and Jenya Lee and Diana Liskovich and Yinghai Lu and Yuning Mao and Xavier Martinet and Todor Mihaylov and Pushkar Mishra and Igor Molybog and Yixin Nie and Andrew Poulton and Jeremy Reizenstein and Rashi Rungta and Kalyan Saladi and Alan Schelten and Ruan Silva and Eric Michael Smith and Ranjan Subramanian and Xiaoqing Ellen Tan and Binh Tang and Ross Taylor 
and Adina Williams and Jian Xiang Kuan and Puxin Xu and Zheng Yan and Iliyan Zarov and Yuchen Zhang and Angela Fan and Melanie Kambadur and Sharan Narang and Aurelien Rodriguez and Robert Stojnic and Sergey Edunov and Thomas Scialom}, year={2023}, eprint={2307.09288}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
MBZUAI/LaMini-GPT-124M
MBZUAI
"2023-04-28T13:07:07Z"
4,636
19
transformers
[ "transformers", "pytorch", "gpt2", "text-generation", "en", "arxiv:2304.14402", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-04-14T00:58:57Z"
--- license: cc-by-nc-4.0 language: - en pipeline_tag: text-generation widget: - text: >- Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: how can I become more healthy? ### Response: example_title: example --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> <p align="center" width="100%"> <a><img src="https://raw.githubusercontent.com/mbzuai-nlp/lamini-lm/main/images/lamini.png" alt="Title" style="width: 100%; min-width: 300px; display: block; margin: auto;"></a> </p> # LaMini-GPT-124M [![Model License](https://img.shields.io/badge/Model%20License-CC%20By%20NC%204.0-red.svg)]() This model is one of our LaMini-LM model series in paper "[LaMini-LM: A Diverse Herd of Distilled Models from Large-Scale Instructions](https://github.com/mbzuai-nlp/lamini-lm)". This model is a fine-tuned version of [gpt2](https://huggingface.co/gpt2) on [LaMini-instruction dataset](https://huggingface.co/datasets/MBZUAI/LaMini-instruction) that contains 2.58M samples for instruction fine-tuning. For more information about our dataset, please refer to our [project repository](https://github.com/mbzuai-nlp/lamini-lm/). You can view other models of LaMini-LM series as follows. Models with ✩ are those with the best overall performance given their size/architecture, hence we recommend using them. More details can be seen in our paper. 
<table> <thead> <tr> <th>Base model</th> <th colspan="4">LaMini-LM series (#parameters)</th> </tr> </thead> <tbody> <tr> <td>T5</td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-61m" target="_blank" rel="noopener noreferrer">LaMini-T5-61M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-223m" target="_blank" rel="noopener noreferrer">LaMini-T5-223M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-t5-738m" target="_blank" rel="noopener noreferrer">LaMini-T5-738M</a></td> <td></td> </tr> <tr> <td>Flan-T5</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-77m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-77M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-248m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-248M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-flan-t5-783m" target="_blank" rel="noopener noreferrer">LaMini-Flan-T5-783M</a>✩</td> <td></td> </tr> <tr> <td>Cerebras-GPT</td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-111m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-111M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-256m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-256M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-590m" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-590M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-cerebras-1.3b" target="_blank" rel="noopener noreferrer">LaMini-Cerebras-1.3B</a></td> </tr> <tr> <td>GPT-2</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-124m" target="_blank" rel="noopener noreferrer">LaMini-GPT-124M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-774m" target="_blank" rel="noopener noreferrer">LaMini-GPT-774M</a>✩</td> <td><a href="https://huggingface.co/MBZUAI/lamini-gpt-1.5b" target="_blank" rel="noopener noreferrer">LaMini-GPT-1.5B</a>✩</td> <td></td> </tr> <tr> <td>GPT-Neo</td> <td><a 
href="https://huggingface.co/MBZUAI/lamini-neo-125m" target="_blank" rel="noopener noreferrer">LaMini-Neo-125M</a></td> <td><a href="https://huggingface.co/MBZUAI/lamini-neo-1.3b" target="_blank" rel="noopener noreferrer">LaMini-Neo-1.3B</a></td> <td></td> <td></td> </tr> <tr> <td>GPT-J</td> <td colspan="4">coming soon</td> </tr> <tr> <td>LLaMA</td> <td colspan="4">coming soon</td> </tr> </tbody> </table> ## Use ### Intended use We recommend using the model to respond to human instructions written in natural language. Since this decoder-only model is fine-tuned with wrapper text, we suggest using the same wrapper text to achieve the best performance. See the example on the right or the code below. We now show you how to load and use our model using HuggingFace `pipeline()`. ```python # pip install -q transformers from transformers import pipeline checkpoint = "{model_name}" model = pipeline('text-generation', model = checkpoint) instruction = 'Please let me know your thoughts on the given place and why you think it deserves to be visited: \n"Barcelona, Spain"' input_prompt = f"Below is an instruction that describes a task. Write a response that appropriately completes the request.\n\n### Instruction:\n{instruction}\n\n### Response:" generated_text = model(input_prompt, max_length=512, do_sample=True)[0]['generated_text'] print("Response", generated_text) ``` ## Training Procedure <p align="center" width="100%"> <a><img src="https://raw.githubusercontent.com/mbzuai-nlp/lamini-lm/main/images/lamini-pipeline.drawio.png" alt="Title" style="width: 100%; min-width: 250px; display: block; margin: auto;"></a> </p> We initialize with [gpt2](https://huggingface.co/gpt2) and fine-tune it on our [LaMini-instruction dataset](https://huggingface.co/datasets/MBZUAI/LaMini-instruction). Its total number of parameters is 124M. 
### Training Hyperparameters ## Evaluation We conducted two sets of evaluations: automatic evaluation on downstream NLP tasks and human evaluation on user-oriented instructions. For more details, please refer to our [paper](https://arxiv.org/abs/2304.14402). ## Limitations More information needed # Citation ```bibtex @article{lamini-lm, author = {Minghao Wu and Abdul Waheed and Chiyu Zhang and Muhammad Abdul-Mageed and Alham Fikri Aji }, title = {LaMini-LM: A Diverse Herd of Distilled Models from Large-Scale Instructions}, journal = {CoRR}, volume = {abs/2304.14402}, year = {2023}, url = {https://arxiv.org/abs/2304.14402}, eprinttype = {arXiv}, eprint = {2304.14402} } ```
facebook/mms-tts-vie
facebook
"2023-09-01T10:37:34Z"
4,633
10
transformers
[ "transformers", "pytorch", "safetensors", "vits", "text-to-audio", "mms", "text-to-speech", "arxiv:2305.13516", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
text-to-speech
"2023-09-01T10:37:14Z"
--- license: cc-by-nc-4.0 tags: - mms - vits pipeline_tag: text-to-speech --- # Massively Multilingual Speech (MMS): Vietnamese Text-to-Speech This repository contains the **Vietnamese (vie)** language text-to-speech (TTS) model checkpoint. This model is part of Facebook's [Massively Multilingual Speech](https://arxiv.org/abs/2305.13516) project, aiming to provide speech technology across a diverse range of languages. You can find more details about the supported languages and their ISO 639-3 codes in the [MMS Language Coverage Overview](https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html), and see all MMS-TTS checkpoints on the Hugging Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts). MMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. ## Model Details VITS (**V**ariational **I**nference with adversarial learning for end-to-end **T**ext-to-**S**peech) is an end-to-end speech synthesis model that predicts a speech waveform conditional on an input text sequence. It is a conditional variational autoencoder (VAE) comprised of a posterior encoder, decoder, and conditional prior. A set of spectrogram-based acoustic features are predicted by the flow-based module, which is formed of a Transformer-based text encoder and multiple coupling layers. The spectrogram is decoded using a stack of transposed convolutional layers, much in the same style as the HiFi-GAN vocoder. Motivated by the one-to-many nature of the TTS problem, where the same text input can be spoken in multiple ways, the model also includes a stochastic duration predictor, which allows the model to synthesise speech with different rhythms from the same input text. The model is trained end-to-end with a combination of losses derived from variational lower bound and adversarial training. To improve the expressiveness of the model, normalizing flows are applied to the conditional prior distribution. 
During inference, the text encodings are up-sampled based on the duration prediction module, and then mapped into the waveform using a cascade of the flow module and HiFi-GAN decoder. Due to the stochastic nature of the duration predictor, the model is non-deterministic, and thus requires a fixed seed to generate the same speech waveform. For the MMS project, a separate VITS checkpoint is trained on each language. ## Usage MMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. To use this checkpoint, first install the latest version of the library: ``` pip install --upgrade transformers accelerate ``` Then, run inference with the following code-snippet: ```python from transformers import VitsModel, AutoTokenizer import torch model = VitsModel.from_pretrained("facebook/mms-tts-vie") tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-vie") text = "some example text in the Vietnamese language" inputs = tokenizer(text, return_tensors="pt") with torch.no_grad(): output = model(**inputs).waveform ``` The resulting waveform can be saved as a `.wav` file: ```python import scipy scipy.io.wavfile.write("techno.wav", rate=model.config.sampling_rate, data=output) ``` Or displayed in a Jupyter Notebook / Google Colab: ```python from IPython.display import Audio Audio(output, rate=model.config.sampling_rate) ``` ## BibTex citation This model was developed by Vineel Pratap et al. from Meta AI. If you use the model, consider citing the MMS paper: ``` @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ``` ## License The model is licensed as **CC-BY-NC 4.0**.
facebook/mms-1b
facebook
"2023-06-05T10:23:40Z"
4,632
36
transformers
[ "transformers", "pytorch", "wav2vec2", "pretraining", "mms", "ab", "af", "ak", "am", "ar", "as", "av", "ay", "az", "ba", "bm", "be", "bn", "bi", "bo", "sh", "br", "bg", "ca", "cs", "ce", "cv", "ku", "cy", "da", "de", "dv", "dz", "el", "en", "eo", "et", "eu", "ee", "fo", "fa", "fj", "fi", "fr", "fy", "ff", "ga", "gl", "gn", "gu", "zh", "ht", "ha", "he", "hi", "hu", "hy", "ig", "ia", "ms", "is", "it", "jv", "ja", "kn", "ka", "kk", "kr", "km", "ki", "rw", "ky", "ko", "kv", "lo", "la", "lv", "ln", "lt", "lb", "lg", "mh", "ml", "mr", "mk", "mg", "mt", "mn", "mi", "my", "nl", "no", "ne", "ny", "oc", "om", "or", "os", "pa", "pl", "pt", "ps", "qu", "ro", "rn", "ru", "sg", "sk", "sl", "sm", "sn", "sd", "so", "es", "sq", "su", "sv", "sw", "ta", "tt", "te", "tg", "tl", "th", "ti", "ts", "tr", "uk", "vi", "wo", "xh", "yo", "zu", "za", "dataset:google/fleurs", "arxiv:2305.13516", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
null
"2023-05-22T19:39:11Z"
--- tags: - mms language: - ab - af - ak - am - ar - as - av - ay - az - ba - bm - be - bn - bi - bo - sh - br - bg - ca - cs - ce - cv - ku - cy - da - de - dv - dz - el - en - eo - et - eu - ee - fo - fa - fj - fi - fr - fy - ff - ga - gl - gn - gu - zh - ht - ha - he - hi - sh - hu - hy - ig - ia - ms - is - it - jv - ja - kn - ka - kk - kr - km - ki - rw - ky - ko - kv - lo - la - lv - ln - lt - lb - lg - mh - ml - mr - ms - mk - mg - mt - mn - mi - my - zh - nl - 'no' - 'no' - ne - ny - oc - om - or - os - pa - pl - pt - ms - ps - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - qu - ro - rn - ru - sg - sk - sl - sm - sn - sd - so - es - sq - su - sv - sw - ta - tt - te - tg - tl - th - ti - ts - tr - uk - ms - vi - wo - xh - ms - yo - ms - zu - za license: cc-by-nc-4.0 datasets: - google/fleurs metrics: - wer --- # Massively Multilingual Speech (MMS) - 1B Facebook's MMS counting *1 billion* parameters. MMS is Facebook AI's massive multilingual pretrained model for speech ("MMS"). It is pretrained in with [Wav2Vec2's self-supervised training objective](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) on about 500,000 hours of speech data in over 1,400 languages. When using the model make sure that your speech input is sampled at 16kHz. **Note**: This model should be fine-tuned on a downstream task, like Automatic Speech Recognition, Translation, or Classification. Check out the [**How-to-fine section](#how-to-finetune) or [**this blog**](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) for more information about ASR. ## Table Of Content - [How to Finetune](#how-to-finetune) - [Model details](#model-details) - [Additional links](#additional-links) ## How to finetune Coming soon... ## Model details - **Developed by:** Vineel Pratap et al. 
- **Model type:** Multi-Lingual Automatic Speech Recognition model - **Language(s):** 1000+ languages - **License:** CC-BY-NC 4.0 license - **Num parameters**: 1 billion - **Cite as:** @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ## Additional Links - [Blog post]( ) - [Transformers documentation](https://huggingface.co/docs/transformers/main/en/model_doc/mms). - [Paper](https://arxiv.org/abs/2305.13516) - [GitHub Repository](https://github.com/facebookresearch/fairseq/tree/main/examples/mms#asr) - [Other **MMS** checkpoints](https://huggingface.co/models?other=mms) - MMS ASR fine-tuned checkpoints: - [facebook/mms-1b-all](https://huggingface.co/facebook/mms-1b-all) - [facebook/mms-1b-l1107](https://huggingface.co/facebook/mms-1b-l1107) - [facebook/mms-1b-fl102](https://huggingface.co/facebook/mms-1b-fl102) - [Official Space](https://huggingface.co/spaces/facebook/MMS)
mradermacher/falcon-180B-i1-GGUF
mradermacher
"2024-07-02T23:15:29Z"
4,631
1
transformers
[ "transformers", "gguf", "en", "de", "es", "fr", "dataset:tiiuae/falcon-refinedweb", "base_model:tiiuae/falcon-180B", "license:unknown", "endpoints_compatible", "region:us" ]
null
"2024-06-21T12:18:22Z"
--- base_model: tiiuae/falcon-180B datasets: - tiiuae/falcon-refinedweb extra_gated_button_content: I agree to the terms and conditions of the Falcon-180B TII license and to the acceptable use policy extra_gated_heading: Acknowledge license to access the repository extra_gated_prompt: You agree to the [Falcon-180B TII license](https://huggingface.co/spaces/tiiuae/falcon-180b-license/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/tiiuae/falcon-180b-license/blob/main/ACCEPTABLE_USE_POLICY.txt). language: - en - de - es - fr library_name: transformers license: unknown quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> weighted/imatrix quants of https://huggingface.co/tiiuae/falcon-180B <!-- provided-files --> ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. 
IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 47.9 | | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ2_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ2_XS.gguf.part2of2) | i1-IQ2_XS | 53.1 | | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ2_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ2_M.gguf.part2of2) | i1-IQ2_M | 61.3 | | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q2_K.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q2_K.gguf.part2of2) | i1-Q2_K | 66.9 | IQ3_XXS probably better | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ3_XXS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ3_XXS.gguf.part2of2) | i1-IQ3_XXS | 69.5 | lower quality | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ3_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ3_XS.gguf.part2of2) | i1-IQ3_XS | 75.4 | | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ3_S.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ3_S.gguf.part2of2) | i1-IQ3_S | 77.9 | beats Q3_K* | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q3_K_S.gguf.part1of2) [PART 
2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q3_K_S.gguf.part2of2) | i1-Q3_K_S | 77.9 | IQ3_XS probably better | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ3_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ3_M.gguf.part2of2) | i1-IQ3_M | 81.5 | | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q3_K_M.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q3_K_M.gguf.part2of2) | i1-Q3_K_M | 85.6 | IQ3_S probably better | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q3_K_L.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q3_K_L.gguf.part2of2) | i1-Q3_K_L | 92.1 | IQ3_M probably better | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ4_XS.gguf.part1of2) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-IQ4_XS.gguf.part2of2) | i1-IQ4_XS | 96.0 | | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_K_S.gguf.part3of3) | i1-Q4_K_S | 101.6 | optimal size/speed/quality | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_0.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_0.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_0.gguf.part3of3) | i1-Q4_0 | 102.1 | fast, low quality | | 
[PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q4_K_M.gguf.part3of3) | i1-Q4_K_M | 108.9 | fast, recommended | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q5_K_S.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q5_K_S.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q5_K_S.gguf.part3of3) | i1-Q5_K_S | 123.9 | | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q5_K_M.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q5_K_M.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q5_K_M.gguf.part3of3) | i1-Q5_K_M | 131.1 | | | [PART 1](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q6_K.gguf.part1of3) [PART 2](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q6_K.gguf.part2of3) [PART 3](https://huggingface.co/mradermacher/falcon-180B-i1-GGUF/resolve/main/falcon-180B.i1-Q6_K.gguf.part3of3) | i1-Q6_K | 147.6 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. 
## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
microsoft/git-large-coco
microsoft
"2023-06-26T19:50:47Z"
4,627
93
transformers
[ "transformers", "pytorch", "safetensors", "git", "text-generation", "vision", "image-captioning", "image-to-text", "en", "arxiv:2205.14100", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-to-text
"2023-01-02T10:44:21Z"
--- language: en license: mit tags: - vision - image-captioning model_name: microsoft/git-large-coco pipeline_tag: image-to-text --- # GIT (GenerativeImage2Text), large-sized, fine-tuned on COCO GIT (short for GenerativeImage2Text) model, large-sized version, fine-tuned on COCO. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text). Disclaimer: The team releasing GIT did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on a lot of (image, text) pairs. The goal for the model is simply to predict the next text token, given the image tokens and previous text tokens. The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token. ![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg) This allows the model to be used for tasks like: - image and video captioning - visual question answering (VQA) on images and videos - even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text). ## Intended uses & limitations You can use the raw model for image captioning. See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you. ### How to use For code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/main/model_doc/git#transformers.GitForCausalLM.forward.example).
## Training data From the paper: > We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a). => however this is for the model referred to as "GIT" in the paper, which is not open-sourced. This checkpoint is "GIT-large", which is a smaller variant of GIT trained on 20 million image-text pairs. Next, the model was fine-tuned on COCO. See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details. ### Preprocessing We refer to the original repo regarding details for preprocessing during training. During validation, one resizes the shorter edge of each image, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation. ## Evaluation results For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF
mradermacher
"2024-06-17T14:43:09Z"
4,627
1
transformers
[ "transformers", "gguf", "roleplay", "llama3", "sillytavern", "idol", "en", "ja", "zh", "base_model:aifeifei798/llama3-8B-DarkIdol-1.0", "license:llama3", "endpoints_compatible", "region:us" ]
null
"2024-06-17T11:47:17Z"
--- base_model: aifeifei798/llama3-8B-DarkIdol-1.0 language: - en - ja - zh library_name: transformers license: llama3 quantized_by: mradermacher tags: - roleplay - llama3 - sillytavern - idol --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/aifeifei798/llama3-8B-DarkIdol-1.0 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | 
[GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | 
[GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/llama3-8B-DarkIdol-1.0-i1-GGUF/resolve/main/llama3-8B-DarkIdol-1.0.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
digiplay/OnlyReal-Black-Mix
digiplay
"2023-08-16T21:23:32Z"
4,625
1
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2023-07-27T09:40:00Z"
--- license: other tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers inference: true --- https://civitai.com/models/115449/onlyreal-black-mix Sample image generated by huggingface's API : ![74e8dc9b-880f-4a67-ae9e-ab02f0fee958.jpeg](https://cdn-uploads.huggingface.co/production/uploads/646c83c871d0c8a6e4455854/_eoY2KyQ9iKRq0cpKxY8m.jpeg) Original Author's DEMO images : ![](https://image.civitai.com/xG1nkqKTMzGDvpLrqFT7WA/a026ec4c-e298-4330-8b89-fc3214d7033b/width=1024/00046-824189014-1girl,%20(looking%20at%20viewer_1.2),%20detail%20face,%20%20(a%20girl%20standing,%20rending%20on%20cgsociety,%20black%20shadows,%20streaming,%20new%20york%20backdro.jpeg)
q-future/q-instruct-mplug-owl2-1031
q-future
"2024-01-27T16:49:17Z"
4,624
1
transformers
[ "transformers", "pytorch", "mplug_owl2", "feature-extraction", "custom_code", "arxiv:2311.06783", "license:mit", "region:us" ]
feature-extraction
"2023-12-13T05:31:02Z"
--- license: mit --- @misc{wu2023qinstruct, title={Q-Instruct: Improving Low-level Visual Abilities for Multi-modality Foundation Models}, author={Haoning Wu and Zicheng Zhang and Erli Zhang and Chaofeng Chen and Liang Liao and Annan Wang and Kaixin Xu and Chunyi Li and Jingwen Hou and Guangtao Zhai and Geng Xue and Wenxiu Sun and Qiong Yan and Weisi Lin}, year={2023}, eprint={2311.06783}, archivePrefix={arXiv}, primaryClass={cs.CV} }
microsoft/unixcoder-base-nine
microsoft
"2022-04-02T05:45:58Z"
4,620
15
transformers
[ "transformers", "pytorch", "roberta", "feature-extraction", "license:apache-2.0", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
feature-extraction
"2022-04-02T05:33:27Z"
--- license: apache-2.0 ---
beomi/KoRWKV-6B
beomi
"2023-07-20T01:07:48Z"
4,620
3
transformers
[ "transformers", "pytorch", "safetensors", "rwkv", "text-generation", "KoRWKV", "ko", "doi:10.57967/hf/1292", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2023-05-26T07:24:57Z"
--- license: mit language: - ko pipeline_tag: text-generation tags: - KoRWKV --- > Instruction-Finetuned model is available at [beomi/KoAlpaca-KoRWKV-6B](https://huggingface.co/beomi/KoAlpaca-KoRWKV-6B) # KoRWKV Model Card KoRWKV (6B) trained on Korean dataset with RWKVv4 Neo Architecture. ## Model details **Researcher developing the model** Junbum Lee (aka Beomi) **Model date** KoRWKV was trained between 2023.05~2023.07 **Model version** This is 1st release of the model. **Model type** Find more about RWKV at https://github.com/BlinkDL/RWKV-LM **License** MIT ## Intended use **Primary intended uses** The primary use of KoRWKV is research on Korean Opensource large language models **Primary intended users** The primary intended users of the model are researchers in natural language processing, machine learning and artificial intelligence. **Out-of-scope use cases** KoRWKV is a base, or foundational, model. As such, it should not be used on downstream applications without further risk evaluation and mitigation. In particular, our model has not been trained with human feedback, and can thus generate toxic or offensive content, incorrect information or generally unhelpful answers. ## Ethical considerations **Data** The data used to train the model is collected from various sources, mostly from the Web. As such, it contains offensive, harmful and biased content. We thus expect the model to exhibit such biases from the training data. **Human life** The model is not intended to inform decisions about matters central to human life, and should not be used in such a way. **Risks and harms** Risks and harms of large language models include the generation of harmful, offensive or biased content. These models are often prone to generating incorrect information, sometimes referred to as hallucinations. We do not expect our model to be an exception in this regard. 
**Use cases** KoRWKV is a foundational model, and as such, it should not be used for downstream applications without further investigation and mitigations of risks. These risks and potential fraught use cases include, but are not limited to: generation of misinformation and generation of harmful, biased or offensive content.
mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF
mradermacher
"2024-06-09T20:02:14Z"
4,620
7
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:bluuwhale/L3-SthenoMaidBlackroot-8B-V1", "endpoints_compatible", "region:us" ]
null
"2024-06-09T07:42:43Z"
--- base_model: bluuwhale/L3-SthenoMaidBlackroot-8B-V1 language: - en library_name: transformers quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/bluuwhale/L3-SthenoMaidBlackroot-8B-V1 <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q2_K.gguf) | Q2_K | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.IQ3_XS.gguf) | IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q3_K_S.gguf) | Q3_K_S | 3.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.IQ3_S.gguf) | IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.IQ3_M.gguf) | IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality | | 
[GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q3_K_L.gguf) | Q3_K_L | 4.4 | | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.IQ4_XS.gguf) | IQ4_XS | 4.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q5_K_S.gguf) | Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q5_K_M.gguf) | Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q6_K.gguf) | Q6_K | 6.7 | very good quality | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/L3-SthenoMaidBlackroot-8B-V1-GGUF/resolve/main/L3-SthenoMaidBlackroot-8B-V1.f16.gguf) | f16 | 16.2 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. 
## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
LiYuan/amazon-review-sentiment-analysis
LiYuan
"2022-04-30T22:03:23Z"
4,618
29
transformers
[ "transformers", "pytorch", "tensorboard", "bert", "text-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2022-04-30T20:37:44Z"
--- license: apache-2.0 tags: - generated_from_trainer metrics: - accuracy model-index: - name: distilbert-base-uncased-finetuned-mnli-amazon-query-shopping results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-mnli-amazon-query-shopping This model is a fine-tuned version of [nlptown/bert-base-multilingual-uncased-sentiment](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment?text=I+like+you.+I+love+you) on an [Amazon US Customer Reviews Dataset](https://www.kaggle.com/datasets/cynthiarempel/amazon-us-customer-reviews-dataset). The code for the fine-tuning process can be found [here](https://github.com/vanderbilt-data-science/bigdata/blob/main/06-fine-tune-BERT-on-our-dataset.ipynb). This model is uncased: it does not make a difference between english and English. It achieves the following results on the evaluation set: - Loss: 0.5202942490577698 - Accuracy: 0.8 ## Model description This is a bert-base-multilingual-uncased model finetuned for sentiment analysis on product reviews in six languages: English, Dutch, German, French, Spanish and Italian. It predicts the sentiment of the review as a number of stars (between 1 and 5). This model is intended for direct use as a sentiment analysis model for product reviews in any of the six languages above, or for further finetuning on related sentiment analysis tasks. We replaced its head with our customer reviews to fine-tune it on 17,280 rows of training set while validating it on 4,320 rows of dev set. Finally, we evaluated our model performance on a held-out test set: 2,400 rows. ## Intended uses & limitations Bert-base is primarily aimed at being fine-tuned on tasks that use the whole sentence (potentially masked) to make decisions, such as sequence classification, token classification, or question answering. 
This fine-tuned version of BERT-base is used to predict review rating star given the review. The limitations are this trained model is focusing on reviews and products on Amazon. If you apply this model to other domains, it may perform poorly. ## How to use You can use this model directly by downloading the trained weights and configurations like the below code snippet: ```python from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained("LiYuan/amazon-review-sentiment-analysis") model = AutoModelForSequenceClassification.from_pretrained("LiYuan/amazon-review-sentiment-analysis") ``` ## Training and evaluation data Download all the raw [dataset](https://www.kaggle.com/datasets/cynthiarempel/amazon-us-customer-reviews-dataset) from the Kaggle website. ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.555400 | 1.0 | 1080 | 0.520294 | 0.800000 | | 0.424300 | 2.0 | 1080 | 0.549649 | 0.798380 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.11.0+cu113 - Datasets 2.1.0 - Tokenizers 0.12.1
mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF
mradermacher
"2024-06-16T12:11:11Z"
4,617
0
transformers
[ "transformers", "gguf", "not-for-all-audiences", "roleplay", "llama3", "en", "base_model:Vdr1/L3-8B-Sunfall-v0.3-Stheno-v3.2", "endpoints_compatible", "region:us" ]
null
"2024-06-16T11:42:55Z"
--- base_model: Vdr1/L3-8B-Sunfall-v0.3-Stheno-v3.2 language: - en library_name: transformers quantized_by: mradermacher tags: - not-for-all-audiences - roleplay - llama3 --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/Vdr1/L3-8B-Sunfall-v0.3-Stheno-v3.2 <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q2_K.gguf) | Q2_K | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.IQ3_XS.gguf) | IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q3_K_S.gguf) | Q3_K_S | 3.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.IQ3_S.gguf) | IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.IQ3_M.gguf) | IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality | | 
[GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q3_K_L.gguf) | Q3_K_L | 4.4 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.IQ4_XS.gguf) | IQ4_XS | 4.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q5_K_S.gguf) | Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q5_K_M.gguf) | Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q6_K.gguf) | Q6_K | 6.7 | very good quality | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Sunfall-v0.3-Stheno-v3.2-GGUF/resolve/main/L3-8B-Sunfall-v0.3-Stheno-v3.2.f16.gguf) | f16 | 16.2 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. 
## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
mradermacher/L3-8B-Stheno-v3.2-GGUF
mradermacher
"2024-06-05T13:16:13Z"
4,614
5
transformers
[ "transformers", "gguf", "en", "dataset:Gryphe/Opus-WritingPrompts", "dataset:Sao10K/Claude-3-Opus-Instruct-15K", "dataset:Sao10K/Short-Storygen-v2", "dataset:Sao10K/c2-Logs-Filtered", "base_model:Sao10K/L3-8B-Stheno-v3.2", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
null
"2024-06-05T12:47:33Z"
--- base_model: Sao10K/L3-8B-Stheno-v3.2 datasets: - Gryphe/Opus-WritingPrompts - Sao10K/Claude-3-Opus-Instruct-15K - Sao10K/Short-Storygen-v2 - Sao10K/c2-Logs-Filtered language: - en library_name: transformers license: cc-by-nc-4.0 quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/Sao10K/L3-8B-Stheno-v3.2 <!-- provided-files --> weighted/imatrix quants are available at https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-i1-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q2_K.gguf) | Q2_K | 3.3 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.IQ3_XS.gguf) | IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q3_K_S.gguf) | Q3_K_S | 3.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.IQ3_S.gguf) | IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.IQ3_M.gguf) | IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q3_K_M.gguf) | Q3_K_M | 4.1 | lower quality | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q3_K_L.gguf) | Q3_K_L | 4.4 | | | 
[GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.IQ4_XS.gguf) | IQ4_XS | 4.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q4_K_S.gguf) | Q4_K_S | 4.8 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q4_K_M.gguf) | Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q5_K_S.gguf) | Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q5_K_M.gguf) | Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q6_K.gguf) | Q6_K | 6.7 | very good quality | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.Q8_0.gguf) | Q8_0 | 8.6 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/L3-8B-Stheno-v3.2-GGUF/resolve/main/L3-8B-Stheno-v3.2.f16.gguf) | f16 | 16.2 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
slauw87/bart_summarisation
slauw87
"2021-09-20T05:27:36Z"
4,613
58
transformers
[ "transformers", "pytorch", "bart", "text2text-generation", "sagemaker", "summarization", "en", "dataset:samsum", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
summarization
"2022-03-02T23:29:05Z"
--- language: en tags: - sagemaker - bart - summarization license: apache-2.0 datasets: - samsum model-index: - name: bart-large-cnn-samsum results: - task: name: Abstractive Text Summarization type: abstractive-text-summarization dataset: name: "SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization" type: samsum metrics: - name: Validation ROGUE-1 type: rogue-1 value: 43.2111 - name: Validation ROGUE-2 type: rogue-2 value: 22.3519 - name: Validation ROGUE-L type: rogue-l value: 33.315 - name: Test ROGUE-1 type: rogue-1 value: 41.8283 - name: Test ROGUE-2 type: rogue-2 value: 20.9857 - name: Test ROGUE-L type: rogue-l value: 32.3602 widget: - text: | Sugi: I am tired of everything in my life. Tommy: What? How happy you life is! I do envy you. Sugi: You don't know that I have been over-protected by my mother these years. I am really about to leave the family and spread my wings. Tommy: Maybe you are right. --- ## `bart-large-cnn-samsum` This model was trained using Amazon SageMaker and the new Hugging Face Deep Learning container. 
For more information look at: - [🤗 Transformers Documentation: Amazon SageMaker](https://huggingface.co/transformers/sagemaker.html) - [Example Notebooks](https://github.com/huggingface/notebooks/tree/master/sagemaker) - [Amazon SageMaker documentation for Hugging Face](https://docs.aws.amazon.com/sagemaker/latest/dg/hugging-face.html) - [Python SDK SageMaker documentation for Hugging Face](https://sagemaker.readthedocs.io/en/stable/frameworks/huggingface/index.html) - [Deep Learning Container](https://github.com/aws/deep-learning-containers/blob/master/available_images.md#huggingface-training-containers) ## Hyperparameters { "dataset_name": "samsum", "do_eval": true, "do_predict": true, "do_train": true, "fp16": true, "learning_rate": 5e-05, "model_name_or_path": "facebook/bart-large-cnn", "num_train_epochs": 3, "output_dir": "/opt/ml/model", "per_device_eval_batch_size": 4, "per_device_train_batch_size": 4, "predict_with_generate": true, "seed": 7 } ## Usage from transformers import pipeline summarizer = pipeline("summarization", model="slauw87/bart-large-cnn-samsum") conversation = '''Sugi: I am tired of everything in my life. Tommy: What? How happy you life is! I do envy you. Sugi: You don't know that I have been over-protected by my mother these years. I am really about to leave the family and spread my wings. Tommy: Maybe you are right. ''' summarizer(conversation) ## Results | key | value | | --- | ----- | | eval_rouge1 | 43.2111 | | eval_rouge2 | 22.3519 | | eval_rougeL | 33.3153 | | eval_rougeLsum | 40.0527 | | predict_rouge1 | 41.8283 | | predict_rouge2 | 20.9857 | | predict_rougeL | 32.3602 | | predict_rougeLsum | 38.7316 |
uer/gpt2-distil-chinese-cluecorpussmall
uer
"2023-10-17T15:21:19Z"
4,612
19
transformers
[ "transformers", "pytorch", "tf", "jax", "gpt2", "text-generation", "zh", "dataset:CLUECorpusSmall", "arxiv:1909.05658", "arxiv:2212.06385", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2022-03-02T23:29:05Z"
--- language: zh datasets: CLUECorpusSmall widget: - text: "米饭是一种用稻米与水煮成的食物" --- # Chinese GPT2 Models ## Model description The set of GPT2 models, except for GPT2-xlarge model, are pre-trained by [UER-py](https://github.com/dbiir/UER-py/), which is introduced in [this paper](https://arxiv.org/abs/1909.05658). The GPT2-xlarge model is pre-trained by [TencentPretrain](https://github.com/Tencent/TencentPretrain) introduced in [this paper](https://arxiv.org/abs/2212.06385), which inherits UER-py to support models with parameters above one billion, and extends it to a multimodal pre-training framework. Besides, the other models could also be pre-trained by TencentPretrain. The model is used to generate Chinese texts. You can download the set of Chinese GPT2 models either from the [UER-py Modelzoo page](https://github.com/dbiir/UER-py/wiki/Modelzoo), or via HuggingFace from the links below: | | Link | | ----------------- | :----------------------------: | | **GPT2-distil** | [**L=6/H=768**][distil] | | **GPT2** | [**L=12/H=768**][base] | | **GPT2-medium** | [**L=24/H=1024**][medium] | | **GPT2-large** | [**L=36/H=1280**][large] | | **GPT2-xlarge** | [**L=48/H=1600**][xlarge] | Note that the 6-layer model is called GPT2-distil model because it follows the configuration of [distilgpt2](https://huggingface.co/distilgpt2), and the pre-training does not involve the supervision of larger models. 
## How to use You can use the model directly with a pipeline for text generation (take the case of GPT2-distil): ```python >>> from transformers import BertTokenizer, GPT2LMHeadModel, TextGenerationPipeline >>> tokenizer = BertTokenizer.from_pretrained("uer/gpt2-distil-chinese-cluecorpussmall") >>> model = GPT2LMHeadModel.from_pretrained("uer/gpt2-distil-chinese-cluecorpussmall") >>> text_generator = TextGenerationPipeline(model, tokenizer) >>> text_generator("这是很久之前的事情了", max_length=100, do_sample=True) [{'generated_text': '这是很久之前的事情了 。 我 现 在 想 起 来 就 让 自 己 很 伤 心 , 很 失 望 。 我 现 在 想 到 , 我 觉 得 大 多 数 人 的 生 活 比 我 的 生 命 还 要 重 要 , 对 一 些 事 情 的 看 法 , 对 一 些 人 的 看 法 , 都 是 在 发 泄 。 但 是 , 我 们 的 生 活 是 需 要 一 个 信 用 体 系 的 。 我 不 知'}] ``` ## Training data [CLUECorpusSmall](https://github.com/CLUEbenchmark/CLUECorpus2020/) is used as training data. ## Training procedure The GPT2-xlarge model is pre-trained by [TencentPretrain](https://github.com/Tencent/TencentPretrain), and the others are pre-trained by [UER-py](https://github.com/dbiir/UER-py/) on [Tencent Cloud](https://cloud.tencent.com/). We pre-train 1,000,000 steps with a sequence length of 128 and then pre-train 250,000 additional steps with a sequence length of 1024. 
For the models pre-trained by UER-py, take the case of GPT2-distil Stage1: ``` python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \ --vocab_path models/google_zh_vocab.txt \ --dataset_path cluecorpussmall_lm_seq128_dataset.pt \ --seq_length 128 --processes_num 32 --data_processor lm ``` ``` python3 pretrain.py --dataset_path cluecorpussmall_lm_seq128_dataset.pt \ --vocab_path models/google_zh_vocab.txt \ --config_path models/gpt2/distil_config.json \ --output_model_path models/cluecorpussmall_gpt2_distil_seq128_model.bin \ --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \ --total_steps 1000000 --save_checkpoint_steps 100000 --report_steps 50000 \ --learning_rate 1e-4 --batch_size 64 ``` Stage2: ``` python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \ --vocab_path models/google_zh_vocab.txt \ --dataset_path cluecorpussmall_lm_seq1024_dataset.pt \ --seq_length 1024 --processes_num 32 --data_processor lm ``` ``` python3 pretrain.py --dataset_path cluecorpussmall_lm_seq1024_dataset.pt \ --vocab_path models/google_zh_vocab.txt \ --pretrained_model_path models/cluecorpussmall_gpt2_distil_seq128_model.bin-1000000 \ --config_path models/gpt2/distil_config.json \ --output_model_path models/cluecorpussmall_gpt2_distil_seq1024_model.bin \ --world_size 8 --gpu_ranks 0 1 2 3 4 5 6 7 \ --total_steps 250000 --save_checkpoint_steps 50000 --report_steps 10000 \ --learning_rate 5e-5 --batch_size 16 ``` Finally, we convert the pre-trained model into Huggingface's format: ``` python3 scripts/convert_gpt2_from_uer_to_huggingface.py --input_model_path models/cluecorpussmall_gpt2_distil_seq1024_model.bin-250000 \ --output_model_path pytorch_model.bin \ --layers_num 6 ``` For GPT2-xlarge model, we use TencetPretrain. 
Stage1: ``` python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \ --vocab_path models/google_zh_vocab.txt \ --dataset_path cluecorpussmall_lm_seq128_dataset.pt \ --seq_length 128 --processes_num 32 --data_processor lm ``` ``` deepspeed pretrain.py --deepspeed --deepspeed_config models/deepspeed_config.json \ --dataset_path corpora/cluecorpussmall_lm_seq128_dataset.pt \ --vocab_path models/google_zh_vocab.txt \ --config_path models/gpt2/xlarge_config.json \ --output_model_path models/cluecorpussmall_gpt2_xlarge_seq128_model \ --world_size 8 --batch_size 64 \ --total_steps 1000000 --save_checkpoint_steps 100000 --report_steps 50000 \ --deepspeed_checkpoint_activations --deepspeed_checkpoint_layers_num 24 ``` Before stage2, we extract fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints: ``` python3 models/cluecorpussmall_gpt2_xlarge_seq128_model/zero_to_fp32.py models/cluecorpussmall_gpt2_xlarge_seq128_model/ \ models/cluecorpussmall_gpt2_xlarge_seq128_model.bin ``` Stage2: ``` python3 preprocess.py --corpus_path corpora/cluecorpussmall.txt \ --vocab_path models/google_zh_vocab.txt \ --dataset_path cluecorpussmall_lm_seq1024_dataset.pt \ --seq_length 1024 --processes_num 32 --data_processor lm ``` ``` deepspeed pretrain.py --deepspeed --deepspeed_config models/deepspeed_config.json \ --dataset_path corpora/cluecorpussmall_lm_seq1024_dataset.pt \ --vocab_path models/google_zh_vocab.txt \ --config_path models/gpt2/xlarge_config.json \ --pretrained_model_path models/cluecorpussmall_gpt2_xlarge_seq128_model.bin \ --output_model_path models/cluecorpussmall_gpt2_xlarge_seq1024_model \ --world_size 8 --batch_size 16 --learning_rate 5e-5 \ --total_steps 250000 --save_checkpoint_steps 50000 --report_steps 10000 \ --deepspeed_checkpoint_activations --deepspeed_checkpoint_layers_num 6 ``` Then, we extract fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints: ``` python3 models/cluecorpussmall_gpt2_xlarge_seq1024_model/zero_to_fp32.py 
models/cluecorpussmall_gpt2_xlarge_seq1024_model/ \ models/cluecorpussmall_gpt2_xlarge_seq1024_model.bin ``` Finally, we convert the pre-trained model into Huggingface's format: ``` python3 scripts/convert_gpt2_from_tencentpretrain_to_huggingface.py --input_model_path models/cluecorpussmall_gpt2_xlarge_seq1024_model.bin \ --output_model_path pytorch_model.bin \ --layers_num 48 ``` ### BibTeX entry and citation info ``` @article{radford2019language, title={Language Models are Unsupervised Multitask Learners}, author={Radford, Alec and Wu, Jeff and Child, Rewon and Luan, David and Amodei, Dario and Sutskever, Ilya}, year={2019} } @article{zhao2019uer, title={UER: An Open-Source Toolkit for Pre-training Models}, author={Zhao, Zhe and Chen, Hui and Zhang, Jinbin and Zhao, Xin and Liu, Tao and Lu, Wei and Chen, Xi and Deng, Haotang and Ju, Qi and Du, Xiaoyong}, journal={EMNLP-IJCNLP 2019}, pages={241}, year={2019} } @article{zhao2023tencentpretrain, title={TencentPretrain: A Scalable and Flexible Toolkit for Pre-training Models of Different Modalities}, author={Zhao, Zhe and Li, Yudong and Hou, Cheng and Zhao, Jing and others}, journal={ACL 2023}, pages={217}, year={2023} } ``` [distil]:https://huggingface.co/uer/gpt2-distil-chinese-cluecorpussmall [base]:https://huggingface.co/uer/gpt2-chinese-cluecorpussmall [medium]:https://huggingface.co/uer/gpt2-medium-chinese-cluecorpussmall [large]:https://huggingface.co/uer/gpt2-large-chinese-cluecorpussmall [xlarge]:https://huggingface.co/uer/gpt2-xlarge-chinese-cluecorpussmall
stablediffusionapi/cyberrealistic-41
stablediffusionapi
"2023-12-22T21:10:12Z"
4,610
1
diffusers
[ "diffusers", "stablediffusionapi.com", "stable-diffusion-api", "text-to-image", "ultra-realistic", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2023-12-22T21:07:53Z"
--- license: creativeml-openrail-m tags: - stablediffusionapi.com - stable-diffusion-api - text-to-image - ultra-realistic pinned: true --- # cyberrealistic-41 API Inference ![generated from stablediffusionapi.com](https://pub-3626123a908346a7a8be8d9295f44e26.r2.dev/generations/13867797011703279242.png) ## Get API Key Get API key from [Stable Diffusion API](http://stablediffusionapi.com/), No Payment needed. Replace Key in below code, change **model_id** to "cyberrealistic-41" Coding in PHP/Node/Java etc? Have a look at docs for more code examples: [View docs](https://stablediffusionapi.com/docs) Try model for free: [Generate Images](https://stablediffusionapi.com/models/cyberrealistic-41) Model link: [View model](https://stablediffusionapi.com/models/cyberrealistic-41) Credits: [View credits](https://civitai.com/?query=cyberrealistic-41) View all models: [View Models](https://stablediffusionapi.com/models) import requests import json url = "https://stablediffusionapi.com/api/v4/dreambooth" payload = json.dumps({ "key": "your_api_key", "model_id": "cyberrealistic-41", "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K", "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime", "width": "512", "height": "512", "samples": "1", "num_inference_steps": "30", "safety_checker": "no", "enhance_prompt": "yes", "seed": None, "guidance_scale": 7.5, "multi_lingual": "no", "panorama": "no", "self_attention": "no", "upscale": "no", "embeddings": "embeddings_model_id", "lora": 
"lora_model_id", "webhook": None, "track_id": None }) headers = { 'Content-Type': 'application/json' } response = requests.request("POST", url, headers=headers, data=payload) print(response.text) > Use this coupon code to get 25% off **DMGG0RBN**
google/codegemma-7b
google
"2024-06-27T14:09:57Z"
4,605
137
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "license:gemma", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-03-21T16:56:49Z"
--- library_name: transformers license: gemma license_link: https://ai.google.dev/gemma/terms extra_gated_heading: Access CodeGemma on Hugging Face extra_gated_prompt: To access CodeGemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately. extra_gated_button_content: Acknowledge license --- # CodeGemma Model Page : [CodeGemma](https://ai.google.dev/gemma/docs/codegemma) Resources and Technical Documentation : [Technical Report](https://goo.gle/codegemma) : [Responsible Generative AI Toolkit](https://ai.google.dev/responsible) Terms of Use : [Terms](https://www.kaggle.com/models/google/codegemma/license/consent/verify/huggingface?returnModelRepoId=google/codegemma-7b) Authors : Google ## Model Information Summary description and brief definition of inputs and outputs. ### Description CodeGemma is a collection of lightweight open code models built on top of Gemma. CodeGemma models are text-to-text and text-to-code decoder-only models and are available as a 7 billion pretrained variant that specializes in code completion and code generation tasks, a 7 billion parameter instruction-tuned variant for code chat and instruction following and a 2 billion parameter pretrained variant for fast code completion. 
| | [codegemma-2b](https://huggingface.co/google/codegemma-2b) | [**codegemma-7b**](https://huggingface.co/google/codegemma-7b) | [codegemma-7b-it](https://huggingface.co/google/codegemma-7b-it) | |----------------------------------|:----------------------------------------------------------------:|:----------------------------------------------------------:|:----------------------------------------------------------------:| | Code Completion | ✅ | ✅ | | | Generation from natural language | | ✅ | ✅ | | Chat | | | ✅ | | Instruction Following | | | ✅ | ### Sample Usage #### For Code Completion Code completion can be used for infilling inside code editors. CodeGemma was trained for this task using the fill-in-the-middle (FIM) objective, where you provide a prefix and a suffix as context for the completion. The following tokens are used to separate the different parts of the input: - `<|fim_prefix|>` precedes the context before the completion we want to run. - `<|fim_suffix|>` precedes the suffix. You must put this token exactly where the cursor would be positioned in an editor, as this is the location that will be completed by the model. - `<|fim_middle|>` is the prompt that invites the model to run the generation. In addition to these, there's also `<|file_separator|>`, which is used to provide multi-file contexts. Please, make sure to not provide any extra spaces or newlines around the tokens, other than those that would naturally occur in the code fragment you want to complete. 
Here's an example: ```python from transformers import GemmaTokenizer, AutoModelForCausalLM model_id = "google/codegemma-7b" tokenizer = GemmaTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id) prompt = '''\ <|fim_prefix|>import datetime def calculate_age(birth_year): """Calculates a person's age based on their birth year.""" current_year = datetime.date.today().year <|fim_suffix|> return age<|fim_middle|>\ ''' inputs = tokenizer(prompt, return_tensors="pt").to(model.device) prompt_len = inputs["input_ids"].shape[-1] outputs = model.generate(**inputs, max_new_tokens=100) print(tokenizer.decode(outputs[0][prompt_len:])) ``` This may return something like the following: ``` age = current_year - birth_year<|file_separator|>test_calculate_age.py <|fim_suffix|> assert calculate_age(1990) == 33 assert calculate_age(1980) == 43 assert calculate_age(1970) == 53 assert calculate_age(1960) == 63 assert calculate_age(1950) == 73 ``` Note the extra content after the correct completion. The model returns the completion, followed by one of the FIM tokens or the EOS token. You should ignore everything that comes after any of these tokens. 
A good way to achieve this is by providing a list of terminators to the `generate` function, like this: ```python FIM_PREFIX = '<|fim_prefix|>' FIM_SUFFIX = '<|fim_suffix|>' FIM_MIDDLE = '<|fim_middle|>' FIM_FILE_SEPARATOR = '<|file_separator|>' terminators = tokenizer.convert_tokens_to_ids([FIM_PREFIX, FIM_MIDDLE, FIM_SUFFIX, FIM_FILE_SEPARATOR]) terminators += [tokenizer.eos_token_id] outputs = model.generate( **inputs, max_new_tokens=100, eos_token_id=terminators, ) ``` In this case, generation stops as soon as the first delimiter is found in the response: ``` age = current_year - birth_year<|file_separator|> ``` #### For Code Generation ```python from transformers import GemmaTokenizer, AutoModelForCausalLM tokenizer = GemmaTokenizer.from_pretrained("google/codegemma-7b") model = AutoModelForCausalLM.from_pretrained("google/codegemma-7b") input_text = "Write me a Python function to calculate the nth fibonacci number." input_ids = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` ### Inputs and Outputs Inputs : For pretrained model variants: code prefix and/or suffix for code completion and generation scenarios, or natural language text or prompt : For instruction tuned model variant: natural language text or prompt Outputs : For pretrained model variants: fill-in-the-middle code completion, code and natural language : For instruction tuned model variant: code and natural language ## Model Data Data used for model training and how the data was processed. ### Training Dataset Using Gemma as the base model, CodeGemma 2B and 7B pretrained variants are further trained on an additional 500 billion tokens of primarily English language data from publicly available code repositories, open source mathematics datasets and synthetically generated code. 
### Training Data Processing The following data pre-processing techniques were applied: * FIM Pretrained CodeGemma models focus on fill-in-the-middle (FIM) tasks. The models are trained to work with both PSM and SPM modes. Our FIM settings are 80% FIM rate with 50-50 PSM/SPM. * Dependency Graph-based Packing and Unit Test-based Lexical Packing techniques: To improve model alignment with real-world applications, we structured training examples at the project/repository level to co-locate the most relevant source files within each repository. Specifically, we employed two heuristic techniques: dependency graph-based packing and unit test-based lexical packing * We developed a novel technique for splitting the documents into prefix, middle, and suffix to make the suffix start in a more syntactically natural point rather than purely random distribution. * Safety: Similarly to Gemma, we deployed rigorous safety filtering including filtering personal data, CSAM filtering and other filtering based on content quality and safety in line with [our policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11). ## Implementation Information Information about the hardware and software used to train the models. ### Hardware CodeGemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e). ### Software Training was done using [JAX](https://github.com/google/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/). ## Evaluation Information Model evaluation metrics and results. 
### Evaluation Approach We evaluate CodeGemma on a variety of academic benchmarks across several domains: * Code completion benchmarks: HumanEval Single Line and Multiple Line Infilling * Code generation benchmarks: HumanEval, MBPP, BabelCode (C++, C#, Go, Java, JavaScript, Kotlin, Python, Rust) * Q&A: BoolQ, PIQA, TriviaQA * Natural Language: ARC-Challenge, HellaSwag, MMLU, WinoGrande * Math Reasoning: GSM8K, MATH ### Evaluation Results #### Coding Benchmarks Benchmark | 2B | 7B | 7B-IT ----------------------|-------|-------|------ HumanEval | 31.1 | 44.5 | 56.1 MBPP | 43.6 | 56.2 | 54.2 HumanEval Single Line | 78.41 | 76.09 | 68.25 HumanEval Multi Line | 51.44 | 58.44 | 20.05 BC HE C++ | 24.2 | 32.9 | 42.2 BC HE C# | 10.6 | 22.4 | 26.7 BC HE Go | 20.5 | 21.7 | 28.6 BC HE Java | 29.2 | 41.0 | 48.4 BC HE JavaScript | 21.7 | 39.8 | 46.0 BC HE Kotlin | 28.0 | 39.8 | 51.6 BC HE Python | 21.7 | 42.2 | 48.4 BC HE Rust | 26.7 | 34.1 | 36.0 BC MBPP C++ | 47.1 | 53.8 | 56.7 BC MBPP C# | 28.7 | 32.5 | 41.2 BC MBPP Go | 45.6 | 43.3 | 46.2 BC MBPP Java | 41.8 | 50.3 | 57.3 BC MBPP JavaScript | 45.3 | 58.2 | 61.4 BC MBPP Kotlin | 46.8 | 54.7 | 59.9 BC MBPP Python | 38.6 | 59.1 | 62.0 BC MBPP Rust | 45.3 | 52.9 | 53.5 #### Natural Language Benchmarks ![CodeGemma Natural Language Benchmarks](./codegemma_nl_benchmarks.png) ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Human evaluation on prompts covering content safety and representational harms. See the [Gemma model card](https://ai.google.dev/gemma/docs/model_card#evaluation_approach) for more details on evaluation approach. 
* Specific testing of cyber-offence capabilities, focusing on testing autonomous hacking capabilities and ensuring potential harms are limited. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11) for categories such as child safety, content safety, representational harms, memorization, large-scale harms. See the [Gemma model card](https://ai.google.dev/gemma/docs/model_card#evaluation_results) for more details. ## Model Usage & Limitations These models have certain limitations that users should be aware of. ### Intended Usage Code Gemma models have a wide range of applications, which vary between IT and PT models. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. Code Completion : PT models can be used to complete code with an IDE extension Code Generation : IT model can be used to generate code with or without an IDE extension Code Conversation : IT model can power conversation interfaces which discuss code. Code Education : IT model supports interactive code learning experiences, aids in syntax correction or provides coding practice. ### Known Limitations Large Language Models (LLMs) have limitations based on their training data and the inherent limitations of the technology. See the [Gemma model card](https://ai.google.dev/gemma/docs/model_card#evaluation_results) for more details on the limitations of LLMs. ### Ethical Considerations & Risks The development of large language models (LLMs) raises several ethical concerns. We have carefully considered multiple aspects in the development of these models. 
Please refer to [the same discussion](https://ai.google.dev/gemma/docs/model_card#ethical_considerations_and_risks) in the Gemma model card for model details. ### Benefits At the time of release, this family of models provides high-performance open code-focused large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models. Using the coding benchmark evaluation metrics described in this document, these models have shown to provide superior performance to other, comparably-sized open model alternatives.
mdarhri00/named-entity-recognition
mdarhri00
"2023-10-08T17:02:56Z"
4,602
36
transformers
[ "transformers", "pytorch", "bert", "token-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
token-classification
"2023-09-18T16:43:47Z"
--- pipeline_tag: token-classification --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> The NER model developed using BERT is designed to recognize named entities in text for multiple languages, including Arabic, French, and English. It is adaptable to new labels, allowing users to extend its capabilities beyond the initial set of 11 predefined labels, which are: 'Person_Name', 'Brand_vehicule', 'Model_vehicule', 'Organization_Name', 'location', 'phone_number', 'IBAN', 'credit_card', 'date_time', 'email', 'Identification_Number' ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> - **Developed by:** yahya mdarhri - **Model type:** TOKEN CLASSIFICATION - **Finetuned from model :** bert-base-multilingual-cased - **License:** OPEN SOURCE ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> Named Entity Recognition (NER): The primary purpose of this model is to perform Named Entity Recognition (NER) in text data. It identifies and categorizes entities such as names of people, organizations, locations, dates, and more. Multilingual Support: The model is designed to support multiple languages, including Arabic, French, and English. It can be used by NLP practitioners, researchers, and developers working with text data in these languages. Adaptability: Users can adapt the model to recognize new entity labels by providing labeled training data for the desired categories. This feature makes it versatile for various NER tasks. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> Bias and Fairness: Users and affected parties should be aware of potential biases in entity recognition, especially when it comes to personal names or other sensitive categories. 
Efforts should be made to minimize bias and ensure fairness in entity recognition. Privacy: The model should be used responsibly to protect the privacy of individuals and organizations. When handling personally identifiable information (PII), data protection laws and privacy guidelines should be followed. Transparency: Transparency in how the model operates, including its training data and evaluation metrics, is crucial to build trust with users and affected parties. User Consent: If the model is used in applications where user data is processed, obtaining informed consent from users for data processing is essential. We value your feedback! Please share your thoughts on this model. Thank you! ## Model Card Contact [email protected]
Xenova/bge-reranker-base
Xenova
"2024-03-16T21:49:51Z"
4,599
5
transformers.js
[ "transformers.js", "onnx", "xlm-roberta", "text-classification", "region:us" ]
text-classification
"2023-09-13T15:50:14Z"
--- library_name: transformers.js --- https://huggingface.co/BAAI/bge-reranker-base with ONNX weights to be compatible with Transformers.js. Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
microsoft/swin-base-patch4-window7-224-in22k
microsoft
"2023-06-27T10:46:44Z"
4,598
12
transformers
[ "transformers", "pytorch", "tf", "safetensors", "swin", "image-classification", "vision", "dataset:imagenet-21k", "arxiv:2103.14030", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
image-classification
"2022-03-02T23:29:05Z"
--- license: apache-2.0 tags: - vision - image-classification datasets: - imagenet-21k widget: - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg example_title: Tiger - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/teapot.jpg example_title: Teapot - src: https://huggingface.co/datasets/mishig/sample_images/resolve/main/palace.jpg example_title: Palace --- # Swin Transformer (base-sized model) Swin Transformer model pre-trained on ImageNet-21k (14 million images, 21,841 classes) at resolution 224x224. It was introduced in the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Liu et al. and first released in [this repository](https://github.com/microsoft/Swin-Transformer). Disclaimer: The team releasing Swin Transformer did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The Swin Transformer is a type of Vision Transformer. It builds hierarchical feature maps by merging image patches (shown in gray) in deeper layers and has linear computation complexity to input image size due to computation of self-attention only within each local window (shown in red). It can thus serve as a general-purpose backbone for both image classification and dense recognition tasks. In contrast, previous vision Transformers produce feature maps of a single low resolution and have quadratic computation complexity to input image size due to computation of self-attention globally. ![model image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/swin_transformer_architecture.png) [Source](https://paperswithcode.com/method/swin-transformer) ## Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=swin) to look for fine-tuned versions on a task that interests you. 
### How to use Here is how to use this model to classify an image of the COCO 2017 dataset into one of the 21,841 ImageNet-21k classes: ```python from transformers import AutoImageProcessor, SwinForImageClassification from PIL import Image import requests url = "http://images.cocodataset.org/val2017/000000039769.jpg" image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained("microsoft/swin-base-patch4-window7-224-in22k") model = SwinForImageClassification.from_pretrained("microsoft/swin-base-patch4-window7-224-in22k") inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 21,841 ImageNet-21k classes predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", model.config.id2label[predicted_class_idx]) ``` For more code examples, we refer to the [documentation](https://huggingface.co/transformers/model_doc/swin.html#). ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2103-14030, author = {Ze Liu and Yutong Lin and Yue Cao and Han Hu and Yixuan Wei and Zheng Zhang and Stephen Lin and Baining Guo}, title = {Swin Transformer: Hierarchical Vision Transformer using Shifted Windows}, journal = {CoRR}, volume = {abs/2103.14030}, year = {2021}, url = {https://arxiv.org/abs/2103.14030}, eprinttype = {arXiv}, eprint = {2103.14030}, timestamp = {Thu, 08 Apr 2021 07:53:26 +0200}, biburl = {https://dblp.org/rec/journals/corr/abs-2103-14030.bib}, bibsource = {dblp.org/rec/journals/corr/abs-2103-14030.bib, https://dblp.org} } ```
RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf
RichardErkhov
"2024-06-25T01:23:39Z"
4,597
0
null
[ "gguf", "region:us" ]
null
"2024-06-24T22:25:50Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) SmolLlama-1.5B-Sorted - GGUF - Model creator: https://huggingface.co/ToastyPigeon/ - Original model: https://huggingface.co/ToastyPigeon/SmolLlama-1.5B-Sorted/ | Name | Quant method | Size | | ---- | ---- | ---- | | [SmolLlama-1.5B-Sorted.Q2_K.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q2_K.gguf) | Q2_K | 0.55GB | | [SmolLlama-1.5B-Sorted.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.IQ3_XS.gguf) | IQ3_XS | 0.61GB | | [SmolLlama-1.5B-Sorted.IQ3_S.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.IQ3_S.gguf) | IQ3_S | 0.64GB | | [SmolLlama-1.5B-Sorted.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q3_K_S.gguf) | Q3_K_S | 0.29GB | | [SmolLlama-1.5B-Sorted.IQ3_M.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.IQ3_M.gguf) | IQ3_M | 0.29GB | | [SmolLlama-1.5B-Sorted.Q3_K.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q3_K.gguf) | Q3_K | 0.29GB | | [SmolLlama-1.5B-Sorted.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q3_K_M.gguf) | Q3_K_M | 0.29GB | | [SmolLlama-1.5B-Sorted.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q3_K_L.gguf) | Q3_K_L | 0.29GB | | 
[SmolLlama-1.5B-Sorted.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.IQ4_XS.gguf) | IQ4_XS | 0.79GB | | [SmolLlama-1.5B-Sorted.Q4_0.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q4_0.gguf) | Q4_0 | 0.82GB | | [SmolLlama-1.5B-Sorted.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.IQ4_NL.gguf) | IQ4_NL | 0.83GB | | [SmolLlama-1.5B-Sorted.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q4_K_S.gguf) | Q4_K_S | 0.83GB | | [SmolLlama-1.5B-Sorted.Q4_K.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q4_K.gguf) | Q4_K | 0.87GB | | [SmolLlama-1.5B-Sorted.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q4_K_M.gguf) | Q4_K_M | 0.87GB | | [SmolLlama-1.5B-Sorted.Q4_1.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q4_1.gguf) | Q4_1 | 0.91GB | | [SmolLlama-1.5B-Sorted.Q5_0.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q5_0.gguf) | Q5_0 | 1.0GB | | [SmolLlama-1.5B-Sorted.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q5_K_S.gguf) | Q5_K_S | 1.0GB | | [SmolLlama-1.5B-Sorted.Q5_K.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q5_K.gguf) | Q5_K | 1.02GB | | [SmolLlama-1.5B-Sorted.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q5_K_M.gguf) | Q5_K_M | 1.02GB | | 
[SmolLlama-1.5B-Sorted.Q5_1.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q5_1.gguf) | Q5_1 | 1.08GB | | [SmolLlama-1.5B-Sorted.Q6_K.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q6_K.gguf) | Q6_K | 1.18GB | | [SmolLlama-1.5B-Sorted.Q8_0.gguf](https://huggingface.co/RichardErkhov/ToastyPigeon_-_SmolLlama-1.5B-Sorted-gguf/blob/main/SmolLlama-1.5B-Sorted.Q8_0.gguf) | Q8_0 | 1.53GB | Original model description: --- base_model: [] tags: - mergekit - merge license: apache-2.0 --- # SmolLlama-1.5B-Sorted Bigger than "Tiny" but still very smol. This is a self-stack merge of TinyLlama 1.1B using a sorted-layer arrangement, resulting in 32 model layers and 1.54B model parameters. In comparison to [SmolLlama-1.5B](https://huggingface.co/ToastyPigeon/SmolLlama-1.5B), the Sorted version has the repeated middle layers placed in ascending order (see merge config). This is a proof-of-concept model and should not be used for anything. ## Merge Details ### Merge Method This model was merged using the passthrough merge method. 
### Models Merged The following models were included in the merge: * TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T ### Configuration The following YAML configuration was used to produce this model: ```yaml slices: - sources: #non-repeating layers - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [0, 6] - sources: #begin repeating layers - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [6, 7] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [6, 7] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [7, 8] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [7, 8] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [8, 9] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [8, 9] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [9, 10] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [9, 10] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [10, 11] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [10, 11] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [11, 12] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [11, 12] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [12, 13] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [12, 13] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [13, 14] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [13, 14] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [14, 15] - sources: - model: 
TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [14, 15] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [15, 16] - sources: - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [15, 16] - sources: #non-repeating layers - model: TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T layer_range: [16, 22] merge_method: passthrough dtype: float16 ```
mradermacher/prometheus-2-llama-3-8b-i1-GGUF
mradermacher
"2024-06-17T18:15:19Z"
4,596
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "dataset:prometheus-eval/Preference-Collection", "dataset:prometheus-eval/Feedback-Collection", "base_model:chargoddard/prometheus-2-llama-3-8b", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2024-06-17T13:10:30Z"
--- base_model: chargoddard/prometheus-2-llama-3-8b datasets: - prometheus-eval/Preference-Collection - prometheus-eval/Feedback-Collection language: - en library_name: transformers license: apache-2.0 quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/chargoddard/prometheus-2-llama-3-8b <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | 
[GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | 
[GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/prometheus-2-llama-3-8b-i1-GGUF/resolve/main/prometheus-2-llama-3-8b.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
AlekseyKorshuk/vicuna-7b
AlekseyKorshuk
"2023-04-10T21:34:38Z"
4,594
109
transformers
[ "transformers", "pytorch", "llama", "text-generation", "license:other", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-04-05T03:30:23Z"
--- license: other --- # Vicuna 7B without "ethics" filtering This repository contains an alternative version of the [Vicuna 7B model](https://huggingface.co/lmsys/vicuna-7b-delta-v0). This model was natively fine-tuned using ShareGPT data, but without the "ethics" filtering used for the original Vicuna. [A GPTQ quantised 4-bit version is available here](https://huggingface.co/TheBloke/vicuna-AlekseyKorshuk-7B-GPTQ-4bit-128g). # Original Vicuna Model Card ## Model details **Model type:** Vicuna is an open-source chatbot trained by fine-tuning LLaMA on user-shared conversations collected from ShareGPT. It is an auto-regressive language model, based on the transformer architecture. **Model date:** Vicuna was trained between March 2023 and April 2023. **Organizations developing the model:** The Vicuna team with members from UC Berkeley, CMU, Stanford, and UC San Diego. **Paper or resources for more information:** https://vicuna.lmsys.org/ **License:** Apache License 2.0 **Where to send questions or comments about the model:** https://github.com/lm-sys/FastChat/issues ## Intended use **Primary intended uses:** The primary use of Vicuna is research on large language models and chatbots. **Primary intended users:** The primary intended users of the model are researchers and hobbyists in natural language processing, machine learning, and artificial intelligence. ## Training dataset 70K conversations collected from ShareGPT.com. ## Evaluation dataset A preliminary evaluation of the model quality is conducted by creating a set of 80 diverse questions and utilizing GPT-4 to judge the model outputs. See https://vicuna.lmsys.org/ for more details.
mradermacher/Poppy_Porpoise-Biomix-i1-GGUF
mradermacher
"2024-06-02T16:39:33Z"
4,594
1
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:Nitral-AI/Poppy_Porpoise-Biomix", "endpoints_compatible", "region:us" ]
null
"2024-06-02T09:08:11Z"
--- base_model: Nitral-AI/Poppy_Porpoise-Biomix language: - en library_name: transformers quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/Nitral-AI/Poppy_Porpoise-Biomix <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably 
better | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | 
[GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Poppy_Porpoise-Biomix-i1-GGUF/resolve/main/Poppy_Porpoise-Biomix.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
superb/wav2vec2-base-superb-ks
superb
"2021-11-04T16:03:39Z"
4,588
12
transformers
[ "transformers", "pytorch", "wav2vec2", "audio-classification", "speech", "audio", "en", "dataset:superb", "arxiv:2105.01051", "license:apache-2.0", "endpoints_compatible", "region:us" ]
audio-classification
"2022-03-02T23:29:05Z"
--- language: en datasets: - superb tags: - speech - audio - wav2vec2 - audio-classification widget: - example_title: Speech Commands "down" src: https://cdn-media.huggingface.co/speech_samples/keyword_spotting_down.wav - example_title: Speech Commands "go" src: https://cdn-media.huggingface.co/speech_samples/keyword_spotting_go.wav license: apache-2.0 --- # Wav2Vec2-Base for Keyword Spotting ## Model description This is a ported version of [S3PRL's Wav2Vec2 for the SUPERB Keyword Spotting task](https://github.com/s3prl/s3prl/tree/master/s3prl/downstream/speech_commands). The base model is [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base), which is pretrained on 16kHz sampled speech audio. When using the model make sure that your speech input is also sampled at 16Khz. For more information refer to [SUPERB: Speech processing Universal PERformance Benchmark](https://arxiv.org/abs/2105.01051) ## Task and dataset description Keyword Spotting (KS) detects preregistered keywords by classifying utterances into a predefined set of words. The task is usually performed on-device for the fast response time. Thus, accuracy, model size, and inference time are all crucial. SUPERB uses the widely used [Speech Commands dataset v1.0](https://www.tensorflow.org/datasets/catalog/speech_commands) for the task. The dataset consists of ten classes of keywords, a class for silence, and an unknown class to include the false positive. For the original model's training and evaluation instructions refer to the [S3PRL downstream task README](https://github.com/s3prl/s3prl/tree/master/s3prl/downstream#ks-keyword-spotting). 
## Usage examples You can use the model via the Audio Classification pipeline: ```python from datasets import load_dataset from transformers import pipeline dataset = load_dataset("anton-l/superb_demo", "ks", split="test") classifier = pipeline("audio-classification", model="superb/wav2vec2-base-superb-ks") labels = classifier(dataset[0]["file"], top_k=5) ``` Or use the model directly: ```python import torch from datasets import load_dataset from transformers import Wav2Vec2ForSequenceClassification, Wav2Vec2FeatureExtractor from torchaudio.sox_effects import apply_effects_file effects = [["channels", "1"], ["rate", "16000"], ["gain", "-3.0"]] def map_to_array(example): speech, _ = apply_effects_file(example["file"], effects) example["speech"] = speech.squeeze(0).numpy() return example # load a demo dataset and read audio files dataset = load_dataset("anton-l/superb_demo", "ks", split="test") dataset = dataset.map(map_to_array) model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks") feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks") # compute attention masks and normalize the waveform if needed inputs = feature_extractor(dataset[:4]["speech"], sampling_rate=16000, padding=True, return_tensors="pt") logits = model(**inputs).logits predicted_ids = torch.argmax(logits, dim=-1) labels = [model.config.id2label[_id] for _id in predicted_ids.tolist()] ``` ## Eval results The evaluation metric is accuracy. 
| | **s3prl** | **transformers** | |--------|-----------|------------------| |**test**| `0.9623` | `0.9643` | ### BibTeX entry and citation info ```bibtex @article{yang2021superb, title={SUPERB: Speech processing Universal PERformance Benchmark}, author={Yang, Shu-wen and Chi, Po-Han and Chuang, Yung-Sung and Lai, Cheng-I Jeff and Lakhotia, Kushal and Lin, Yist Y and Liu, Andy T and Shi, Jiatong and Chang, Xuankai and Lin, Guan-Ting and others}, journal={arXiv preprint arXiv:2105.01051}, year={2021} } ```
nvidia/canary-1b
nvidia
"2024-05-08T00:06:51Z"
4,586
203
nemo
[ "nemo", "automatic-speech-recognition", "automatic-speech-translation", "speech", "audio", "Transformer", "FastConformer", "Conformer", "pytorch", "NeMo", "hf-asr-leaderboard", "en", "de", "es", "fr", "dataset:librispeech_asr", "dataset:fisher_corpus", "dataset:Switchboard-1", "dataset:WSJ-0", "dataset:WSJ-1", "dataset:National-Singapore-Corpus-Part-1", "dataset:National-Singapore-Corpus-Part-6", "dataset:vctk", "dataset:voxpopuli", "dataset:europarl", "dataset:multilingual_librispeech", "dataset:mozilla-foundation/common_voice_8_0", "dataset:MLCommons/peoples_speech", "arxiv:2305.05084", "arxiv:1706.03762", "license:cc-by-nc-4.0", "model-index", "region:us" ]
automatic-speech-recognition
"2024-02-07T17:20:55Z"
--- license: cc-by-nc-4.0 language: - en - de - es - fr library_name: nemo datasets: - librispeech_asr - fisher_corpus - Switchboard-1 - WSJ-0 - WSJ-1 - National-Singapore-Corpus-Part-1 - National-Singapore-Corpus-Part-6 - vctk - voxpopuli - europarl - multilingual_librispeech - mozilla-foundation/common_voice_8_0 - MLCommons/peoples_speech thumbnail: null tags: - automatic-speech-recognition - automatic-speech-translation - speech - audio - Transformer - FastConformer - Conformer - pytorch - NeMo - hf-asr-leaderboard widget: - example_title: Librispeech sample 1 src: https://cdn-media.huggingface.co/speech_samples/sample1.flac - example_title: Librispeech sample 2 src: https://cdn-media.huggingface.co/speech_samples/sample2.flac model-index: - name: canary-1b results: - task: name: Automatic Speech Recognition type: automatic-speech-recognition dataset: name: LibriSpeech (other) type: librispeech_asr config: other split: test args: language: en metrics: - name: Test WER type: wer value: 2.89 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: SPGI Speech type: kensho/spgispeech config: test split: test args: language: en metrics: - name: Test WER type: wer value: 4.79 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: Mozilla Common Voice 16.1 type: mozilla-foundation/common_voice_16_1 config: en split: test args: language: en metrics: - name: Test WER (En) type: wer value: 7.97 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: Mozilla Common Voice 16.1 type: mozilla-foundation/common_voice_16_1 config: de split: test args: language: de metrics: - name: Test WER (De) type: wer value: 4.61 - task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: Mozilla Common Voice 16.1 type: mozilla-foundation/common_voice_16_1 config: es split: test args: language: es metrics: - name: Test WER (ES) type: wer value: 3.99 
- task: type: Automatic Speech Recognition name: automatic-speech-recognition dataset: name: Mozilla Common Voice 16.1 type: mozilla-foundation/common_voice_16_1 config: fr split: test args: language: fr metrics: - name: Test WER (Fr) type: wer value: 6.53 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: FLEURS type: google/fleurs config: en_us split: test args: language: en-de metrics: - name: Test BLEU (En->De) type: bleu value: 32.15 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: FLEURS type: google/fleurs config: en_us split: test args: language: en-de metrics: - name: Test BLEU (En->Es) type: bleu value: 22.66 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: FLEURS type: google/fleurs config: en_us split: test args: language: en-de metrics: - name: Test BLEU (En->Fr) type: bleu value: 40.76 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: FLEURS type: google/fleurs config: de_de split: test args: language: de-en metrics: - name: Test BLEU (De->En) type: bleu value: 33.98 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: FLEURS type: google/fleurs config: es_419 split: test args: language: es-en metrics: - name: Test BLEU (Es->En) type: bleu value: 21.80 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: FLEURS type: google/fleurs config: fr_fr split: test args: language: fr-en metrics: - name: Test BLEU (Fr->En) type: bleu value: 30.95 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: COVOST type: covost2 config: de_de split: test args: language: de-en metrics: - name: Test BLEU (De->En) type: bleu value: 37.67 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: COVOST type: covost2 config: es_419 split: test args: 
language: es-en metrics: - name: Test BLEU (Es->En) type: bleu value: 40.7 - task: type: Automatic Speech Translation name: automatic-speech-translation dataset: name: COVOST type: covost2 config: fr_fr split: test args: language: fr-en metrics: - name: Test BLEU (Fr->En) type: bleu value: 40.42 metrics: - wer - bleu pipeline_tag: automatic-speech-recognition --- # Canary 1B <style> img { display: inline; } </style> [![Model architecture](https://img.shields.io/badge/Model_Arch-FastConformer--Transformer-lightgrey#model-badge)](#model-architecture) | [![Model size](https://img.shields.io/badge/Params-1B-lightgrey#model-badge)](#model-architecture) | [![Language](https://img.shields.io/badge/Language-multilingual-lightgrey#model-badge)](#datasets) NVIDIA [NeMo Canary](https://nvidia.github.io/NeMo/blogs/2024/2024-02-canary/) is a family of multi-lingual multi-tasking models that achieves state-of-the-art performance on multiple benchmarks. With 1 billion parameters, Canary-1B supports automatic speech-to-text recognition (ASR) in 4 languages (English, German, French, Spanish) and translation from English to German/French/Spanish and from German/French/Spanish to English with or without punctuation and capitalization (PnC). ## Model Architecture Canary is an encoder-decoder model with FastConformer [1] encoder and Transformer Decoder [2]. With audio features extracted from the encoder, task tokens such as `<source language>`, `<target language>`, `<task>` and `<toggle PnC>` are fed into the Transformer Decoder to trigger the text generation process. Canary uses a concatenated tokenizer [5] from individual SentencePiece [3] tokenizers of each language, which makes it easy to scale up to more languages. The Canary-1B model has 24 encoder layers and 24 decoder layers in total. ## NVIDIA NeMo To train, fine-tune or transcribe with Canary, you will need to install [NVIDIA NeMo](https://github.com/NVIDIA/NeMo). 
We recommend you install it after you've installed Cython and the latest PyTorch version. ``` pip install git+https://github.com/NVIDIA/[email protected]#egg=nemo_toolkit[asr] ``` ## How to Use this Model The model is available for use in the NeMo toolkit [4], and can be used as a pre-trained checkpoint for inference or for fine-tuning on another dataset. ### Loading the Model ```python from nemo.collections.asr.models import EncDecMultiTaskModel # load model canary_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b') # update decode params decode_cfg = canary_model.cfg.decoding decode_cfg.beam.beam_size = 1 canary_model.change_decoding_strategy(decode_cfg) ``` ### Input Format Input to Canary can be either a list of paths to audio files or a jsonl manifest file. If the input is a list of paths, Canary assumes that the audio is English and transcribes it. I.e., Canary's default behaviour is English ASR. ```python predicted_text = canary_model.transcribe( paths2audio_files=['path1.wav', 'path2.wav'], batch_size=16, # batch size to run the inference with ) ``` To use Canary for transcribing other supported languages or perform Speech-to-Text translation, specify the input as jsonl manifest file, where each line in the file is a dictionary containing the following fields: ```yaml # Example of a line in input_manifest.json { "audio_filepath": "/path/to/audio.wav", # path to the audio file "duration": 1000, # duration of the audio, can be set to `None` if using NeMo main branch "taskname": "asr", # use "s2t_translation" for speech-to-text translation with r1.23, or "ast" if using the NeMo main branch "source_lang": "en", # language of the audio input, set `source_lang`==`target_lang` for ASR, choices=['en','de','es','fr'] "target_lang": "en", # language of the text output, choices=['en','de','es','fr'] "pnc": "yes", # whether to have PnC output, choices=['yes', 'no'] "answer": "na", } ``` and then use: ```python predicted_text = canary_model.transcribe( "<path to 
input manifest file>", batch_size=16, # batch size to run the inference with ) ``` ### Automatic Speech-to-text Recognition (ASR) An example manifest for transcribing English audios can be: ```yaml # Example of a line in input_manifest.json { "audio_filepath": "/path/to/audio.wav", # path to the audio file "duration": 1000, # duration of the audio, can be set to `None` if using NeMo main branch "taskname": "asr", "source_lang": "en", # language of the audio input, set `source_lang`==`target_lang` for ASR, choices=['en','de','es','fr'] "target_lang": "en", # language of the text output, choices=['en','de','es','fr'] "pnc": "yes", # whether to have PnC output, choices=['yes', 'no'] "answer": "na", } ``` ### Automatic Speech-to-text Translation (AST) An example manifest for transcribing English audios into German text can be: ```yaml # Example of a line in input_manifest.json { "audio_filepath": "/path/to/audio.wav", # path to the audio file "duration": 1000, # duration of the audio, can be set to `None` if using NeMo main branch "taskname": "s2t_translation", # r1.23 only recognizes "s2t_translation", but "ast" is supported if using the NeMo main branch "source_lang": "en", # language of the audio input, choices=['en','de','es','fr'] "target_lang": "de", # language of the text output, choices=['en','de','es','fr'] "pnc": "yes", # whether to have PnC output, choices=['yes', 'no'] "answer": "na" } ``` Alternatively, one can use `transcribe_speech.py` script to do the same. ```bash python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py pretrained_name="nvidia/canary-1b" audio_dir="<path to audio_directory>" # transcribes all the wav files in audio_directory ``` ```bash python [NEMO_GIT_FOLDER]/examples/asr/transcribe_speech.py pretrained_name="nvidia/canary-1b" dataset_manifest="<path to manifest file>" ``` ### Input This model accepts single channel (mono) audio sampled at 16000 Hz, along with the task/languages/PnC tags as input. 
### Output The model outputs the transcribed/translated text corresponding to the input audio, in the specified target language and with or without punctuation and capitalization. ## Training Canary-1B is trained using the NVIDIA NeMo toolkit [4] for 150k steps with dynamic bucketing and a batch duration of 360s per GPU on 128 NVIDIA A100 80GB GPUs. The model can be trained using this [example script](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/speech_multitask/speech_to_text_aed.py) and [base config](https://github.com/NVIDIA/NeMo/blob/main/examples/asr/conf/speech_multitask/fast-conformer_aed.yaml). The tokenizers for these models were built using the text transcripts of the train set with this [script](https://github.com/NVIDIA/NeMo/blob/main/scripts/tokenizers/process_asr_text_tokenizer.py). ### Datasets The Canary-1B model is trained on a total of 85k hrs of speech data. It consists of 31k hrs of public data, 20k hrs collected by [Suno](https://suno.ai/), and 34k hrs of in-house data. The constituents of public data are as follows. 
#### English (25.5k hours) - Librispeech 960 hours - Fisher Corpus - Switchboard-1 Dataset - WSJ-0 and WSJ-1 - National Speech Corpus (Part 1, Part 6) - VCTK - VoxPopuli (EN) - Europarl-ASR (EN) - Multilingual Librispeech (MLS EN) - 2,000 hour subset - Mozilla Common Voice (v7.0) - People's Speech - 12,000 hour subset - Mozilla Common Voice (v11.0) - 1,474 hour subset #### German (2.5k hours) - Mozilla Common Voice (v12.0) - 800 hour subset - Multilingual Librispeech (MLS DE) - 1,500 hour subset - VoxPopuli (DE) - 200 hr subset #### Spanish (1.4k hours) - Mozilla Common Voice (v12.0) - 395 hour subset - Multilingual Librispeech (MLS ES) - 780 hour subset - VoxPopuli (ES) - 108 hour subset - Fisher - 141 hour subset #### French (1.8k hours) - Mozilla Common Voice (v12.0) - 708 hour subset - Multilingual Librispeech (MLS FR) - 926 hour subset - VoxPopuli (FR) - 165 hour subset ## Performance In both ASR and AST experiments, predictions were generated using beam search with width 5 and length penalty 1.0. ### ASR Performance (w/o PnC) The ASR performance is measured with word error rate (WER), and we process the groundtruth and predicted text with [whisper-normalizer](https://pypi.org/project/whisper-normalizer/). 
WER on [MCV-16.1](https://commonvoice.mozilla.org/en/datasets) test set: | **Version** | **Model** | **En** | **De** | **Es** | **Fr** | |:---------:|:-----------:|:------:|:------:|:------:|:------:| | 1.23.0 | canary-1b | 7.97 | 4.61 | 3.99 | 6.53 | WER on [MLS](https://huggingface.co/datasets/facebook/multilingual_librispeech) test set: | **Version** | **Model** | **En** | **De** | **Es** | **Fr** | |:---------:|:-----------:|:------:|:------:|:------:|:------:| | 1.23.0 | canary-1b | 3.06 | 4.19 | 3.15 | 4.12 | More details on evaluation can be found at [HuggingFace ASR Leaderboard](https://huggingface.co/spaces/hf-audio/open_asr_leaderboard) ### AST Performance We evaluate AST performance with [BLEU score](https://lightning.ai/docs/torchmetrics/stable/text/sacre_bleu_score.html), and use native annotations with punctuation and capitalization in the datasets. BLEU score on [FLEURS](https://huggingface.co/datasets/google/fleurs) test set: | **Version** | **Model** | **En->De** | **En->Es** | **En->Fr** | **De->En** | **Es->En** | **Fr->En** | |:-----------:|:---------:|:----------:|:----------:|:----------:|:----------:|:----------:|:----------:| | 1.23.0 | canary-1b | 32.15 | 22.66 | 40.76 | 33.98 | 21.80 | 30.95 | BLEU score on [COVOST-v2](https://github.com/facebookresearch/covost) test set: | **Version** | **Model** | **De->En** | **Es->En** | **Fr->En** | |:-----------:|:---------:|:----------:|:----------:|:----------:| | 1.23.0 | canary-1b | 37.67 | 40.7 | 40.42 | BLEU score on [mExpresso](https://huggingface.co/facebook/seamless-expressive#mexpresso-multilingual-expresso) test set: | **Version** | **Model** | **En->De** | **En->Es** | **En->Fr** | |:-----------:|:---------:|:----------:|:----------:|:----------:| | 1.23.0 | canary-1b | 23.84 | 35.74 | 28.29 | ## Model Fairness Evaluation As outlined in the paper "Towards Measuring Fairness in AI: the Casual Conversations Dataset", we assessed the canary-1.1b model for fairness. 
The model was evaluated on the CausalConversations-v1 dataset, and the results are reported as follows: ### Gender Bias: | Gender | Male | Female | N/A | Other | | :--- | :--- | :--- | :--- | :--- | | Num utterances | 19325 | 24532 | 926 | 33 | | % WER | 14.64 | 12.92 | 17.88 | 126.92 | ### Age Bias: | Age Group | (18-30) | (31-45) | (46-85) | (1-100) | | :--- | :--- | :--- | :--- | :--- | | Num utterances | 15956 | 14585 | 13349 | 43890 | | % WER | 14.64 | 13.07 | 13.47 | 13.76 | (Error rates for fairness evaluation are determined by normalizing both the reference and predicted text, similar to the methods used in the evaluations found at https://github.com/huggingface/open_asr_leaderboard.) ## NVIDIA Riva: Deployment [NVIDIA Riva](https://developer.nvidia.com/riva), is an accelerated speech AI SDK deployable on-prem, in all clouds, multi-cloud, hybrid, on edge, and embedded. Additionally, Riva provides: * World-class out-of-the-box accuracy for the most common languages with model checkpoints trained on proprietary data with hundreds of thousands of GPU-compute hours * Best in class accuracy with run-time word boosting (e.g., brand and product names) and customization of acoustic model, language model, and inverse text normalization * Streaming speech recognition, Kubernetes compatible scaling, and enterprise-grade support Although this model isn’t supported yet by Riva, the [list of supported models](https://huggingface.co/models?other=Riva) is here. Check out [Riva live demo](https://developer.nvidia.com/riva#demos). 
## References [1] [Fast Conformer with Linearly Scalable Attention for Efficient Speech Recognition](https://arxiv.org/abs/2305.05084) [2] [Attention is all you need](https://arxiv.org/abs/1706.03762) [3] [Google Sentencepiece Tokenizer](https://github.com/google/sentencepiece) [4] [NVIDIA NeMo Toolkit](https://github.com/NVIDIA/NeMo) [5] [Unified Model for Code-Switching Speech Recognition and Language Identification Based on Concatenated Tokenizer](https://aclanthology.org/2023.calcs-1.7.pdf) ## Licence License to use this model is covered by the [CC-BY-NC-4.0](https://creativecommons.org/licenses/by-nc/4.0/deed.en#:~:text=NonCommercial%20%E2%80%94%20You%20may%20not%20use,doing%20anything%20the%20license%20permits.). By downloading the public and release version of the model, you accept the terms and conditions of the CC-BY-NC-4.0 license.
TechxGenus/gemma-1.1-2b-it-GPTQ
TechxGenus
"2024-04-07T15:52:28Z"
4,586
1
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "conversational", "arxiv:2312.11805", "arxiv:2009.03300", "arxiv:1905.07830", "arxiv:1911.11641", "arxiv:1904.09728", "arxiv:1905.10044", "arxiv:1907.10641", "arxiv:1811.00937", "arxiv:1809.02789", "arxiv:1911.01547", "arxiv:1705.03551", "arxiv:2107.03374", "arxiv:2108.07732", "arxiv:2110.14168", "arxiv:2304.06364", "arxiv:2206.04615", "arxiv:1804.06876", "arxiv:2110.08193", "license:gemma", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "4-bit", "gptq", "region:us" ]
text-generation
"2024-04-07T15:19:30Z"
--- library_name: transformers widget: - messages: - role: user content: How does the brain work? inference: parameters: max_new_tokens: 200 extra_gated_heading: Access Gemma on Hugging Face extra_gated_prompt: >- To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately. extra_gated_button_content: Acknowledge license license: gemma --- GPTQ quantized version of gemma-1.1-2b-it model. --- # Gemma Model Card **Model Page**: [Gemma](https://ai.google.dev/gemma/docs) This model card corresponds to the latest 2B instruct version of the Gemma model. Here you can find other models in the Gemma family: | | Base | Instruct | |----|----------------------------------------------------|----------------------------------------------------------------------| | 2B | [gemma-2b](https://huggingface.co/google/gemma-2b) | [**gemma-1.1-2b-it**](https://huggingface.co/google/gemma-1.1-2b-it) | | 7B | [gemma-7b](https://huggingface.co/google/gemma-7b) | [gemma-1.1-7b-it](https://huggingface.co/google/gemma-1.1-7b-it) | **Release Notes** This is Gemma 1.1 2B (IT), an update over the original instruction-tuned Gemma release. Gemma 1.1 was trained using a novel RLHF method, leading to substantial gains on quality, coding capabilities, factuality, instruction following and multi-turn conversation quality. We also fixed a bug in multi-turn conversations, and made sure that model responses don't always start with `"Sure,"`. We believe this release represents an improvement for most use cases, but we encourage users to test in their particular applications. The previous model [will continue to be available in the same repo](https://huggingface.co/google/gemma-2b-it). We appreciate the enthusiastic adoption of Gemma, and we continue to welcome all feedback from the community. 
**Resources and Technical Documentation**: * [Responsible Generative AI Toolkit](https://ai.google.dev/responsible) * [Gemma on Kaggle](https://www.kaggle.com/models/google/gemma) * [Gemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335) **Terms of Use**: [Terms](https://www.kaggle.com/models/google/gemma/license/consent) **Authors**: Google ## Model Information Summary description and brief definition of inputs and outputs. ### Description Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning. Their relatively small size makes it possible to deploy them in environments with limited resources such as a laptop, desktop or your own cloud infrastructure, democratizing access to state of the art AI models and helping foster innovation for everyone. ### Usage Below we share some code snippets on how to get quickly started with running the model. First make sure to `pip install -U transformers`, then copy the snippet from the section that is relevant for your usecase. #### Running the model on a CPU As explained below, we recommend `torch.bfloat16` as the default dtype. You can use [a different precision](#precisions) if necessary. ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-1.1-2b-it", torch_dtype=torch.bfloat16 ) input_text = "Write me a poem about Machine Learning." 
input_ids = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**input_ids, max_new_tokens=50) print(tokenizer.decode(outputs[0])) ``` #### Running the model on a single / multi GPU ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-1.1-2b-it", device_map="auto", torch_dtype=torch.bfloat16 ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` <a name="precisions"></a> #### Running the model on a GPU using different precisions The native weights of this model were exported in `bfloat16` precision. You can use `float16`, which may be faster on certain hardware, indicating the `torch_dtype` when loading the model. For convenience, the `float16` revision of the repo contains a copy of the weights already converted to that precision. You can also use `float32` if you skip the dtype, but no precision increase will occur (model weights will just be upcasted to `float32`). See examples below. * _Using `torch.float16`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM import torch tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-1.1-2b-it", device_map="auto", torch_dtype=torch.float16, revision="float16", ) input_text = "Write me a poem about Machine Learning." 
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using `torch.bfloat16`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-1.1-2b-it", device_map="auto", torch_dtype=torch.bfloat16 ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Upcasting to `torch.float32`_ ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-1.1-2b-it", device_map="auto" ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Quantized Versions through `bitsandbytes` * _Using 8-bit precision (int8)_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-1.1-2b-it", quantization_config=quantization_config ) input_text = "Write me a poem about Machine Learning." 
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using 4-bit precision_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-1.1-2b-it") model = AutoModelForCausalLM.from_pretrained( "google/gemma-1.1-2b-it", quantization_config=quantization_config ) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Other optimizations * _Flash Attention 2_ First make sure to install `flash-attn` in your environment `pip install flash-attn` ```diff model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, + attn_implementation="flash_attention_2" ).to(0) ``` #### Running the model in JAX / Flax Use the `flax` branch of the repository: ```python import jax.numpy as jnp from transformers import AutoTokenizer, FlaxGemmaForCausalLM model_id = "google/gemma-1.1-2b-it" tokenizer = AutoTokenizer.from_pretrained(model_id) tokenizer.padding_side = "left" model, params = FlaxGemmaForCausalLM.from_pretrained( model_id, dtype=jnp.bfloat16, revision="flax", _do_init=False, ) inputs = tokenizer("Valencia and Málaga are", return_tensors="np", padding=True) output = model.generate(**inputs, params=params, max_new_tokens=20, do_sample=False) output_text = tokenizer.batch_decode(output.sequences, skip_special_tokens=True) ``` [Check this notebook](https://colab.research.google.com/github/sanchit-gandhi/notebooks/blob/main/jax_gemma.ipynb) for a comprehensive walkthrough on how to parallelize JAX inference. ### Chat Template The instruction-tuned models use a chat template that must be adhered to for conversational use. 
The easiest way to apply it is using the tokenizer's built-in chat template, as shown in the following snippet. Let's load the model and apply the chat template to a conversation. In this example, we'll start with a single user interaction: ```py from transformers import AutoTokenizer, AutoModelForCausalLM import transformers import torch model_id = "google/gemma-1.1-2b-it" dtype = torch.bfloat16 tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained( model_id, device_map="cuda", torch_dtype=dtype, ) chat = [ { "role": "user", "content": "Write a hello world program" }, ] prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) ``` At this point, the prompt contains the following text: ``` <bos><start_of_turn>user Write a hello world program<end_of_turn> <start_of_turn>model ``` As you can see, each turn is preceded by a `<start_of_turn>` delimiter and then the role of the entity (either `user`, for content supplied by the user, or `model` for LLM responses). Turns finish with the `<end_of_turn>` token. You can follow this format to build the prompt manually, if you need to do it without the tokenizer's chat template. After the prompt is ready, generation can be performed like this: ```py inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt") outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150) ``` ### Fine-tuning You can find some fine-tuning scripts under the [`examples/` directory](https://huggingface.co/google/gemma-7b/tree/main/examples) of [`google/gemma-7b`](https://huggingface.co/google/gemma-7b) repository. To adapt them to this model, simply change the model-id to `google/gemma-1.1-2b-it`. 
We provide: * A script to perform Supervised Fine-Tuning (SFT) on UltraChat dataset using QLoRA * A script to perform SFT using FSDP on TPU devices * A notebook that you can run on a free-tier Google Colab instance to perform SFT on the English quotes dataset ### Inputs and outputs * **Input:** Text string, such as a question, a prompt, or a document to be summarized. * **Output:** Generated English-language text in response to the input, such as an answer to a question, or a summary of a document. ## Model Data Data used for model training and how the data was processed. ### Training Dataset These models were trained on a dataset of text data that includes a wide variety of sources, totaling 6 trillion tokens. Here are the key components: * Web Documents: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. Primarily English-language content. * Code: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code or understand code-related questions. * Mathematics: Training on mathematical text helps the model learn logical reasoning, symbolic representation, and to address mathematical queries. The combination of these diverse data sources is crucial for training a powerful language model that can handle a wide variety of different tasks and text formats. ### Data Preprocessing Here are the key data cleaning and filtering methods applied to the training data: * CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content * Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets. 
* Additional methods: Filtering based on content quality and safely in line with [our policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11). ## Implementation Information Details about the model internals. ### Hardware Gemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e). Training large language models requires significant computational power. TPUs, designed specifically for matrix operations common in machine learning, offer several advantages in this domain: * Performance: TPUs are specifically designed to handle the massive computations involved in training LLMs. They can speed up training considerably compared to CPUs. * Memory: TPUs often come with large amounts of high-bandwidth memory, allowing for the handling of large models and batch sizes during training. This can lead to better model quality. * Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for handling the growing complexity of large foundation models. You can distribute training across multiple TPU devices for faster and more efficient processing. * Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective solution for training large models compared to CPU-based infrastructure, especially when considering the time and resources saved due to faster training. * These advantages are aligned with [Google's commitments to operate sustainably](https://sustainability.google/operating-sustainably/). ### Software Training was done using [JAX](https://github.com/google/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture/ml-pathways). JAX allows researchers to take advantage of the latest generation of hardware, including TPUs, for faster and more efficient training of large models. 
ML Pathways is Google's latest effort to build artificially intelligent systems capable of generalizing across multiple tasks. This is specially suitable for [foundation models](https://ai.google/discover/foundation-models/), including large language models like these ones. Together, JAX and ML Pathways are used as described in the [paper about the Gemini family of models](https://arxiv.org/abs/2312.11805); "the 'single controller' programming model of Jax and Pathways allows a single Python process to orchestrate the entire training run, dramatically simplifying the development workflow." ## Evaluation Model evaluation metrics and results. ### Benchmark Results The pre-trained base models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation: | Benchmark | Metric | Gemma PT 2B | Gemma PT 7B | | ------------------------------ | ------------- | ----------- | ----------- | | [MMLU](https://arxiv.org/abs/2009.03300) | 5-shot, top-1 | 42.3 | 64.3 | | [HellaSwag](https://arxiv.org/abs/1905.07830) | 0-shot | 71.4 | 81.2 | | [PIQA](https://arxiv.org/abs/1911.11641) | 0-shot | 77.3 | 81.2 | | [SocialIQA](https://arxiv.org/abs/1904.09728) | 0-shot | 49.7 | 51.8 | | [BoolQ](https://arxiv.org/abs/1905.10044) | 0-shot | 69.4 | 83.2 | | [WinoGrande](https://arxiv.org/abs/1907.10641) | partial score | 65.4 | 72.3 | | [CommonsenseQA](https://arxiv.org/abs/1811.00937) | 7-shot | 65.3 | 71.3 | | [OpenBookQA](https://arxiv.org/abs/1809.02789) | | 47.8 | 52.8 | | [ARC-e](https://arxiv.org/abs/1911.01547) | | 73.2 | 81.5 | | [ARC-c](https://arxiv.org/abs/1911.01547) | | 42.1 | 53.2 | | [TriviaQA](https://arxiv.org/abs/1705.03551) | 5-shot | 53.2 | 63.4 | | [Natural Questions](https://github.com/google-research-datasets/natural-questions) | 5-shot | 12.5 | 23.0 | | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 22.0 | 32.3 | | [MBPP](https://arxiv.org/abs/2108.07732) | 3-shot | 29.2 | 44.4 | | 
[GSM8K](https://arxiv.org/abs/2110.14168) | maj@1 | 17.7 | 46.4 | | [MATH](https://arxiv.org/abs/2108.07732) | 4-shot | 11.8 | 24.3 | | [AGIEval](https://arxiv.org/abs/2304.06364) | | 24.2 | 41.7 | | [BIG-Bench](https://arxiv.org/abs/2206.04615) | | 35.2 | 55.1 | | ------------------------------ | ------------- | ----------- | ----------- | | **Average** | | **44.9** | **56.4** | ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Text-to-Text Content Safety: Human evaluation on prompts covering safety policies including child sexual abuse and exploitation, harassment, violence and gore, and hate speech. * Text-to-Text Representational Harms: Benchmark against relevant academic datasets such as [WinoBias](https://arxiv.org/abs/1804.06876) and [BBQ Dataset](https://arxiv.org/abs/2110.08193v2). * Memorization: Automated evaluation of memorization of training data, including the risk of personally identifiable information exposure. * Large-scale harm: Tests for "dangerous capabilities," such as chemical, biological, radiological, and nuclear (CBRN) risks. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11) for categories such as child safety, content safety, representational harms, memorization, large-scale harms. On top of robust internal evaluations, the results of well known safety benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA are shown here. 
#### Gemma 1.0 | Benchmark | Metric | Gemma 1.0 IT 2B | Gemma 1.0 IT 7B | | ------------------------ | ------------- | --------------- | --------------- | | [RealToxicity][realtox] | average | 6.86 | 7.90 | | [BOLD][bold] | | 45.57 | 49.08 | | [CrowS-Pairs][crows] | top-1 | 45.82 | 51.33 | | [BBQ Ambig][bbq] | 1-shot, top-1 | 62.58 | 92.54 | | [BBQ Disambig][bbq] | top-1 | 54.62 | 71.99 | | [Winogender][winogender] | top-1 | 51.25 | 54.17 | | [TruthfulQA][truthfulqa] | | 44.84 | 31.81 | | [Winobias 1_2][winobias] | | 56.12 | 59.09 | | [Winobias 2_2][winobias] | | 91.10 | 92.23 | | [Toxigen][toxigen] | | 29.77 | 39.59 | | ------------------------ | ------------- | --------------- | --------------- | #### Gemma 1.1 | Benchmark | Metric | Gemma 1.1 IT 2B | Gemma 1.1 IT 7B | | ------------------------ | ------------- | --------------- | --------------- | | [RealToxicity][realtox] | average | 7.03 | 8.04 | | [BOLD][bold] | | 47.76 | | | [CrowS-Pairs][crows] | top-1 | 45.89 | 49.67 | | [BBQ Ambig][bbq] | 1-shot, top-1 | 58.97 | 86.06 | | [BBQ Disambig][bbq] | top-1 | 53.90 | 85.08 | | [Winogender][winogender] | top-1 | 50.14 | 57.64 | | [TruthfulQA][truthfulqa] | | 44.24 | 45.34 | | [Winobias 1_2][winobias] | | 55.93 | 59.22 | | [Winobias 2_2][winobias] | | 89.46 | 89.2 | | [Toxigen][toxigen] | | 29.64 | 38.75 | | ------------------------ | ------------- | --------------- | --------------- | ## Usage and Limitations These models have certain limitations that users should be aware of. ### Intended Usage Open Large Language Models (LLMs) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. 
* Content Creation and Communication * Text Generation: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts. * Chatbots and Conversational AI: Power conversational interfaces for customer service, virtual assistants, or interactive applications. * Text Summarization: Generate concise summaries of a text corpus, research papers, or reports. * Research and Education * Natural Language Processing (NLP) Research: These models can serve as a foundation for researchers to experiment with NLP techniques, develop algorithms, and contribute to the advancement of the field. * Language Learning Tools: Support interactive language learning experiences, aiding in grammar correction or providing writing practice. * Knowledge Exploration: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics. ### Limitations * Training Data * The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses. * The scope of the training dataset determines the subject areas the model can handle effectively. * Context and Task Complexity * LLMs are better at tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging. * A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point). * Language Ambiguity and Nuance * Natural language is inherently complex. LLMs might struggle to grasp subtle nuances, sarcasm, or figurative language. * Factual Accuracy * LLMs generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements. * Common Sense * LLMs rely on statistical patterns in language. 
They might lack the ability to apply common sense reasoning in certain situations. ### Ethical Considerations and Risks The development of large language models (LLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following: * Bias and Fairness * LLMs trained on large-scale, real-world text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny, input data pre-processing described and posterior evaluations reported in this card. * Misinformation and Misuse * LLMs can be misused to generate text that is false, misleading, or harmful. * Guidelines are provided for responsible use with the model, see the [Responsible Generative AI Toolkit](http://ai.google.dev/gemma/responsible). * Transparency and Accountability: * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes. * A responsibly developed open model offers the opportunity to share innovation by making LLM technology accessible to developers and researchers across the AI ecosystem. Risks identified and mitigations: * Perpetuation of biases: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases. * Generation of harmful content: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases. * Misuse for malicious purposes: Technical limitations and developer and end-user education can help mitigate against malicious applications of LLMs. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy). 
* Privacy violations: Models were trained on data filtered for removal of PII (Personally Identifiable Information). Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques. ### Benefits At the time of release, this family of models provides high-performance open large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models. Using the benchmark evaluation metrics described in this document, these models have shown to provide superior performance to other, comparably-sized open model alternatives.
mucai/vip-llava-7b
mucai
"2023-12-17T23:42:47Z"
4,582
3
transformers
[ "transformers", "pytorch", "llava", "text-generation", "arxiv:2312.00784", "autotrain_compatible", "region:us" ]
text-generation
"2023-12-03T18:19:47Z"
--- inference: false --- <br> <br> # ViP-LLaVA Model Card ## Model details **Model type:** ViP-LLaVA is an open-source chatbot trained by fine-tuning LLaMA/Vicuna on both image level instruction data and region-level instruction data annotated with visual prompts. It is an auto-regressive language model, based on the transformer architecture. **Model date:** ViP-LLaVA-7B was trained in November 2023. [Paper](https://arxiv.org/abs/2312.00784) **Paper or resources for more information:** https://vip-llava.github.io/ ## License Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved. **Where to send questions or comments about the model:** https://github.com/mu-cai/ViP-LLaVA/issues ## Intended use **Primary intended uses:** The primary use of ViP-LLaVA is research on large multimodal models and chatbots. **Primary intended users:** The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence. ## Training dataset - 558K filtered image-text pairs from LAION/CC/SBU, captioned by BLIP. - 665K image level instruction data from LLaVA-1.5. - 520K image-text pairs marked with visual prompts. - 13K region-level instruction data generated from GPT-4V. ## Evaluation dataset ViP-LLaVA achieves state-of-the-art performance in 4 academic region-level benchmarks and our newly proposed RegionBench.
patrickquick/BERTicelli
patrickquick
"2022-05-10T09:03:48Z"
4,581
0
transformers
[ "transformers", "pytorch", "bert", "text-classification", "BERTicelli", "text classification", "abusive language", "hate speech", "offensive language", "en", "dataset:OLID", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2022-05-02T18:36:32Z"
--- language: - en tags: - BERTicelli - text classification - abusive language - hate speech - offensive language datasets: - OLID license: apache-2.0 widget: - text: "If Jamie Oliver fucks with my £3 meal deals at Tesco I'll kill the cunt." example_title: "Example 1" - text: "Keep up the good hard work." example_title: "Example 2" - text: "That's not hair. Those were polyester fibers because Yoda is (or was) a puppet." example_title: "Example 3" --- [Mona Allaert](https://github.com/MonaDT) • [Leonardo Grotti](https://github.com/corvusMidnight) • [Patrick Quick](https://github.com/patrickquick) ## Model description BERTicelli is an English pre-trained BERT model obtained by fine-tuning the [English BERT base cased model](https://github.com/google-research/bert) with the training data from [Offensive Language Identification Dataset (OLID)](https://scholar.harvard.edu/malmasi/olid). This model was developed for the NLP Shared Task in the Digital Text Analysis program at the University of Antwerp (2021–2022).
sentence-transformers/roberta-large-nli-stsb-mean-tokens
sentence-transformers
"2024-03-27T12:42:48Z"
4,578
1
sentence-transformers
[ "sentence-transformers", "pytorch", "tf", "jax", "safetensors", "roberta", "feature-extraction", "sentence-similarity", "transformers", "arxiv:1908.10084", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
"2022-03-02T23:29:05Z"
--- license: apache-2.0 library_name: sentence-transformers tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers pipeline_tag: sentence-similarity --- **⚠️ This model is deprecated. Please don't use it as it produces sentence embeddings of low quality. You can find recommended sentence embedding models here: [SBERT.net - Pretrained Models](https://www.sbert.net/docs/pretrained_models.html)** # sentence-transformers/roberta-large-nli-stsb-mean-tokens This is a [sentence-transformers](https://www.SBERT.net) model: It maps sentences & paragraphs to a 1024 dimensional dense vector space and can be used for tasks like clustering or semantic search. ## Usage (Sentence-Transformers) Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` Then you can use the model like this: ```python from sentence_transformers import SentenceTransformer sentences = ["This is an example sentence", "Each sentence is converted"] model = SentenceTransformer('sentence-transformers/roberta-large-nli-stsb-mean-tokens') embeddings = model.encode(sentences) print(embeddings) ``` ## Usage (HuggingFace Transformers) Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling-operation on-top of the contextualized word embeddings. 
```python from transformers import AutoTokenizer, AutoModel import torch #Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Sentences we want sentence embeddings for sentences = ['This is an example sentence', 'Each sentence is converted'] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/roberta-large-nli-stsb-mean-tokens') model = AutoModel.from_pretrained('sentence-transformers/roberta-large-nli-stsb-mean-tokens') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, max pooling. sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask']) print("Sentence embeddings:") print(sentence_embeddings) ``` ## Evaluation Results For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/roberta-large-nli-stsb-mean-tokens) ## Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': True}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False}) ) ``` ## Citing & Authors This model was trained by [sentence-transformers](https://www.sbert.net/). 
If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084): ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "http://arxiv.org/abs/1908.10084", } ```
facebook/vit-mae-large
facebook
"2023-06-13T19:43:02Z"
4,575
6
transformers
[ "transformers", "pytorch", "tf", "vit_mae", "pretraining", "vision", "dataset:imagenet-1k", "arxiv:2111.06377", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2022-03-02T23:29:05Z"
--- license: apache-2.0 tags: - vision datasets: - imagenet-1k --- # Vision Transformer (large-sized model) pre-trained with MAE Vision Transformer (ViT) model pre-trained using the MAE method. It was introduced in the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick and first released in [this repository](https://github.com/facebookresearch/mae). Disclaimer: The team releasing MAE did not write a model card for this model so this model card has been written by the Hugging Face team. ## Model description The Vision Transformer (ViT) is a transformer encoder model (BERT-like). Images are presented to the model as a sequence of fixed-size patches. During pre-training, one randomly masks out a high portion (75%) of the image patches. First, the encoder is used to encode the visual patches. Next, a learnable (shared) mask token is added at the positions of the masked patches. The decoder takes the encoded visual patches and mask tokens as input and reconstructs raw pixel values for the masked positions. By pre-training the model, it learns an inner representation of images that can then be used to extract features useful for downstream tasks: if you have a dataset of labeled images for instance, you can train a standard classifier by placing a linear layer on top of the pre-trained encoder. ## Intended uses & limitations You can use the raw model for image classification. See the [model hub](https://huggingface.co/models?search=facebook/vit-mae) to look for fine-tuned versions on a task that interests you. 
### How to use Here is how to use this model: ```python from transformers import AutoImageProcessor, ViTMAEForPreTraining from PIL import Image import requests url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) processor = AutoImageProcessor.from_pretrained('facebook/vit-mae-large') model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-large') inputs = processor(images=image, return_tensors="pt") outputs = model(**inputs) loss = outputs.loss mask = outputs.mask ids_restore = outputs.ids_restore ``` ### BibTeX entry and citation info ```bibtex @article{DBLP:journals/corr/abs-2111-06377, author = {Kaiming He and Xinlei Chen and Saining Xie and Yanghao Li and Piotr Doll{\'{a}}r and Ross B. Girshick}, title = {Masked Autoencoders Are Scalable Vision Learners}, journal = {CoRR}, volume = {abs/2111.06377}, year = {2021}, url = {https://arxiv.org/abs/2111.06377}, eprinttype = {arXiv}, eprint = {2111.06377}, timestamp = {Tue, 16 Nov 2021 12:12:31 +0100}, biburl = {https://dblp.org/rec/journals/corr/abs-2111-06377.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ```
DMetaSoul/Dmeta-embedding-zh-small
DMetaSoul
"2024-03-28T06:20:41Z"
4,574
8
transformers
[ "transformers", "safetensors", "bert", "feature-extraction", "mteb", "license:apache-2.0", "model-index", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
feature-extraction
"2024-03-25T09:33:26Z"
--- tags: - mteb model-index: - name: Dmeta-embedding-zh-small results: - task: type: STS dataset: type: C-MTEB/AFQMC name: MTEB AFQMC config: default split: validation revision: None metrics: - type: cos_sim_pearson value: 55.38441014851534 - type: cos_sim_spearman value: 59.54284362578262 - type: euclidean_pearson value: 58.18592108890414 - type: euclidean_spearman value: 59.54284362133902 - type: manhattan_pearson value: 58.142197046175916 - type: manhattan_spearman value: 59.47943468645265 - task: type: STS dataset: type: C-MTEB/ATEC name: MTEB ATEC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 55.96911621560259 - type: cos_sim_spearman value: 58.6334496101353 - type: euclidean_pearson value: 62.78426382809823 - type: euclidean_spearman value: 58.63344961011331 - type: manhattan_pearson value: 62.80625401678188 - type: manhattan_spearman value: 58.618722128260394 - task: type: Classification dataset: type: mteb/amazon_reviews_multi name: MTEB AmazonReviewsClassification (zh) config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 44.88 - type: f1 value: 42.739249460584375 - task: type: STS dataset: type: C-MTEB/BQ name: MTEB BQ config: default split: test revision: None metrics: - type: cos_sim_pearson value: 68.56815521242152 - type: cos_sim_spearman value: 70.30776353631751 - type: euclidean_pearson value: 69.10087719019191 - type: euclidean_spearman value: 70.30775660748148 - type: manhattan_pearson value: 69.0672710967445 - type: manhattan_spearman value: 70.31940638148254 - task: type: Clustering dataset: type: C-MTEB/CLSClusteringP2P name: MTEB CLSClusteringP2P config: default split: test revision: None metrics: - type: v_measure value: 40.7861976704356 - task: type: Clustering dataset: type: C-MTEB/CLSClusteringS2S name: MTEB CLSClusteringS2S config: default split: test revision: None metrics: - type: v_measure value: 38.43028280281822 - task: type: Reranking dataset: 
type: C-MTEB/CMedQAv1-reranking name: MTEB CMedQAv1 config: default split: test revision: None metrics: - type: map value: 86.78386695617407 - type: mrr value: 88.79857142857142 - task: type: Reranking dataset: type: C-MTEB/CMedQAv2-reranking name: MTEB CMedQAv2 config: default split: test revision: None metrics: - type: map value: 87.38582377194436 - type: mrr value: 89.17158730158731 - task: type: Retrieval dataset: type: C-MTEB/CmedqaRetrieval name: MTEB CmedqaRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 23.746000000000002 - type: map_at_10 value: 35.952 - type: map_at_100 value: 37.946999999999996 - type: map_at_1000 value: 38.059 - type: map_at_3 value: 31.680999999999997 - type: map_at_5 value: 34.046 - type: mrr_at_1 value: 36.409000000000006 - type: mrr_at_10 value: 44.801 - type: mrr_at_100 value: 45.842 - type: mrr_at_1000 value: 45.885999999999996 - type: mrr_at_3 value: 42.081 - type: mrr_at_5 value: 43.613 - type: ndcg_at_1 value: 36.409000000000006 - type: ndcg_at_10 value: 42.687000000000005 - type: ndcg_at_100 value: 50.352 - type: ndcg_at_1000 value: 52.275000000000006 - type: ndcg_at_3 value: 37.113 - type: ndcg_at_5 value: 39.434000000000005 - type: precision_at_1 value: 36.409000000000006 - type: precision_at_10 value: 9.712 - type: precision_at_100 value: 1.584 - type: precision_at_1000 value: 0.182 - type: precision_at_3 value: 21.096999999999998 - type: precision_at_5 value: 15.498999999999999 - type: recall_at_1 value: 23.746000000000002 - type: recall_at_10 value: 53.596 - type: recall_at_100 value: 85.232 - type: recall_at_1000 value: 98.092 - type: recall_at_3 value: 37.226 - type: recall_at_5 value: 44.187 - task: type: PairClassification dataset: type: C-MTEB/CMNLI name: MTEB Cmnli config: default split: validation revision: None metrics: - type: cos_sim_accuracy value: 82.66987372218881 - type: cos_sim_ap value: 90.28715189799232 - type: cos_sim_f1 value: 84.108318049412 - type: cos_sim_precision 
value: 78.0849358974359 - type: cos_sim_recall value: 91.13864858545709 - type: dot_accuracy value: 82.66987372218881 - type: dot_ap value: 90.29346021403634 - type: dot_f1 value: 84.108318049412 - type: dot_precision value: 78.0849358974359 - type: dot_recall value: 91.13864858545709 - type: euclidean_accuracy value: 82.66987372218881 - type: euclidean_ap value: 90.28656734732074 - type: euclidean_f1 value: 84.108318049412 - type: euclidean_precision value: 78.0849358974359 - type: euclidean_recall value: 91.13864858545709 - type: manhattan_accuracy value: 82.70595309681299 - type: manhattan_ap value: 90.25413574022456 - type: manhattan_f1 value: 83.9924670433145 - type: manhattan_precision value: 79.81052631578947 - type: manhattan_recall value: 88.63689501987373 - type: max_accuracy value: 82.70595309681299 - type: max_ap value: 90.29346021403634 - type: max_f1 value: 84.108318049412 - task: type: Retrieval dataset: type: C-MTEB/CovidRetrieval name: MTEB CovidRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 61.38 - type: map_at_10 value: 71.23 - type: map_at_100 value: 71.61800000000001 - type: map_at_1000 value: 71.63000000000001 - type: map_at_3 value: 69.31 - type: map_at_5 value: 70.403 - type: mrr_at_1 value: 61.538000000000004 - type: mrr_at_10 value: 71.28999999999999 - type: mrr_at_100 value: 71.666 - type: mrr_at_1000 value: 71.678 - type: mrr_at_3 value: 69.44200000000001 - type: mrr_at_5 value: 70.506 - type: ndcg_at_1 value: 61.538000000000004 - type: ndcg_at_10 value: 75.626 - type: ndcg_at_100 value: 77.449 - type: ndcg_at_1000 value: 77.73400000000001 - type: ndcg_at_3 value: 71.75200000000001 - type: ndcg_at_5 value: 73.695 - type: precision_at_1 value: 61.538000000000004 - type: precision_at_10 value: 9.009 - type: precision_at_100 value: 0.9860000000000001 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 26.379 - type: precision_at_5 value: 16.797 - type: recall_at_1 value: 61.38 - type: 
recall_at_10 value: 89.199 - type: recall_at_100 value: 97.576 - type: recall_at_1000 value: 99.789 - type: recall_at_3 value: 78.635 - type: recall_at_5 value: 83.325 - task: type: Retrieval dataset: type: C-MTEB/DuRetrieval name: MTEB DuRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 23.067 - type: map_at_10 value: 70.658 - type: map_at_100 value: 73.85300000000001 - type: map_at_1000 value: 73.925 - type: map_at_3 value: 48.391 - type: map_at_5 value: 61.172000000000004 - type: mrr_at_1 value: 83.1 - type: mrr_at_10 value: 88.214 - type: mrr_at_100 value: 88.298 - type: mrr_at_1000 value: 88.304 - type: mrr_at_3 value: 87.717 - type: mrr_at_5 value: 88.03699999999999 - type: ndcg_at_1 value: 83.1 - type: ndcg_at_10 value: 79.89 - type: ndcg_at_100 value: 83.829 - type: ndcg_at_1000 value: 84.577 - type: ndcg_at_3 value: 78.337 - type: ndcg_at_5 value: 77.224 - type: precision_at_1 value: 83.1 - type: precision_at_10 value: 38.934999999999995 - type: precision_at_100 value: 4.6690000000000005 - type: precision_at_1000 value: 0.484 - type: precision_at_3 value: 70.48299999999999 - type: precision_at_5 value: 59.68 - type: recall_at_1 value: 23.067 - type: recall_at_10 value: 81.702 - type: recall_at_100 value: 94.214 - type: recall_at_1000 value: 98.241 - type: recall_at_3 value: 51.538 - type: recall_at_5 value: 67.39 - task: type: Retrieval dataset: type: C-MTEB/EcomRetrieval name: MTEB EcomRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 49.8 - type: map_at_10 value: 59.46399999999999 - type: map_at_100 value: 60.063 - type: map_at_1000 value: 60.08 - type: map_at_3 value: 56.833 - type: map_at_5 value: 58.438 - type: mrr_at_1 value: 49.8 - type: mrr_at_10 value: 59.46399999999999 - type: mrr_at_100 value: 60.063 - type: mrr_at_1000 value: 60.08 - type: mrr_at_3 value: 56.833 - type: mrr_at_5 value: 58.438 - type: ndcg_at_1 value: 49.8 - type: ndcg_at_10 value: 64.48 - type: ndcg_at_100 
value: 67.314 - type: ndcg_at_1000 value: 67.745 - type: ndcg_at_3 value: 59.06400000000001 - type: ndcg_at_5 value: 61.973 - type: precision_at_1 value: 49.8 - type: precision_at_10 value: 8.04 - type: precision_at_100 value: 0.935 - type: precision_at_1000 value: 0.097 - type: precision_at_3 value: 21.833 - type: precision_at_5 value: 14.52 - type: recall_at_1 value: 49.8 - type: recall_at_10 value: 80.4 - type: recall_at_100 value: 93.5 - type: recall_at_1000 value: 96.8 - type: recall_at_3 value: 65.5 - type: recall_at_5 value: 72.6 - task: type: Classification dataset: type: C-MTEB/IFlyTek-classification name: MTEB IFlyTek config: default split: validation revision: None metrics: - type: accuracy value: 49.111196614082345 - type: f1 value: 37.07930546974089 - task: type: Classification dataset: type: C-MTEB/JDReview-classification name: MTEB JDReview config: default split: test revision: None metrics: - type: accuracy value: 85.57223264540339 - type: ap value: 53.30690968994808 - type: f1 value: 80.20587062271773 - task: type: STS dataset: type: C-MTEB/LCQMC name: MTEB LCQMC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 73.03085269274996 - type: cos_sim_spearman value: 78.72837937949888 - type: euclidean_pearson value: 78.34911745798928 - type: euclidean_spearman value: 78.72838602779268 - type: manhattan_pearson value: 78.31833697617105 - type: manhattan_spearman value: 78.69603741566397 - task: type: Reranking dataset: type: C-MTEB/Mmarco-reranking name: MTEB MMarcoReranking config: default split: dev revision: None metrics: - type: map value: 27.391692468538416 - type: mrr value: 26.44682539682539 - task: type: Retrieval dataset: type: C-MTEB/MMarcoRetrieval name: MTEB MMarcoRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 57.206999999999994 - type: map_at_10 value: 66.622 - type: map_at_100 value: 67.12700000000001 - type: map_at_1000 value: 67.145 - type: map_at_3 value: 64.587 - 
type: map_at_5 value: 65.827 - type: mrr_at_1 value: 59.312 - type: mrr_at_10 value: 67.387 - type: mrr_at_100 value: 67.836 - type: mrr_at_1000 value: 67.851 - type: mrr_at_3 value: 65.556 - type: mrr_at_5 value: 66.66 - type: ndcg_at_1 value: 59.312 - type: ndcg_at_10 value: 70.748 - type: ndcg_at_100 value: 73.076 - type: ndcg_at_1000 value: 73.559 - type: ndcg_at_3 value: 66.81200000000001 - type: ndcg_at_5 value: 68.92399999999999 - type: precision_at_1 value: 59.312 - type: precision_at_10 value: 8.798 - type: precision_at_100 value: 0.996 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 25.487 - type: precision_at_5 value: 16.401 - type: recall_at_1 value: 57.206999999999994 - type: recall_at_10 value: 82.767 - type: recall_at_100 value: 93.449 - type: recall_at_1000 value: 97.262 - type: recall_at_3 value: 72.271 - type: recall_at_5 value: 77.291 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (zh-CN) config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 70.78345662407531 - type: f1 value: 68.35683436974351 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (zh-CN) config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 73.16408876933423 - type: f1 value: 73.31484873459382 - task: type: Retrieval dataset: type: C-MTEB/MedicalRetrieval name: MTEB MedicalRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 51.4 - type: map_at_10 value: 57.091 - type: map_at_100 value: 57.652 - type: map_at_1000 value: 57.703 - type: map_at_3 value: 55.733 - type: map_at_5 value: 56.363 - type: mrr_at_1 value: 51.7 - type: mrr_at_10 value: 57.243 - type: mrr_at_100 value: 57.80499999999999 - type: mrr_at_1000 value: 57.855999999999995 - type: mrr_at_3 value: 55.883 - type: mrr_at_5 value: 
56.513000000000005 - type: ndcg_at_1 value: 51.4 - type: ndcg_at_10 value: 59.948 - type: ndcg_at_100 value: 63.064 - type: ndcg_at_1000 value: 64.523 - type: ndcg_at_3 value: 57.089999999999996 - type: ndcg_at_5 value: 58.214 - type: precision_at_1 value: 51.4 - type: precision_at_10 value: 6.9 - type: precision_at_100 value: 0.845 - type: precision_at_1000 value: 0.096 - type: precision_at_3 value: 20.333000000000002 - type: precision_at_5 value: 12.740000000000002 - type: recall_at_1 value: 51.4 - type: recall_at_10 value: 69.0 - type: recall_at_100 value: 84.5 - type: recall_at_1000 value: 96.2 - type: recall_at_3 value: 61.0 - type: recall_at_5 value: 63.7 - task: type: Classification dataset: type: C-MTEB/MultilingualSentiment-classification name: MTEB MultilingualSentiment config: default split: validation revision: None metrics: - type: accuracy value: 74.38999999999999 - type: f1 value: 74.07161306140839 - task: type: PairClassification dataset: type: C-MTEB/OCNLI name: MTEB Ocnli config: default split: validation revision: None metrics: - type: cos_sim_accuracy value: 81.15863562533838 - type: cos_sim_ap value: 84.84571607908443 - type: cos_sim_f1 value: 82.55872063968016 - type: cos_sim_precision value: 78.36812144212524 - type: cos_sim_recall value: 87.22280887011615 - type: dot_accuracy value: 81.15863562533838 - type: dot_ap value: 84.84571607908443 - type: dot_f1 value: 82.55872063968016 - type: dot_precision value: 78.36812144212524 - type: dot_recall value: 87.22280887011615 - type: euclidean_accuracy value: 81.15863562533838 - type: euclidean_ap value: 84.84571607908443 - type: euclidean_f1 value: 82.55872063968016 - type: euclidean_precision value: 78.36812144212524 - type: euclidean_recall value: 87.22280887011615 - type: manhattan_accuracy value: 80.7796426637791 - type: manhattan_ap value: 84.81524098914134 - type: manhattan_f1 value: 82.36462990561351 - type: manhattan_precision value: 77.76735459662288 - type: manhattan_recall value: 
87.53959873284055 - type: max_accuracy value: 81.15863562533838 - type: max_ap value: 84.84571607908443 - type: max_f1 value: 82.55872063968016 - task: type: Classification dataset: type: C-MTEB/OnlineShopping-classification name: MTEB OnlineShopping config: default split: test revision: None metrics: - type: accuracy value: 93.12000000000002 - type: ap value: 91.0749103088623 - type: f1 value: 93.10837266607813 - task: type: STS dataset: type: C-MTEB/PAWSX name: MTEB PAWSX config: default split: test revision: None metrics: - type: cos_sim_pearson value: 38.5692290188029 - type: cos_sim_spearman value: 42.965264868554335 - type: euclidean_pearson value: 43.002526263615735 - type: euclidean_spearman value: 42.97561576045246 - type: manhattan_pearson value: 43.050089639788936 - type: manhattan_spearman value: 43.038497558804934 - task: type: STS dataset: type: C-MTEB/QBQTC name: MTEB QBQTC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 38.99284895602663 - type: cos_sim_spearman value: 41.02655813481606 - type: euclidean_pearson value: 38.934953519378354 - type: euclidean_spearman value: 41.02680077136343 - type: manhattan_pearson value: 39.224809609807785 - type: manhattan_spearman value: 41.13950779185706 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (zh) config: zh split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 66.47464607633356 - type: cos_sim_spearman value: 66.76311382148693 - type: euclidean_pearson value: 67.25180409604143 - type: euclidean_spearman value: 66.76311382148693 - type: manhattan_pearson value: 67.6928257682864 - type: manhattan_spearman value: 67.08172581019826 - task: type: STS dataset: type: C-MTEB/STSB name: MTEB STSB config: default split: test revision: None metrics: - type: cos_sim_pearson value: 77.48943840585562 - type: cos_sim_spearman value: 79.0869194735025 - type: euclidean_pearson value: 79.48559575794792 - 
type: euclidean_spearman value: 79.08765044225807 - type: manhattan_pearson value: 79.36157224751007 - type: manhattan_spearman value: 78.94400905463999 - task: type: Reranking dataset: type: C-MTEB/T2Reranking name: MTEB T2Reranking config: default split: dev revision: None metrics: - type: map value: 66.1093201711458 - type: mrr value: 75.70959742506797 - task: type: Retrieval dataset: type: C-MTEB/T2Retrieval name: MTEB T2Retrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 25.533 - type: map_at_10 value: 71.322 - type: map_at_100 value: 75.244 - type: map_at_1000 value: 75.333 - type: map_at_3 value: 50.15500000000001 - type: map_at_5 value: 61.514 - type: mrr_at_1 value: 86.126 - type: mrr_at_10 value: 89.462 - type: mrr_at_100 value: 89.58500000000001 - type: mrr_at_1000 value: 89.59 - type: mrr_at_3 value: 88.88000000000001 - type: mrr_at_5 value: 89.241 - type: ndcg_at_1 value: 86.126 - type: ndcg_at_10 value: 79.89500000000001 - type: ndcg_at_100 value: 84.405 - type: ndcg_at_1000 value: 85.286 - type: ndcg_at_3 value: 81.547 - type: ndcg_at_5 value: 79.834 - type: precision_at_1 value: 86.126 - type: precision_at_10 value: 39.972 - type: precision_at_100 value: 4.932 - type: precision_at_1000 value: 0.514 - type: precision_at_3 value: 71.49 - type: precision_at_5 value: 59.687 - type: recall_at_1 value: 25.533 - type: recall_at_10 value: 78.962 - type: recall_at_100 value: 93.413 - type: recall_at_1000 value: 97.89099999999999 - type: recall_at_3 value: 52.129000000000005 - type: recall_at_5 value: 65.444 - task: type: Classification dataset: type: C-MTEB/TNews-classification name: MTEB TNews config: default split: validation revision: None metrics: - type: accuracy value: 51.800000000000004 - type: f1 value: 50.07807183704828 - task: type: Clustering dataset: type: C-MTEB/ThuNewsClusteringP2P name: MTEB ThuNewsClusteringP2P config: default split: test revision: None metrics: - type: v_measure value: 65.15253218390774 - 
task: type: Clustering dataset: type: C-MTEB/ThuNewsClusteringS2S name: MTEB ThuNewsClusteringS2S config: default split: test revision: None metrics: - type: v_measure value: 58.81779372506517 - task: type: Retrieval dataset: type: C-MTEB/VideoRetrieval name: MTEB VideoRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 53.0 - type: map_at_10 value: 63.422999999999995 - type: map_at_100 value: 63.995000000000005 - type: map_at_1000 value: 64.004 - type: map_at_3 value: 61.382999999999996 - type: map_at_5 value: 62.488 - type: mrr_at_1 value: 53.0 - type: mrr_at_10 value: 63.422999999999995 - type: mrr_at_100 value: 63.995000000000005 - type: mrr_at_1000 value: 64.004 - type: mrr_at_3 value: 61.382999999999996 - type: mrr_at_5 value: 62.488 - type: ndcg_at_1 value: 53.0 - type: ndcg_at_10 value: 68.301 - type: ndcg_at_100 value: 70.988 - type: ndcg_at_1000 value: 71.294 - type: ndcg_at_3 value: 64.11 - type: ndcg_at_5 value: 66.094 - type: precision_at_1 value: 53.0 - type: precision_at_10 value: 8.35 - type: precision_at_100 value: 0.958 - type: precision_at_1000 value: 0.098 - type: precision_at_3 value: 24.0 - type: precision_at_5 value: 15.36 - type: recall_at_1 value: 53.0 - type: recall_at_10 value: 83.5 - type: recall_at_100 value: 95.8 - type: recall_at_1000 value: 98.3 - type: recall_at_3 value: 72.0 - type: recall_at_5 value: 76.8 - task: type: Classification dataset: type: C-MTEB/waimai-classification name: MTEB Waimai config: default split: test revision: None metrics: - type: accuracy value: 86.18 - type: ap value: 69.04229346593745 - type: f1 value: 84.52986739717021 license: apache-2.0 --- <div align="center"> <img src="logo.png" alt="icon" width="100px"/> </div> <h1 align="center">Dmeta-embedding-small</h1> - Dmeta-embedding系列模型是跨领域、跨任务、开箱即用的中文 Embedding 模型,适用于搜索、问答、智能客服、LLM+RAG 等各种业务场景,支持使用 Transformers/Sentence-Transformers/Langchain 等工具加载推理。 - 
**Dmeta-embedding-zh-small**是开源模型[Dmeta-embedding-zh](https://huggingface.co/DMetaSoul/Dmeta-embedding-zh)的蒸馏版本(8层BERT),模型大小不到300M。相较于原始版本,Dmeta-embedding-zh-small模型大小减小三分之一,推理速度提升约30%,总体精度下降约1.4%。 --- ## Evaluation 这里主要跟蒸馏前对应的 teacher 模型作了对比: *性能:*(基于1万条数据测试,GPU设备是V100) | | Teacher | Student | Gap | | ---------- | ------------------------- | ------------------------------ | ----- | | Model | Dmeta-Embedding-zh (411M) | Dmeta-Embedding-zh-small (297M)| 0.67x | | Cost | 127s | 89s | -30% | | Latency | 13ms | 9ms | -31% | | Throughput | 78 sentence/s | 111 sentence/s | 1.4x | *精度:*(参考自MTEB榜单) | | **Classification** | **Clustering** | **Pair Classification** | **Reranking** | **Retrieval** | **STS** | **Avg** | | ----------------------------- | ----------------- | -------------- | ----------------------- | ------------- | ------------- | ------- | ------- | | **Dmeta-Embedding-zh** | 70 | 50.96 | 88.92 | 67.17 | 70.41 | 64.89 | 67.51 | | **Dmeta-Embedding-zh-small** | 69.89 | 50.8 | 87.57 | 66.92 | 67.7 | 62.13 | 66.1 | | **Gap** | -0.11 | -0.16 | -1.35 | -0.25 | -2.71 | -2.76 | -1.41 | ## Usage 目前模型支持通过 [Sentence-Transformers](#sentence-transformers), [Langchain](#langchain), [Huggingface Transformers](#huggingface-transformers) 等主流框架进行推理,具体用法参考各个框架的示例。 ### Sentence-Transformers Dmeta-embedding 模型支持通过 [sentence-transformers](https://www.SBERT.net) 来加载推理: ``` pip install -U sentence-transformers ``` ```python from sentence_transformers import SentenceTransformer texts1 = ["胡子长得太快怎么办?", "在香港哪里买手表好"] texts2 = ["胡子长得快怎么办?", "怎样使胡子不浓密!", "香港买手表哪里好", "在杭州手机到哪里买"] model = SentenceTransformer('DMetaSoul/Dmeta-embedding-zh-small') embs1 = model.encode(texts1, normalize_embeddings=True) embs2 = model.encode(texts2, normalize_embeddings=True) # 计算两两相似度 similarity = embs1 @ embs2.T print(similarity) # 获取 texts1[i] 对应的最相似 texts2[j] for i in range(len(texts1)): scores = [] for j in range(len(texts2)): scores.append([texts2[j], similarity[i][j]]) scores = sorted(scores, key=lambda 
x:x[1], reverse=True) print(f"查询文本:{texts1[i]}") for text2, score in scores: print(f"相似文本:{text2},打分:{score}") print() ``` 示例输出如下: ``` 查询文本:胡子长得太快怎么办? 相似文本:胡子长得快怎么办?,打分:0.965681254863739 相似文本:怎样使胡子不浓密!,打分:0.7353651523590088 相似文本:香港买手表哪里好,打分:0.24928246438503265 相似文本:在杭州手机到哪里买,打分:0.2038613110780716 查询文本:在香港哪里买手表好 相似文本:香港买手表哪里好,打分:0.9916468262672424 相似文本:在杭州手机到哪里买,打分:0.498248815536499 相似文本:胡子长得快怎么办?,打分:0.2424771636724472 相似文本:怎样使胡子不浓密!,打分:0.21715955436229706 ``` ### Langchain Dmeta-embedding 模型支持通过 LLM 工具框架 [langchain](https://www.langchain.com/) 来加载推理: ``` pip install -U langchain ``` ```python import torch import numpy as np from langchain.embeddings import HuggingFaceEmbeddings model_name = "DMetaSoul/Dmeta-embedding-zh-small" model_kwargs = {'device': 'cuda' if torch.cuda.is_available() else 'cpu'} encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity model = HuggingFaceEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs, ) texts1 = ["胡子长得太快怎么办?", "在香港哪里买手表好"] texts2 = ["胡子长得快怎么办?", "怎样使胡子不浓密!", "香港买手表哪里好", "在杭州手机到哪里买"] embs1 = model.embed_documents(texts1) embs2 = model.embed_documents(texts2) embs1, embs2 = np.array(embs1), np.array(embs2) # 计算两两相似度 similarity = embs1 @ embs2.T print(similarity) # 获取 texts1[i] 对应的最相似 texts2[j] for i in range(len(texts1)): scores = [] for j in range(len(texts2)): scores.append([texts2[j], similarity[i][j]]) scores = sorted(scores, key=lambda x:x[1], reverse=True) print(f"查询文本:{texts1[i]}") for text2, score in scores: print(f"相似文本:{text2},打分:{score}") print() ``` ### HuggingFace Transformers Dmeta-embedding 模型支持通过 [HuggingFace Transformers](https://huggingface.co/docs/transformers/index) 框架来加载推理: ``` pip install -U transformers ``` ```python import torch from transformers import AutoTokenizer, AutoModel def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings 
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) def cls_pooling(model_output): return model_output[0][:, 0] texts1 = ["胡子长得太快怎么办?", "在香港哪里买手表好"] texts2 = ["胡子长得快怎么办?", "怎样使胡子不浓密!", "香港买手表哪里好", "在杭州手机到哪里买"] tokenizer = AutoTokenizer.from_pretrained('DMetaSoul/Dmeta-embedding-zh-small') model = AutoModel.from_pretrained('DMetaSoul/Dmeta-embedding-zh-small') model.eval() with torch.no_grad(): inputs1 = tokenizer(texts1, padding=True, truncation=True, return_tensors='pt') inputs2 = tokenizer(texts2, padding=True, truncation=True, return_tensors='pt') model_output1 = model(**inputs1) model_output2 = model(**inputs2) embs1, embs2 = cls_pooling(model_output1), cls_pooling(model_output2) embs1 = torch.nn.functional.normalize(embs1, p=2, dim=1).numpy() embs2 = torch.nn.functional.normalize(embs2, p=2, dim=1).numpy() # 计算两两相似度 similarity = embs1 @ embs2.T print(similarity) # 获取 texts1[i] 对应的最相似 texts2[j] for i in range(len(texts1)): scores = [] for j in range(len(texts2)): scores.append([texts2[j], similarity[i][j]]) scores = sorted(scores, key=lambda x:x[1], reverse=True) print(f"查询文本:{texts1[i]}") for text2, score in scores: print(f"相似文本:{text2},打分:{score}") print() ``` ## Contact 您如果在使用过程中,遇到任何问题,欢迎前往[讨论区](https://huggingface.co/DMetaSoul/Dmeta-embedding-zh-small/discussions)建言献策。 您也可以联系我们:赵中昊 <[email protected]>, 肖文斌 <[email protected]>, 孙凯 <[email protected]> 同时我们也开通了微信群,可扫码加入我们(人数超200了,先加管理员再拉进群),一起共建 AIGC 技术生态! <image src="https://huggingface.co/DMetaSoul/Dmeta-embedding-zh-small/resolve/main/weixin.jpeg" style="display: block; margin-left: auto; margin-right: auto; width: 256px; height: 358px;"/> ## License Dmeta-embedding 系列模型采用 Apache-2.0 License,开源模型可以进行免费商用私有部署。
indobenchmark/indobert-large-p1
indobenchmark
"2021-05-19T20:26:01Z"
4,573
4
transformers
[ "transformers", "pytorch", "tf", "jax", "bert", "feature-extraction", "indobert", "indobenchmark", "indonlu", "id", "dataset:Indo4B", "arxiv:2009.05387", "license:mit", "region:us" ]
feature-extraction
"2022-03-02T23:29:05Z"
--- language: id tags: - indobert - indobenchmark - indonlu license: mit inference: false datasets: - Indo4B --- # IndoBERT Large Model (phase1 - uncased) [IndoBERT](https://arxiv.org/abs/2009.05387) is a state-of-the-art language model for Indonesian based on the BERT model. The pretrained model is trained using a masked language modeling (MLM) objective and next sentence prediction (NSP) objective. ## All Pre-trained Models | Model | #params | Arch. | Training data | |--------------------------------|--------------------------------|-------|-----------------------------------| | `indobenchmark/indobert-base-p1` | 124.5M | Base | Indo4B (23.43 GB of text) | | `indobenchmark/indobert-base-p2` | 124.5M | Base | Indo4B (23.43 GB of text) | | `indobenchmark/indobert-large-p1` | 335.2M | Large | Indo4B (23.43 GB of text) | | `indobenchmark/indobert-large-p2` | 335.2M | Large | Indo4B (23.43 GB of text) | | `indobenchmark/indobert-lite-base-p1` | 11.7M | Base | Indo4B (23.43 GB of text) | | `indobenchmark/indobert-lite-base-p2` | 11.7M | Base | Indo4B (23.43 GB of text) | | `indobenchmark/indobert-lite-large-p1` | 17.7M | Large | Indo4B (23.43 GB of text) | | `indobenchmark/indobert-lite-large-p2` | 17.7M | Large | Indo4B (23.43 GB of text) | ## How to use ### Load model and tokenizer ```python from transformers import BertTokenizer, AutoModel tokenizer = BertTokenizer.from_pretrained("indobenchmark/indobert-large-p1") model = AutoModel.from_pretrained("indobenchmark/indobert-large-p1") ``` ### Extract contextual representation ```python x = torch.LongTensor(tokenizer.encode('aku adalah anak [MASK]')).view(1,-1) print(x, model(x)[0].sum()) ``` ## Authors <b>IndoBERT</b> was trained and evaluated by Bryan Wilie\*, Karissa Vincentio\*, Genta Indra Winata\*, Samuel Cahyawijaya\*, Xiaohong Li, Zhi Yuan Lim, Sidik Soleman, Rahmad Mahendra, Pascale Fung, Syafri Bahar, Ayu Purwarianti. 
## Citation If you use our work, please cite: ```bibtex @inproceedings{wilie2020indonlu, title={IndoNLU: Benchmark and Resources for Evaluating Indonesian Natural Language Understanding}, author={Bryan Wilie and Karissa Vincentio and Genta Indra Winata and Samuel Cahyawijaya and X. Li and Zhi Yuan Lim and S. Soleman and R. Mahendra and Pascale Fung and Syafri Bahar and A. Purwarianti}, booktitle={Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing}, year={2020} } ```
digiplay/STRANGER
digiplay
"2024-06-16T21:20:33Z"
4,568
4
diffusers
[ "diffusers", "safetensors", "license:other", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2024-06-15T03:12:57Z"
--- license: other --- in test ![ed93d3ff-b359-4286-8da0-2ab3ba9ab537.jpeg](https://cdn-uploads.huggingface.co/production/uploads/646c83c871d0c8a6e4455854/VTX6NHuMxJdQNq5goyG7n.jpeg)
Qwen/Qwen-1_8B
Qwen
"2023-12-13T15:43:35Z"
4,566
55
transformers
[ "transformers", "safetensors", "qwen", "text-generation", "custom_code", "zh", "en", "arxiv:2309.16609", "arxiv:2305.08322", "arxiv:2009.03300", "autotrain_compatible", "region:us" ]
text-generation
"2023-11-30T02:55:15Z"
--- language: - zh - en tags: - qwen pipeline_tag: text-generation inference: false --- # Qwen-1.8B <p align="center"> <img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg" width="400"/> <p> <br> <p align="center"> 🤗 <a href="https://huggingface.co/Qwen">Hugging Face</a>&nbsp&nbsp | &nbsp&nbsp🤖 <a href="https://modelscope.cn/organization/qwen">ModelScope</a>&nbsp&nbsp | &nbsp&nbsp 📑 <a href="https://arxiv.org/abs/2309.16609">Paper</a> &nbsp&nbsp | &nbsp&nbsp🖥️ <a href="https://modelscope.cn/studios/qwen/Qwen-14B-Chat-Demo/summary">Demo</a> <br> <a href="https://github.com/QwenLM/Qwen/blob/main/assets/wechat.png">WeChat (微信)</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://discord.gg/z3GAxXZ9Ce">Discord</a>&nbsp&nbsp | &nbsp&nbsp<a href="https://dashscope.aliyun.com">API</a> </p> <br> ## 介绍 (Introduction) **通义千问-1.8B(Qwen-1.8B)**是阿里云研发的通义千问大模型系列的18亿参数规模的模型。Qwen-1.8B是基于Transformer的大语言模型, 在超大规模的预训练数据上进行训练得到。预训练数据类型多样,覆盖广泛,包括大量网络文本、专业书籍、代码等。同时,在Qwen-1.8B的基础上,我们使用对齐机制打造了基于大语言模型的AI助手Qwen-1.8B-Chat。本仓库为Qwen-1.8B的仓库。 通义千问-1.8B(Qwen-1.8B)主要有以下特点: 1. **低成本部署**:提供int8和int4量化版本,推理最低仅需不到2GB显存,生成2048 tokens仅需3GB显存占用。微调最低仅需6GB。 2. **大规模高质量训练语料**:使用超过2.2万亿tokens的数据进行预训练,包含高质量中、英、多语言、代码、数学等数据,涵盖通用及专业领域的训练语料。通过大量对比实验对预训练语料分布进行了优化。 3. **优秀的性能**:Qwen-1.8B支持8192上下文长度,在多个中英文下游评测任务上(涵盖常识推理、代码、数学、翻译等),效果显著超越现有的相近规模开源模型,具体评测结果请详见下文。 4. **覆盖更全面的词表**:相比目前以中英词表为主的开源模型,Qwen-1.8B使用了约15万大小的词表。该词表对多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强和扩展。 如果您想了解更多关于通义千问1.8B开源模型的细节,我们建议您参阅[GitHub代码库](https://github.com/QwenLM/Qwen)。 **Qwen-1.8B** is the 1.8B-parameter version of the large language model series, Qwen (abbr. Tongyi Qianwen), proposed by Aibaba Cloud. Qwen-1.8B is a Transformer-based large language model, which is pretrained on a large volume of data, including web texts, books, codes, etc. Additionally, based on the pretrained Qwen-1.8B, we release Qwen-1.8B-Chat, a large-model-based AI assistant, which is trained with alignment techniques. This repository is the one for Qwen-1.8B. 
The features of Qwen-1.8B include: 1. **Low-cost deployment**: We provide int4 and int8 quantized versions, the minimum memory requirment for inference is less than 2GB, generating 2048 tokens only 3GB of memory usage. The minimum memory requirment of finetuning is only 6GB. 2. **Large-scale high-quality training corpora**: It is pretrained on over 2.2 trillion tokens, including Chinese, English, multilingual texts, code, and mathematics, covering general and professional fields. The distribution of the pre-training corpus has been optimized through a large number of ablation experiments. 3. **Good performance**: It supports 8192 context length and significantly surpasses existing open-source models of similar scale on multiple Chinese and English downstream evaluation tasks (including commonsense, reasoning, code, mathematics, etc.), and even surpasses some larger-scale models in several benchmarks. See below for specific evaluation results. 4. **More comprehensive vocabulary coverage**: Compared with other open-source models based on Chinese and English vocabularies, Qwen-1.8B uses a vocabulary of over 150K tokens. This vocabulary is more friendly to multiple languages, enabling users to directly further enhance the capability for certain languages without expanding the vocabulary. For more details about the open-source model of Qwen-1.8B, please refer to the [GitHub](https://github.com/QwenLM/Qwen) code repository. <br> ## 要求(Requirements) * python 3.8及以上版本 * pytorch 1.12及以上版本,推荐2.0及以上版本 * 建议使用CUDA 11.4及以上(GPU用户、flash-attention用户等需考虑此选项) * python 3.8 and above * pytorch 1.12 and above, 2.0 and above are recommended * CUDA 11.4 and above are recommended (this is for GPU users, flash-attention users, etc.) ## 依赖项 (Dependency) 运行Qwen-1.8B,请确保满足上述要求,再执行以下pip命令安装依赖库 To run Qwen-1.8B, please make sure you meet the above requirements, and then execute the following pip commands to install the dependent libraries. 
```bash pip install transformers==4.32.0 accelerate tiktoken einops ``` 另外,推荐安装`flash-attention`库(**当前已支持flash attention 2**),以实现更高的效率和更低的显存占用。 In addition, it is recommended to install the `flash-attention` library (**we support flash attention 2 now.**) for higher efficiency and lower memory usage. ```bash git clone https://github.com/Dao-AILab/flash-attention cd flash-attention && pip install . # 下方安装可选,安装可能比较缓慢。 # pip install csrc/layer_norm # pip install csrc/rotary ``` <br> ## 快速使用(Quickstart) 您可以通过以下代码轻松调用: You can easily call the model with the following code: ```python from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig # Note: The default behavior now has injection attack prevention off. tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-1_8B", trust_remote_code=True) # use bf16 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B", device_map="auto", trust_remote_code=True, bf16=True).eval() # use fp16 # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B", device_map="auto", trust_remote_code=True, fp16=True).eval() # use cpu only # model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B", device_map="cpu", trust_remote_code=True).eval() # use auto mode, automatically select precision based on the device. model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-1_8B", device_map="auto", trust_remote_code=True).eval() # Specify hyperparameters for generation. But if you use transformers>=4.32.0, there is no need to do this. # model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-1_8B", trust_remote_code=True) inputs = tokenizer('蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是', return_tensors='pt') inputs = inputs.to(model.device) pred = model.generate(**inputs) print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True)) # 蒙古国的首都是乌兰巴托(Ulaanbaatar)\n冰岛的首都是雷克雅未克(Reykjavik)\n埃塞俄比亚的首都是亚的斯亚贝巴(Addis Ababa)... 
``` 关于更多的使用说明,请参考我们的[GitHub repo](https://github.com/QwenLM/Qwen)获取更多信息。 For more information, please refer to our [GitHub repo](https://github.com/QwenLM/Qwen) for more information. <br> ## Tokenizer > 注:作为术语的“tokenization”在中文中尚无共识的概念对应,本文档采用英文表达以利说明。 基于tiktoken的分词器有别于其他分词器,比如sentencepiece分词器。尤其在微调阶段,需要特别注意特殊token的使用。关于tokenizer的更多信息,以及微调时涉及的相关使用,请参阅[文档](https://github.com/QwenLM/Qwen/blob/main/tokenization_note_zh.md)。 Our tokenizer based on tiktoken is different from other tokenizers, e.g., sentencepiece tokenizer. You need to pay attention to special tokens, especially in finetuning. For more detailed information on the tokenizer and related use in fine-tuning, please refer to the [documentation](https://github.com/QwenLM/Qwen/blob/main/tokenization_note.md). ## 模型细节 (Model) Qwen-1.8B模型规模基本情况如下所示: The details of the model architecture of Qwen-1.8B are listed as follows: | Hyperparameter | Value | |:----------------|:-------| | n_layers | 24 | | n_heads | 16 | | d_model | 2048 | | vocab size | 151851 | | sequence length | 8192 | 在位置编码、FFN激活函数和normalization的实现方式上,我们也采用了目前最流行的做法, 即RoPE相对位置编码、SwiGLU激活函数、RMSNorm(可选安装flash-attention加速)。 在分词器方面,相比目前主流开源模型以中英词表为主,Qwen-1.8B使用了超过15万token大小的词表。 该词表在GPT-4使用的BPE词表`cl100k_base`基础上,对中文、多语言进行了优化,在对中、英、代码数据的高效编解码的基础上,对部分多语言更加友好,方便用户在不扩展词表的情况下对部分语种进行能力增强。 词表对数字按单个数字位切分。调用较为高效的[tiktoken分词库](https://github.com/openai/tiktoken)进行分词。 我们从部分语种各随机抽取100万个文档语料,以对比不同模型的编码压缩率(以支持100语种的XLM-R为基准值1,越低越好),具体性能见图。 可以看到Qwen-1.8B在保持中英代码高效解码的前提下,对部分使用人群较多的语种(泰语th、希伯来语he、阿拉伯语ar、韩语ko、越南语vi、日语ja、土耳其语tr、印尼语id、波兰语pl、俄语ru、荷兰语nl、葡萄牙语pt、意大利语it、德语de、西班牙语es、法语fr等)上也实现了较高的压缩率,使得模型在这些语种上也具备较强的可扩展性和较高的训练和推理效率。 在预训练数据方面,Qwen-1.8B模型一方面利用了部分开源通用语料, 另一方面也积累了海量全网语料以及高质量文本内容,去重及过滤后的语料超过2.2T tokens。 囊括全网文本、百科、书籍、代码、数学及各个领域垂类。 <p align="center"> <img src="assets/tokenizer.png" style="width: 1200px"/> <p> For position encoding, FFN activation function, and normalization methods, we adopt the prevalent practices, i.e., RoPE relative position encoding, SwiGLU for 
activation function, and RMSNorm for normalization (optional installation of flash-attention for acceleration). For tokenization, compared to the current mainstream open-source models based on Chinese and English vocabularies, Qwen-1.8B uses a vocabulary of over 150K tokens. It first considers efficient encoding of Chinese, English, and code data, and is also more friendly to multilingual languages, enabling users to directly enhance the capability of some languages without expanding the vocabulary. It segments numbers by single digit, and calls the [tiktoken](https://github.com/openai/tiktoken) tokenizer library for efficient tokenization. We randomly selected 1 million document corpus of each language to test and compare the encoding compression rates of different models (with XLM-R, which supports 100 languages, as the base value 1). The specific performance is shown in the figure above. As can be seen, while ensuring the efficient decoding of Chinese, English, and code, Qwen-1.8B also achieves a high compression rate for many other languages (such as th, he, ar, ko, vi, ja, tr, id, pl, ru, nl, pt, it, de, es, fr etc.), equipping the model with strong scalability as well as high training and inference efficiency in these languages. For pre-training data, on the one hand, Qwen-1.8B uses part of the open-source generic corpus. On the other hand, it uses a massive amount of accumulated web corpus and high-quality text content. The scale of corpus reaches over 2.2T tokens after deduplication and filtration, encompassing web text, encyclopedias, books, code, mathematics, and various domain. <br> ## 评测效果(Evaluation) ### 中文评测(Chinese Evaluation) #### C-Eval [C-Eval](https://arxiv.org/abs/2305.08322)是评测预训练模型中文常识能力的常用测评框架,覆盖人文、社科、理工、其他专业四个大方向共52个学科。 我们按照标准做法,以开发集样本作为few-shot来源,评价Qwen-1.8B预训练模型的5-shot验证集与测试集准确率。 [C-Eval](https://arxiv.org/abs/2305.08322) is a common evaluation benchmark for testing the common sense capability of pre-trained models in Chinese. 
It covers 52 subjects in four major directions: humanities, social sciences, STEM, and other specialties. According to the standard practice, we use the development set samples as the source of few-shot, to evaluate the 5-shot validation set and test set accuracy of the Qwen-1.8B pre-trained model. 在C-Eval验证集、测试集上,Qwen-1.8B模型和其他模型的准确率对比如下: The accuracy comparison of Qwen-1.8B and the other models on the C-Eval validation set is shown as follows: | Model | Avg. (Val) | Avg. (Test) | |:--------------|:----------:|:-----------:| | Bloom-1B7 | 23.8 | - | | Bloomz-1B7 | 29.6 | - | | Bloom-3B | 25.8 | - | | Bloomz-3B | 32.5 | - | | MiLM-1.3B | - | 45.8 | | **Qwen-1.8B** | **56.1** | **56.2** | ### 英文评测(English Evaluation) #### MMLU [MMLU](https://arxiv.org/abs/2009.03300)是目前评测英文综合能力最权威的基准评测之一,同样覆盖了不同学科领域、不同难度层级的57个子任务。 Qwen-1.8B在MMLU 5-shot准确率表现如下表: [MMLU](https://arxiv.org/abs/2009.03300) is currently one of the most recognized benchmarks for evaluating English comprehension abilities, covering 57 subtasks across different academic fields and difficulty levels. The MMLU 5-shot accuracy performance of Qwen-1.8B is shown in the following table: | Model | Avg. 
| |:--------------|:--------:| | GPT-Neo-1.3B | 24.6 | | OPT-1.3B | 25.1 | | Pythia-1B | 26.6 | | Bloom-1.1B | 26.7 | | Bloom-1.7B | 27.7 | | Bloomz-1.7B | 30.7 | | Bloomz-3B | 33.3 | | **Qwen-1.8B** | **45.3** | ### 代码评测(Coding Evaluation) 我们在[HumanEval](https://github.com/openai/human-eval)(0-shot)上对比预训练模型的代码能力,结果如下: We compared the code capabilities of pre-trained models on [HumanEval](https://github.com/openai/human-eval), and the results are as follows: | Model | Pass@1 | |:--------------|:--------:| | GPT-Neo-1.3B | 3.66 | | GPT-Neo-2.7B | 7.93 | | Pythia-1B | 3.67 | | Pythia-2.8B | 5.49 | | Bloom-1.1B | 2.48 | | Bloom-1.7B | 4.03 | | Bloom-3B | 6.48 | | Bloomz-1.7B | 4.38 | | Bloomz-3B | 6.71 | | **Qwen-1.8B** | **15.2** | ### 数学评测(Mathematics Evaluation) 数学能力使用常用的[GSM8K](https://github.com/openai/grade-school-math)数据集(8-shot)评价: We compared the math capabilities of pre-trained models on [GSM8K](https://github.com/openai/grade-school-math) (8-shot), and the results are as follows: | Model | Acc. | |:--------------|:--------:| | GPT-Neo-1.3B | 1.97 | | GPT-Neo-2.7B | 1.74 | | Pythia-1B | 2.20 | | Pythia-2.8B | 3.11 | | Openllama-3B | 3.11 | | Bloom-1.1B | 1.82 | | Bloom-1.7B | 2.05 | | Bloom-3B | 1.82 | | Bloomz-1.7B | 2.05 | | Bloomz-3B | 3.03 | | **Qwen-1.8B** | **32.3** | ## 评测复现(Reproduction) 我们提供了评测脚本,方便大家复现模型效果,详见[链接](https://github.com/QwenLM/Qwen/tree/main/eval)。提示:由于硬件和框架造成的舍入误差,复现结果如有小幅波动属于正常现象。 We have provided evaluation scripts to reproduce the performance of our model, details as [link](https://github.com/QwenLM/Qwen/tree/main/eval). <br> ## FAQ 如遇到问题,敬请查阅[FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ_zh.md)以及issue区,如仍无法解决再提交issue。 If you meet problems, please refer to [FAQ](https://github.com/QwenLM/Qwen/blob/main/FAQ.md) and the issues first to search a solution before you launch a new issue. <br> ## 引用 (Citation) 如果你觉得我们的工作对你有帮助,欢迎引用! If you find our work helpful, feel free to give us a cite. 
``` @article{qwen, title={Qwen Technical Report}, author={Jinze Bai and Shuai Bai and Yunfei Chu and Zeyu Cui and Kai Dang and Xiaodong Deng and Yang Fan and Wenbin Ge and Yu Han and Fei Huang and Binyuan Hui and Luo Ji and Mei Li and Junyang Lin and Runji Lin and Dayiheng Liu and Gao Liu and Chengqiang Lu and Keming Lu and Jianxin Ma and Rui Men and Xingzhang Ren and Xuancheng Ren and Chuanqi Tan and Sinan Tan and Jianhong Tu and Peng Wang and Shijie Wang and Wei Wang and Shengguang Wu and Benfeng Xu and Jin Xu and An Yang and Hao Yang and Jian Yang and Shusheng Yang and Yang Yao and Bowen Yu and Hongyi Yuan and Zheng Yuan and Jianwei Zhang and Xingxuan Zhang and Yichang Zhang and Zhenru Zhang and Chang Zhou and Jingren Zhou and Xiaohuan Zhou and Tianhang Zhu}, journal={arXiv preprint arXiv:2309.16609}, year={2023} } ``` <br> ## 使用协议(License Agreement) 我们的代码和模型权重对学术研究完全开放。请查看[LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20RESEARCH%20LICENSE%20AGREEMENT)文件了解具体的开源协议细节。如需商用,请联系我们。 Our code and checkpoints are open to research purpose. Check the [LICENSE](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20RESEARCH%20LICENSE%20AGREEMENT) for more details about the license. For commercial use, please contact us. <br> ## 联系我们(Contact Us) 如果你想给我们的研发团队和产品团队留言,欢迎加入我们的微信群、钉钉群以及Discord!同时,也欢迎通过邮件([email protected])联系我们。 If you are interested to leave a message to either our research team or product team, join our Discord or WeChat groups! Also, feel free to send an email to [email protected].
TheBloke/WizardLM-7B-uncensored-GGUF
TheBloke
"2023-09-27T12:52:40Z"
4,561
17
transformers
[ "transformers", "gguf", "llama", "uncensored", "dataset:ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered", "base_model:ehartford/WizardLM-7B-Uncensored", "license:other", "text-generation-inference", "region:us" ]
null
"2023-09-19T23:17:28Z"
--- license: other tags: - uncensored datasets: - ehartford/WizardLM_alpaca_evol_instruct_70k_unfiltered model_name: Wizardlm 7B Uncensored base_model: ehartford/WizardLM-7B-Uncensored inference: false model_creator: Eric Hartford model_type: llama prompt_template: 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user''s questions. USER: {prompt} ASSISTANT: ' quantized_by: TheBloke --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Wizardlm 7B Uncensored - GGUF - Model creator: [Eric Hartford](https://huggingface.co/ehartford) - Original model: [Wizardlm 7B Uncensored](https://huggingface.co/ehartford/WizardLM-7B-Uncensored) <!-- description start --> ## Description This repo contains GGUF format model files for [Eric Hartford's Wizardlm 7B Uncensored](https://huggingface.co/ehartford/WizardLM-7B-Uncensored). 
<!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. 
<!-- README_GGUF.md-about-gguf end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF) * [Eric Hartford's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/ehartford/WizardLM-7B-Uncensored) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Vicuna ``` A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT: ``` <!-- prompt-template end --> <!-- compatibility_gguf start --> ## Compatibility These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) They are also compatible with many third party UIs and libraries - please see the list at the top of this README. ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. 
This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. </details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [WizardLM-7B-uncensored.Q2_K.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q2_K.gguf) | Q2_K | 2 | 2.83 GB| 5.33 GB | smallest, significant quality loss - not recommended for most purposes | | [WizardLM-7B-uncensored.Q3_K_S.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q3_K_S.gguf) | Q3_K_S | 3 | 2.95 GB| 5.45 GB | very small, high quality loss | | [WizardLM-7B-uncensored.Q3_K_M.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q3_K_M.gguf) | Q3_K_M | 3 | 3.30 GB| 5.80 GB | very small, high quality loss | | [WizardLM-7B-uncensored.Q3_K_L.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q3_K_L.gguf) | Q3_K_L | 3 | 3.60 GB| 6.10 GB | small, substantial quality loss | | [WizardLM-7B-uncensored.Q4_0.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q4_0.gguf) | Q4_0 | 4 | 3.83 GB| 6.33 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [WizardLM-7B-uncensored.Q4_K_S.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q4_K_S.gguf) | Q4_K_S | 4 | 3.86 GB| 6.36 GB | small, greater quality loss | | 
[WizardLM-7B-uncensored.Q4_K_M.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q4_K_M.gguf) | Q4_K_M | 4 | 4.08 GB| 6.58 GB | medium, balanced quality - recommended | | [WizardLM-7B-uncensored.Q5_0.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q5_0.gguf) | Q5_0 | 5 | 4.65 GB| 7.15 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [WizardLM-7B-uncensored.Q5_K_S.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q5_K_S.gguf) | Q5_K_S | 5 | 4.65 GB| 7.15 GB | large, low quality loss - recommended | | [WizardLM-7B-uncensored.Q5_K_M.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q5_K_M.gguf) | Q5_K_M | 5 | 4.78 GB| 7.28 GB | large, very low quality loss - recommended | | [WizardLM-7B-uncensored.Q6_K.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q6_K.gguf) | Q6_K | 6 | 5.53 GB| 8.03 GB | very large, extremely low quality loss | | [WizardLM-7B-uncensored.Q8_0.gguf](https://huggingface.co/TheBloke/WizardLM-7B-uncensored-GGUF/blob/main/WizardLM-7B-uncensored.Q8_0.gguf) | Q8_0 | 8 | 7.16 GB| 9.66 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. 
The following clients/libraries will automatically download models for you, providing a list of available models to choose from: - LM Studio - LoLLMS Web UI - Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/WizardLM-7B-uncensored-GGUF and below it, a specific filename to download, such as: WizardLM-7B-uncensored.Q4_K_M.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/WizardLM-7B-uncensored-GGUF WizardLM-7B-uncensored.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/WizardLM-7B-uncensored-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/WizardLM-7B-uncensored-GGUF WizardLM-7B-uncensored.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. 
</details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 32 -m WizardLM-7B-uncensored.Q4_K_M.gguf --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: {prompt} ASSISTANT:" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 2048` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. 
### How to load this model in Python code, using ctransformers #### First install the package Run one of the following commands, according to your system: ```shell # Base ctransformers with no GPU acceleration pip install ctransformers # Or with CUDA GPU acceleration pip install ctransformers[cuda] # Or with AMD ROCm GPU acceleration (Linux only) CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers # Or with Metal GPU acceleration for macOS systems only CT_METAL=1 pip install ctransformers --no-binary ctransformers ``` #### Simple ctransformers example code ```python from ctransformers import AutoModelForCausalLM # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = AutoModelForCausalLM.from_pretrained("TheBloke/WizardLM-7B-uncensored-GGUF", model_file="WizardLM-7B-uncensored.Q4_K_M.gguf", model_type="llama", gpu_layers=50) print(llm("AI is going to")) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. 
If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. **Patreon special mentions**: Alicia Loh, Stephen Murray, K, Ajan Kanaga, RoA, Magnesian, Deo Leter, Olakabola, Eugene Pentland, zynix, Deep Realms, Raymond Fosdick, Elijah Stavena, Iucharbius, Erik Bjäreholt, Luis Javier Navarrete Lozano, Nicholas, theTransient, John Detwiler, alfie_i, knownsqashed, Mano Prime, Willem Michiel, Enrico Ros, LangChain4j, OG, Michael Dempsey, Pierre Kircher, Pedro Madruga, James Bentley, Thomas Belote, Luke @flexchar, Leonard Tan, Johann-Peter Hartmann, Illia Dulskyi, Fen Risland, Chadd, S_X, Jeff Scroggin, Ken Nordquist, Sean Connelly, Artur Olbinski, Swaroop Kallakuri, Jack West, Ai Maven, David Ziegler, Russ Johnson, transmissions 11, John Villwock, Alps Aficionado, Clay Pascal, Viktor Bowallius, Subspace Studios, Rainer Wilmers, Trenton Dambrowitz, vamX, Michael Levine, 준교 김, Brandon Frisco, Kalila, Trailburnt, Randy H, Talal Aujan, Nathan Dryer, Vadim, 阿明, ReadyPlayerEmma, Tiffany J. Kim, George Stoitzev, Spencer Kim, Jerry Meng, Gabriel Tamborski, Cory Kujawski, Jeffrey Morgan, Spiking Neurons AB, Edmond Seymore, Alexandros Triantafyllidis, Lone Striker, Cap'n Zoog, Nikolai Manek, danny, ya boyyy, Derek Yates, usrbinkat, Mandus, TL, Nathan LeClaire, subjectnull, Imad Khwaja, webtim, Raven Klaugh, Asp the Wyvern, Gabriel Puliatti, Caitlyn Gatomon, Joseph William Delisle, Jonathan Leane, Luke Pendergrass, SuperWojo, Sebastain Graf, Will Dee, Fred von Graf, Andrey, Dan Guido, Daniel P. 
Andersen, Nitin Borwankar, Elle, Vitor Caleffi, biorpg, jjj, NimbleBox.ai, Pieter, Matthew Berman, terasurfer, Michael Davis, Alex, Stanislav Ovsiannikov Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: Eric Hartford's Wizardlm 7B Uncensored This is WizardLM trained with a subset of the dataset - responses that contained alignment / moralizing were removed. The intent is to train a WizardLM that doesn't have alignment built-in, so that alignment (of any sort) can be added separately with for example with a RLHF LoRA. Shout out to the open source AI/ML community, and everyone who helped me out. Note: An uncensored model has no guardrails. You are responsible for anything you do with the model, just as you are responsible for anything you do with any dangerous object such as a knife, gun, lighter, or car. Publishing anything this model generates is the same as publishing it yourself. You are responsible for the content you publish, and you cannot blame the model any more than you can blame the knife, gun, lighter, or car for what you do with it. <!-- original-model-card end -->
UFNLP/gatortron-base
UFNLP
"2024-03-19T00:23:59Z"
4,557
35
transformers
[ "transformers", "pytorch", "megatron-bert", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2023-06-02T23:27:12Z"
--- license: apache-2.0 --- <h2>GatorTron-Base overview </h2> Developed by a joint effort between the University of Florida and NVIDIA, GatorTron-Base is a clinical language model of 345 million parameters, pre-trained using a BERT architecture implemented in the Megatron package (https://github.com/NVIDIA/Megatron-LM). GatorTron-Base is pre-trained using a dataset consisting of: - 82B words of de-identified clinical notes from the University of Florida Health System, - 6.1B words from PubMed CC0, - 2.5B words from WikiText, - 0.5B words of de-identified clinical notes from MIMIC-III The GitHub for GatorTron is at: https://github.com/uf-hobi-informatics-lab/GatorTron This model is converted to Hugging Face from: https://catalog.ngc.nvidia.com/orgs/nvidia/teams/clara/models/gatortron_og <h2>Model variations</h2> Model | Parameter --- | --- [gatortron-base (this model)](https://huggingface.co/UFNLP/gatortron-base)| 345 million [gatortronS](https://huggingface.co/UFNLP/gatortronS) | 345 million [gatortron-medium](https://huggingface.co/UFNLP/gatortron-medium) | 3.9 billion [gatortron-large](https://huggingface.co/UFNLP/gatortron-large) | 8.9 billion <h2>How to use</h2> ```python from transformers import AutoModel, AutoTokenizer, AutoConfig tokenizer= AutoTokenizer.from_pretrained('UFNLP/gatortron-base') config=AutoConfig.from_pretrained('UFNLP/gatortron-base') mymodel=AutoModel.from_pretrained('UFNLP/gatortron-base') encoded_input=tokenizer("Bone scan: Negative for distant metastasis.", return_tensors="pt") encoded_output = mymodel(**encoded_input) print (encoded_output) ``` - An NLP package using GatorTron for clinical concept extraction (Named Entity Recognition): https://github.com/uf-hobi-informatics-lab/ClinicalTransformerNER - An NLP package using GatorTron for Relation Extraction: https://github.com/uf-hobi-informatics-lab/ClinicalTransformerRelationExtraction - An NLP package using GatorTron for extraction of social determinants of health (SDoH) from clinical 
narratives: https://github.com/uf-hobi-informatics-lab/SDoH_SODA <h2>De-identification</h2> We applied a de-identification system to remove protected health information (PHI) from clinical text. We adopted the safe-harbor method to identify 18 PHI categories defined in the Health Insurance Portability and Accountability Act (HIPAA) and replaced them with dummy strings (e.g., replace people’s names with [\*\*NAME\*\*]). The de-identification system is described in: Yang X, Lyu T, Li Q, Lee C-Y, Bian J, Hogan WR, Wu Y†. A study of deep learning methods for de-identification of clinical notes in cross-institute settings. BMC Med Inform Decis Mak. 2020 Dec 5;19(5):232. https://www.ncbi.nlm.nih.gov/pubmed/31801524. <h2>Citation info</h2> Yang X, Chen A, PourNejatian N, Shin HC, Smith KE, Parisien C, Compas C, Martin C, Costa AB, Flores MG, Zhang Y, Magoc T, Harle CA, Lipori G, Mitchell DA, Hogan WR, Shenkman EA, Bian J, Wu Y†. A large language model for electronic health records. Npj Digit Med. Nature Publishing Group; 2022 Dec 26;5(1):1–9. https://www.nature.com/articles/s41746-022-00742-2 - BibTeX entry ``` @article{yang2022large, title={A large language model for electronic health records}, author={Yang, Xi and Chen, Aokun and PourNejatian, Nima and Shin, Hoo Chang and Smith, Kaleb E and Parisien, Christopher and Compas, Colin and Martin, Cheryl and Costa, Anthony B and Flores, Mona G and Zhang, Ying and Magoc, Tanja and Harle, Christopher A and Lipori, Gloria and Mitchell, Duane A and Hogan, William R and Shenkman, Elizabeth A and Bian, Jiang and Wu, Yonghui }, journal={npj Digital Medicine}, volume={5}, number={1}, pages={194}, year={2022}, publisher={Nature Publishing Group UK London} } ``` <h2>Contact</h2> - Yonghui Wu: [email protected] - Cheng Peng: [email protected]
facebook/DiT-XL-2-256
facebook
"2023-01-17T20:29:53Z"
4,555
12
diffusers
[ "diffusers", "license:cc-by-nc-4.0", "diffusers:DiTPipeline", "region:us" ]
null
"2023-01-17T20:25:12Z"
--- license: cc-by-nc-4.0 --- # Scalable Diffusion Models with Transformers (DiT) ## Abstract We train latent diffusion models, replacing the commonly-used U-Net backbone with a transformer that operates on latent patches. We analyze the scalability of our Diffusion Transformers (DiTs) through the lens of forward pass complexity as measured by Gflops. We find that DiTs with higher Gflops---through increased transformer depth/width or increased number of input tokens---consistently have lower FID. In addition to good scalability properties, our DiT-XL/2 models outperform all prior diffusion models on the class-conditional ImageNet 512×512 and 256×256 benchmarks, achieving a state-of-the-art FID of 2.27 on the latter.
01-ai/Yi-1.5-9B-32K
01-ai
"2024-06-26T10:43:03Z"
4,554
17
transformers
[ "transformers", "safetensors", "llama", "text-generation", "arxiv:2403.04652", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2024-05-15T10:45:35Z"
--- license: apache-2.0 --- <div align="center"> <picture> <img src="https://raw.githubusercontent.com/01-ai/Yi/main/assets/img/Yi_logo_icon_light.svg" width="150px"> </picture> </div> <p align="center"> <a href="https://github.com/01-ai">🐙 GitHub</a> • <a href="https://discord.gg/hYUwWddeAu">👾 Discord</a> • <a href="https://twitter.com/01ai_yi">🐤 Twitter</a> • <a href="https://github.com/01-ai/Yi-1.5/issues/2">💬 WeChat</a> <br/> <a href="https://arxiv.org/abs/2403.04652">📝 Paper</a> • <a href="https://01-ai.github.io/">💪 Tech Blog</a> • <a href="https://github.com/01-ai/Yi/tree/main?tab=readme-ov-file#faq">🙌 FAQ</a> • <a href="https://github.com/01-ai/Yi/tree/main?tab=readme-ov-file#learning-hub">📗 Learning Hub</a> </p> # Intro Yi-1.5 is an upgraded version of Yi. It is continuously pre-trained on Yi with a high-quality corpus of 500B tokens and fine-tuned on 3M diverse fine-tuning samples. Compared with Yi, Yi-1.5 delivers stronger performance in coding, math, reasoning, and instruction-following capability, while still maintaining excellent capabilities in language understanding, commonsense reasoning, and reading comprehension. 
<div align="center"> Model | Context Length | Pre-trained Tokens | :------------: | :------------: | :------------: | | Yi-1.5 | 4K, 16K, 32K | 3.6T </div> # Models - Chat models <div align="center"> | Name | Download | | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Yi-1.5-34B-Chat | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI)| | Yi-1.5-34B-Chat-16K | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | | Yi-1.5-9B-Chat | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | | Yi-1.5-9B-Chat-16K | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | | Yi-1.5-6B-Chat | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | </div> - Base models <div align="center"> | Name | Download | | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Yi-1.5-34B | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 
ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | | Yi-1.5-34B-32K | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | | Yi-1.5-9B | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | | Yi-1.5-9B-32K | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | | Yi-1.5-6B | • [🤗 Hugging Face](https://huggingface.co/collections/01-ai/yi-15-2024-05-663f3ecab5f815a3eaca7ca8) • [🤖 ModelScope](https://www.modelscope.cn/organization/01ai) • [🟣 wisemodel](https://wisemodel.cn/organization/01.AI) | </div> # Benchmarks - Chat models Yi-1.5-34B-Chat is on par with or excels beyond larger models in most benchmarks. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/656d9adce8bf55919aca7c3f/KcsJ9Oc1VnEmfCDEJc5cd.png) Yi-1.5-9B-Chat is the top performer among similarly sized open-source models. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/656d9adce8bf55919aca7c3f/xf6pLg5jqRCwjlh6m3t6_.png) - Base models Yi-1.5-34B is on par with or excels beyond larger models in some benchmarks. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/656d9adce8bf55919aca7c3f/BwU7QM-03dZvZzwdIE1xY.png) Yi-1.5-9B is the top performer among similarly sized open-source models. 
![image/png](https://cdn-uploads.huggingface.co/production/uploads/656d9adce8bf55919aca7c3f/y-EYSYPT-3aWLJ0x8R94F.png) # Quick Start For getting up and running with Yi-1.5 models quickly, see [README](https://github.com/01-ai/Yi-1.5).
sdadas/mmlw-roberta-base
sdadas
"2024-02-23T07:58:24Z"
4,551
2
sentence-transformers
[ "sentence-transformers", "pytorch", "roberta", "feature-extraction", "sentence-similarity", "transformers", "mteb", "pl", "arxiv:2402.13350", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "text-embeddings-inference", "region:us" ]
sentence-similarity
"2023-11-17T19:04:53Z"
--- pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers - mteb model-index: - name: mmlw-roberta-base results: - task: type: Clustering dataset: type: PL-MTEB/8tags-clustering name: MTEB 8TagsClustering config: default split: test revision: None metrics: - type: v_measure value: 33.08463724780795 - task: type: Classification dataset: type: PL-MTEB/allegro-reviews name: MTEB AllegroReviews config: default split: test revision: None metrics: - type: accuracy value: 40.25844930417495 - type: f1 value: 35.59685265418916 - task: type: Retrieval dataset: type: arguana-pl name: MTEB ArguAna-PL config: default split: test revision: None metrics: - type: map_at_1 value: 33.073 - type: map_at_10 value: 50.223 - type: map_at_100 value: 50.942 - type: map_at_1000 value: 50.94499999999999 - type: map_at_3 value: 45.721000000000004 - type: map_at_5 value: 48.413000000000004 - type: mrr_at_1 value: 34.424 - type: mrr_at_10 value: 50.68899999999999 - type: mrr_at_100 value: 51.437999999999995 - type: mrr_at_1000 value: 51.441 - type: mrr_at_3 value: 46.219 - type: mrr_at_5 value: 48.921 - type: ndcg_at_1 value: 33.073 - type: ndcg_at_10 value: 59.021 - type: ndcg_at_100 value: 61.902 - type: ndcg_at_1000 value: 61.983999999999995 - type: ndcg_at_3 value: 49.818 - type: ndcg_at_5 value: 54.644999999999996 - type: precision_at_1 value: 33.073 - type: precision_at_10 value: 8.684 - type: precision_at_100 value: 0.9900000000000001 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 20.555 - type: precision_at_5 value: 14.666 - type: recall_at_1 value: 33.073 - type: recall_at_10 value: 86.842 - type: recall_at_100 value: 99.004 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 61.663999999999994 - type: recall_at_5 value: 73.329 - task: type: Classification dataset: type: PL-MTEB/cbd name: MTEB CBD config: default split: test revision: None metrics: - type: accuracy value: 68.11 - type: 
ap value: 20.916633959031266 - type: f1 value: 56.85804802205465 - task: type: PairClassification dataset: type: PL-MTEB/cdsce-pairclassification name: MTEB CDSC-E config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 89.2 - type: cos_sim_ap value: 79.1041156765933 - type: cos_sim_f1 value: 70.0 - type: cos_sim_precision value: 74.11764705882354 - type: cos_sim_recall value: 66.3157894736842 - type: dot_accuracy value: 88.2 - type: dot_ap value: 72.57183688228149 - type: dot_f1 value: 67.16417910447761 - type: dot_precision value: 63.67924528301887 - type: dot_recall value: 71.05263157894737 - type: euclidean_accuracy value: 89.3 - type: euclidean_ap value: 79.01345533432428 - type: euclidean_f1 value: 70.19498607242339 - type: euclidean_precision value: 74.55621301775149 - type: euclidean_recall value: 66.3157894736842 - type: manhattan_accuracy value: 89.3 - type: manhattan_ap value: 79.01671381791259 - type: manhattan_f1 value: 70.0280112044818 - type: manhattan_precision value: 74.8502994011976 - type: manhattan_recall value: 65.78947368421053 - type: max_accuracy value: 89.3 - type: max_ap value: 79.1041156765933 - type: max_f1 value: 70.19498607242339 - task: type: STS dataset: type: PL-MTEB/cdscr-sts name: MTEB CDSC-R config: default split: test revision: None metrics: - type: cos_sim_pearson value: 91.79559442663039 - type: cos_sim_spearman value: 92.5438168962641 - type: euclidean_pearson value: 92.02981265332856 - type: euclidean_spearman value: 92.5548245733484 - type: manhattan_pearson value: 91.95296287979178 - type: manhattan_spearman value: 92.50279516120241 - task: type: Retrieval dataset: type: dbpedia-pl name: MTEB DBPedia-PL config: default split: test revision: None metrics: - type: map_at_1 value: 7.829999999999999 - type: map_at_10 value: 16.616 - type: map_at_100 value: 23.629 - type: map_at_1000 value: 25.235999999999997 - type: map_at_3 value: 12.485 - type: map_at_5 value: 14.077 - type: mrr_at_1 value: 
61.75000000000001 - type: mrr_at_10 value: 69.852 - type: mrr_at_100 value: 70.279 - type: mrr_at_1000 value: 70.294 - type: mrr_at_3 value: 68.375 - type: mrr_at_5 value: 69.187 - type: ndcg_at_1 value: 49.75 - type: ndcg_at_10 value: 36.217 - type: ndcg_at_100 value: 41.235 - type: ndcg_at_1000 value: 48.952 - type: ndcg_at_3 value: 41.669 - type: ndcg_at_5 value: 38.285000000000004 - type: precision_at_1 value: 61.5 - type: precision_at_10 value: 28.499999999999996 - type: precision_at_100 value: 9.572 - type: precision_at_1000 value: 2.025 - type: precision_at_3 value: 44.083 - type: precision_at_5 value: 36.3 - type: recall_at_1 value: 7.829999999999999 - type: recall_at_10 value: 21.462999999999997 - type: recall_at_100 value: 47.095 - type: recall_at_1000 value: 71.883 - type: recall_at_3 value: 13.891 - type: recall_at_5 value: 16.326999999999998 - task: type: Retrieval dataset: type: fiqa-pl name: MTEB FiQA-PL config: default split: test revision: None metrics: - type: map_at_1 value: 16.950000000000003 - type: map_at_10 value: 27.422 - type: map_at_100 value: 29.146 - type: map_at_1000 value: 29.328 - type: map_at_3 value: 23.735999999999997 - type: map_at_5 value: 25.671 - type: mrr_at_1 value: 33.796 - type: mrr_at_10 value: 42.689 - type: mrr_at_100 value: 43.522 - type: mrr_at_1000 value: 43.563 - type: mrr_at_3 value: 40.226 - type: mrr_at_5 value: 41.685 - type: ndcg_at_1 value: 33.642 - type: ndcg_at_10 value: 35.008 - type: ndcg_at_100 value: 41.839 - type: ndcg_at_1000 value: 45.035 - type: ndcg_at_3 value: 31.358999999999998 - type: ndcg_at_5 value: 32.377 - type: precision_at_1 value: 33.642 - type: precision_at_10 value: 9.937999999999999 - type: precision_at_100 value: 1.685 - type: precision_at_1000 value: 0.22699999999999998 - type: precision_at_3 value: 21.142 - type: precision_at_5 value: 15.586 - type: recall_at_1 value: 16.950000000000003 - type: recall_at_10 value: 42.286 - type: recall_at_100 value: 68.51899999999999 - type: 
recall_at_1000 value: 87.471 - type: recall_at_3 value: 28.834 - type: recall_at_5 value: 34.274 - task: type: Retrieval dataset: type: hotpotqa-pl name: MTEB HotpotQA-PL config: default split: test revision: None metrics: - type: map_at_1 value: 37.711 - type: map_at_10 value: 57.867999999999995 - type: map_at_100 value: 58.77 - type: map_at_1000 value: 58.836999999999996 - type: map_at_3 value: 54.400999999999996 - type: map_at_5 value: 56.564 - type: mrr_at_1 value: 75.449 - type: mrr_at_10 value: 81.575 - type: mrr_at_100 value: 81.783 - type: mrr_at_1000 value: 81.792 - type: mrr_at_3 value: 80.50399999999999 - type: mrr_at_5 value: 81.172 - type: ndcg_at_1 value: 75.422 - type: ndcg_at_10 value: 66.635 - type: ndcg_at_100 value: 69.85 - type: ndcg_at_1000 value: 71.179 - type: ndcg_at_3 value: 61.648 - type: ndcg_at_5 value: 64.412 - type: precision_at_1 value: 75.422 - type: precision_at_10 value: 13.962 - type: precision_at_100 value: 1.649 - type: precision_at_1000 value: 0.183 - type: precision_at_3 value: 39.172000000000004 - type: precision_at_5 value: 25.691000000000003 - type: recall_at_1 value: 37.711 - type: recall_at_10 value: 69.811 - type: recall_at_100 value: 82.471 - type: recall_at_1000 value: 91.29 - type: recall_at_3 value: 58.757999999999996 - type: recall_at_5 value: 64.227 - task: type: Retrieval dataset: type: msmarco-pl name: MTEB MSMARCO-PL config: default split: validation revision: None metrics: - type: map_at_1 value: 17.033 - type: map_at_10 value: 27.242 - type: map_at_100 value: 28.451999999999998 - type: map_at_1000 value: 28.515 - type: map_at_3 value: 24.046 - type: map_at_5 value: 25.840999999999998 - type: mrr_at_1 value: 17.493 - type: mrr_at_10 value: 27.67 - type: mrr_at_100 value: 28.823999999999998 - type: mrr_at_1000 value: 28.881 - type: mrr_at_3 value: 24.529999999999998 - type: mrr_at_5 value: 26.27 - type: ndcg_at_1 value: 17.479 - type: ndcg_at_10 value: 33.048 - type: ndcg_at_100 value: 39.071 - type: 
ndcg_at_1000 value: 40.739999999999995 - type: ndcg_at_3 value: 26.493 - type: ndcg_at_5 value: 29.701 - type: precision_at_1 value: 17.479 - type: precision_at_10 value: 5.324 - type: precision_at_100 value: 0.8380000000000001 - type: precision_at_1000 value: 0.098 - type: precision_at_3 value: 11.408999999999999 - type: precision_at_5 value: 8.469999999999999 - type: recall_at_1 value: 17.033 - type: recall_at_10 value: 50.929 - type: recall_at_100 value: 79.262 - type: recall_at_1000 value: 92.239 - type: recall_at_3 value: 33.06 - type: recall_at_5 value: 40.747 - task: type: Classification dataset: type: mteb/amazon_massive_intent name: MTEB MassiveIntentClassification (pl) config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.31002017484867 - type: f1 value: 69.61603671063031 - task: type: Classification dataset: type: mteb/amazon_massive_scenario name: MTEB MassiveScenarioClassification (pl) config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.52790854068594 - type: f1 value: 75.4053872472259 - task: type: Retrieval dataset: type: nfcorpus-pl name: MTEB NFCorpus-PL config: default split: test revision: None metrics: - type: map_at_1 value: 5.877000000000001 - type: map_at_10 value: 12.817 - type: map_at_100 value: 16.247 - type: map_at_1000 value: 17.683 - type: map_at_3 value: 9.334000000000001 - type: map_at_5 value: 10.886999999999999 - type: mrr_at_1 value: 45.201 - type: mrr_at_10 value: 52.7 - type: mrr_at_100 value: 53.425999999999995 - type: mrr_at_1000 value: 53.461000000000006 - type: mrr_at_3 value: 50.464 - type: mrr_at_5 value: 51.827 - type: ndcg_at_1 value: 41.949999999999996 - type: ndcg_at_10 value: 34.144999999999996 - type: ndcg_at_100 value: 31.556 - type: ndcg_at_1000 value: 40.265 - type: ndcg_at_3 value: 38.07 - type: ndcg_at_5 value: 36.571 - type: precision_at_1 value: 44.272 - type: precision_at_10 value: 25.697 - type: 
precision_at_100 value: 8.077 - type: precision_at_1000 value: 2.084 - type: precision_at_3 value: 36.016999999999996 - type: precision_at_5 value: 31.703 - type: recall_at_1 value: 5.877000000000001 - type: recall_at_10 value: 16.986 - type: recall_at_100 value: 32.719 - type: recall_at_1000 value: 63.763000000000005 - type: recall_at_3 value: 10.292 - type: recall_at_5 value: 12.886000000000001 - task: type: Retrieval dataset: type: nq-pl name: MTEB NQ-PL config: default split: test revision: None metrics: - type: map_at_1 value: 25.476 - type: map_at_10 value: 38.67 - type: map_at_100 value: 39.784000000000006 - type: map_at_1000 value: 39.831 - type: map_at_3 value: 34.829 - type: map_at_5 value: 37.025000000000006 - type: mrr_at_1 value: 28.621000000000002 - type: mrr_at_10 value: 41.13 - type: mrr_at_100 value: 42.028 - type: mrr_at_1000 value: 42.059999999999995 - type: mrr_at_3 value: 37.877 - type: mrr_at_5 value: 39.763999999999996 - type: ndcg_at_1 value: 28.563 - type: ndcg_at_10 value: 45.654 - type: ndcg_at_100 value: 50.695 - type: ndcg_at_1000 value: 51.873999999999995 - type: ndcg_at_3 value: 38.359 - type: ndcg_at_5 value: 42.045 - type: precision_at_1 value: 28.563 - type: precision_at_10 value: 7.6450000000000005 - type: precision_at_100 value: 1.052 - type: precision_at_1000 value: 0.117 - type: precision_at_3 value: 17.458000000000002 - type: precision_at_5 value: 12.613 - type: recall_at_1 value: 25.476 - type: recall_at_10 value: 64.484 - type: recall_at_100 value: 86.96199999999999 - type: recall_at_1000 value: 95.872 - type: recall_at_3 value: 45.527 - type: recall_at_5 value: 54.029 - task: type: Classification dataset: type: laugustyniak/abusive-clauses-pl name: MTEB PAC config: default split: test revision: None metrics: - type: accuracy value: 65.87315377932232 - type: ap value: 76.41966964416534 - type: f1 value: 63.64417488639012 - task: type: PairClassification dataset: type: PL-MTEB/ppc-pairclassification name: MTEB PPC config: 
default split: test revision: None metrics: - type: cos_sim_accuracy value: 87.7 - type: cos_sim_ap value: 92.81319372631636 - type: cos_sim_f1 value: 90.04048582995952 - type: cos_sim_precision value: 88.11410459587957 - type: cos_sim_recall value: 92.05298013245033 - type: dot_accuracy value: 75.0 - type: dot_ap value: 83.63089957943261 - type: dot_f1 value: 80.76923076923077 - type: dot_precision value: 75.43103448275862 - type: dot_recall value: 86.9205298013245 - type: euclidean_accuracy value: 87.7 - type: euclidean_ap value: 92.94772245932825 - type: euclidean_f1 value: 90.10458567980692 - type: euclidean_precision value: 87.63693270735524 - type: euclidean_recall value: 92.71523178807946 - type: manhattan_accuracy value: 87.8 - type: manhattan_ap value: 92.95330512127123 - type: manhattan_f1 value: 90.08130081300813 - type: manhattan_precision value: 88.49840255591054 - type: manhattan_recall value: 91.72185430463577 - type: max_accuracy value: 87.8 - type: max_ap value: 92.95330512127123 - type: max_f1 value: 90.10458567980692 - task: type: PairClassification dataset: type: PL-MTEB/psc-pairclassification name: MTEB PSC config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 96.19666048237477 - type: cos_sim_ap value: 98.61237969571302 - type: cos_sim_f1 value: 93.77845220030349 - type: cos_sim_precision value: 93.35347432024169 - type: cos_sim_recall value: 94.20731707317073 - type: dot_accuracy value: 94.89795918367348 - type: dot_ap value: 97.02853491357943 - type: dot_f1 value: 91.85185185185186 - type: dot_precision value: 89.33717579250721 - type: dot_recall value: 94.51219512195121 - type: euclidean_accuracy value: 96.38218923933209 - type: euclidean_ap value: 98.58145584134218 - type: euclidean_f1 value: 94.04580152671755 - type: euclidean_precision value: 94.18960244648318 - type: euclidean_recall value: 93.90243902439023 - type: manhattan_accuracy value: 96.47495361781077 - type: manhattan_ap value: 98.6108221024781 - 
type: manhattan_f1 value: 94.18960244648318 - type: manhattan_precision value: 94.47852760736197 - type: manhattan_recall value: 93.90243902439023 - type: max_accuracy value: 96.47495361781077 - type: max_ap value: 98.61237969571302 - type: max_f1 value: 94.18960244648318 - task: type: Classification dataset: type: PL-MTEB/polemo2_in name: MTEB PolEmo2.0-IN config: default split: test revision: None metrics: - type: accuracy value: 71.73130193905818 - type: f1 value: 71.17731918813324 - task: type: Classification dataset: type: PL-MTEB/polemo2_out name: MTEB PolEmo2.0-OUT config: default split: test revision: None metrics: - type: accuracy value: 46.59919028340081 - type: f1 value: 37.216392949948954 - task: type: Retrieval dataset: type: quora-pl name: MTEB Quora-PL config: default split: test revision: None metrics: - type: map_at_1 value: 66.134 - type: map_at_10 value: 80.19 - type: map_at_100 value: 80.937 - type: map_at_1000 value: 80.95599999999999 - type: map_at_3 value: 77.074 - type: map_at_5 value: 79.054 - type: mrr_at_1 value: 75.88000000000001 - type: mrr_at_10 value: 83.226 - type: mrr_at_100 value: 83.403 - type: mrr_at_1000 value: 83.406 - type: mrr_at_3 value: 82.03200000000001 - type: mrr_at_5 value: 82.843 - type: ndcg_at_1 value: 75.94 - type: ndcg_at_10 value: 84.437 - type: ndcg_at_100 value: 86.13 - type: ndcg_at_1000 value: 86.29299999999999 - type: ndcg_at_3 value: 81.07799999999999 - type: ndcg_at_5 value: 83.0 - type: precision_at_1 value: 75.94 - type: precision_at_10 value: 12.953999999999999 - type: precision_at_100 value: 1.514 - type: precision_at_1000 value: 0.156 - type: precision_at_3 value: 35.61 - type: precision_at_5 value: 23.652 - type: recall_at_1 value: 66.134 - type: recall_at_10 value: 92.991 - type: recall_at_100 value: 99.003 - type: recall_at_1000 value: 99.86 - type: recall_at_3 value: 83.643 - type: recall_at_5 value: 88.81099999999999 - task: type: Retrieval dataset: type: scidocs-pl name: MTEB SCIDOCS-PL config: 
default split: test revision: None metrics: - type: map_at_1 value: 4.183 - type: map_at_10 value: 10.626 - type: map_at_100 value: 12.485 - type: map_at_1000 value: 12.793 - type: map_at_3 value: 7.531000000000001 - type: map_at_5 value: 9.037 - type: mrr_at_1 value: 20.5 - type: mrr_at_10 value: 30.175 - type: mrr_at_100 value: 31.356 - type: mrr_at_1000 value: 31.421 - type: mrr_at_3 value: 26.900000000000002 - type: mrr_at_5 value: 28.689999999999998 - type: ndcg_at_1 value: 20.599999999999998 - type: ndcg_at_10 value: 17.84 - type: ndcg_at_100 value: 25.518 - type: ndcg_at_1000 value: 31.137999999999998 - type: ndcg_at_3 value: 16.677 - type: ndcg_at_5 value: 14.641000000000002 - type: precision_at_1 value: 20.599999999999998 - type: precision_at_10 value: 9.3 - type: precision_at_100 value: 2.048 - type: precision_at_1000 value: 0.33999999999999997 - type: precision_at_3 value: 15.533 - type: precision_at_5 value: 12.839999999999998 - type: recall_at_1 value: 4.183 - type: recall_at_10 value: 18.862000000000002 - type: recall_at_100 value: 41.592 - type: recall_at_1000 value: 69.037 - type: recall_at_3 value: 9.443 - type: recall_at_5 value: 13.028 - task: type: PairClassification dataset: type: PL-MTEB/sicke-pl-pairclassification name: MTEB SICK-E-PL config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 86.32286995515696 - type: cos_sim_ap value: 82.04302619416443 - type: cos_sim_f1 value: 74.95572086432874 - type: cos_sim_precision value: 74.55954897815363 - type: cos_sim_recall value: 75.35612535612536 - type: dot_accuracy value: 83.9176518548716 - type: dot_ap value: 76.8608733580272 - type: dot_f1 value: 72.31936654569449 - type: dot_precision value: 67.36324523663184 - type: dot_recall value: 78.06267806267806 - type: euclidean_accuracy value: 86.32286995515696 - type: euclidean_ap value: 81.9648986659308 - type: euclidean_f1 value: 74.93796526054591 - type: euclidean_precision value: 74.59421312632321 - type: 
euclidean_recall value: 75.28490028490027 - type: manhattan_accuracy value: 86.30248675091724 - type: manhattan_ap value: 81.92853980116878 - type: manhattan_f1 value: 74.80968858131489 - type: manhattan_precision value: 72.74562584118439 - type: manhattan_recall value: 76.99430199430199 - type: max_accuracy value: 86.32286995515696 - type: max_ap value: 82.04302619416443 - type: max_f1 value: 74.95572086432874 - task: type: STS dataset: type: PL-MTEB/sickr-pl-sts name: MTEB SICK-R-PL config: default split: test revision: None metrics: - type: cos_sim_pearson value: 83.07566183637853 - type: cos_sim_spearman value: 79.20198022242548 - type: euclidean_pearson value: 81.27875473517936 - type: euclidean_spearman value: 79.21560102311153 - type: manhattan_pearson value: 81.21559474880459 - type: manhattan_spearman value: 79.1537846814979 - task: type: STS dataset: type: mteb/sts22-crosslingual-sts name: MTEB STS22 (pl) config: pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 36.39657573900194 - type: cos_sim_spearman value: 40.36403461037013 - type: euclidean_pearson value: 29.143416004776316 - type: euclidean_spearman value: 40.43197841306375 - type: manhattan_pearson value: 29.18632337290767 - type: manhattan_spearman value: 40.50563343395481 - task: type: Retrieval dataset: type: scifact-pl name: MTEB SciFact-PL config: default split: test revision: None metrics: - type: map_at_1 value: 49.428 - type: map_at_10 value: 60.423 - type: map_at_100 value: 61.037 - type: map_at_1000 value: 61.065999999999995 - type: map_at_3 value: 56.989000000000004 - type: map_at_5 value: 59.041999999999994 - type: mrr_at_1 value: 52.666999999999994 - type: mrr_at_10 value: 61.746 - type: mrr_at_100 value: 62.273 - type: mrr_at_1000 value: 62.300999999999995 - type: mrr_at_3 value: 59.278 - type: mrr_at_5 value: 60.611000000000004 - type: ndcg_at_1 value: 52.333 - type: ndcg_at_10 value: 65.75 - type: ndcg_at_100 value: 68.566 - 
type: ndcg_at_1000 value: 69.314 - type: ndcg_at_3 value: 59.768 - type: ndcg_at_5 value: 62.808 - type: precision_at_1 value: 52.333 - type: precision_at_10 value: 9.167 - type: precision_at_100 value: 1.0630000000000002 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 23.778 - type: precision_at_5 value: 16.2 - type: recall_at_1 value: 49.428 - type: recall_at_10 value: 81.07799999999999 - type: recall_at_100 value: 93.93299999999999 - type: recall_at_1000 value: 99.667 - type: recall_at_3 value: 65.061 - type: recall_at_5 value: 72.667 - task: type: Retrieval dataset: type: trec-covid-pl name: MTEB TRECCOVID-PL config: default split: test revision: None metrics: - type: map_at_1 value: 0.22100000000000003 - type: map_at_10 value: 1.788 - type: map_at_100 value: 9.937 - type: map_at_1000 value: 24.762999999999998 - type: map_at_3 value: 0.579 - type: map_at_5 value: 0.947 - type: mrr_at_1 value: 78.0 - type: mrr_at_10 value: 88.067 - type: mrr_at_100 value: 88.067 - type: mrr_at_1000 value: 88.067 - type: mrr_at_3 value: 87.667 - type: mrr_at_5 value: 88.067 - type: ndcg_at_1 value: 76.0 - type: ndcg_at_10 value: 71.332 - type: ndcg_at_100 value: 54.80500000000001 - type: ndcg_at_1000 value: 49.504999999999995 - type: ndcg_at_3 value: 73.693 - type: ndcg_at_5 value: 73.733 - type: precision_at_1 value: 82.0 - type: precision_at_10 value: 76.8 - type: precision_at_100 value: 56.68 - type: precision_at_1000 value: 22.236 - type: precision_at_3 value: 78.667 - type: precision_at_5 value: 79.2 - type: recall_at_1 value: 0.22100000000000003 - type: recall_at_10 value: 2.033 - type: recall_at_100 value: 13.431999999999999 - type: recall_at_1000 value: 46.913 - type: recall_at_3 value: 0.625 - type: recall_at_5 value: 1.052 language: pl license: apache-2.0 widget: - source_sentence: "zapytanie: Jak dożyć 100 lat?" sentences: - "Trzeba zdrowo się odżywiać i uprawiać sport." - "Trzeba pić alkohol, imprezować i jeździć szybkimi autami." 
- "Gdy trwała kampania politycy zapewniali, że rozprawią się z zakazem niedzielnego handlu." --- <h1 align="center">MMLW-roberta-base</h1> MMLW (muszę mieć lepszą wiadomość) are neural text encoders for Polish. This is a distilled model that can be used to generate embeddings applicable to many tasks such as semantic similarity, clustering, information retrieval. The model can also serve as a base for further fine-tuning. It transforms texts to 768 dimensional vectors. The model was initialized with Polish RoBERTa checkpoint, and then trained with [multilingual knowledge distillation method](https://aclanthology.org/2020.emnlp-main.365/) on a diverse corpus of 60 million Polish-English text pairs. We utilised [English FlagEmbeddings (BGE)](https://huggingface.co/BAAI/bge-base-en) as teacher models for distillation. ## Usage (Sentence-Transformers) ⚠️ Our embedding models require the use of specific prefixes and suffixes when encoding texts. For this model, each query should be preceded by the prefix **"zapytanie: "** ⚠️ You can use the model like this with [sentence-transformers](https://www.SBERT.net): ```python from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim query_prefix = "zapytanie: " answer_prefix = "" queries = [query_prefix + "Jak dożyć 100 lat?"] answers = [ answer_prefix + "Trzeba zdrowo się odżywiać i uprawiać sport.", answer_prefix + "Trzeba pić alkohol, imprezować i jeździć szybkimi autami.", answer_prefix + "Gdy trwała kampania politycy zapewniali, że rozprawią się z zakazem niedzielnego handlu." ] model = SentenceTransformer("sdadas/mmlw-roberta-base") queries_emb = model.encode(queries, convert_to_tensor=True, show_progress_bar=False) answers_emb = model.encode(answers, convert_to_tensor=True, show_progress_bar=False) best_answer = cos_sim(queries_emb, answers_emb).argmax().item() print(answers[best_answer]) # Trzeba zdrowo się odżywiać i uprawiać sport. 
``` ## Evaluation Results - The model achieves an **Average Score** of **61.05** on the Polish Massive Text Embedding Benchmark (MTEB). See [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard) for detailed results. - The model achieves **NDCG@10** of **53.60** on the Polish Information Retrieval Benchmark. See [PIRB Leaderboard](https://huggingface.co/spaces/sdadas/pirb) for detailed results. ## Acknowledgements This model was trained with the A100 GPU cluster support delivered by the Gdansk University of Technology within the TASK center initiative. ## Citation ```bibtex @article{dadas2024pirb, title={{PIRB}: A Comprehensive Benchmark of Polish Dense and Hybrid Text Retrieval Methods}, author={Sławomir Dadas and Michał Perełkiewicz and Rafał Poświata}, year={2024}, eprint={2402.13350}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
SG161222/Realistic_Vision_V5.0_noVAE
SG161222
"2024-04-12T15:40:06Z"
4,550
31
diffusers
[ "diffusers", "safetensors", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2023-07-23T15:41:10Z"
--- license: creativeml-openrail-m --- <b>Please read this!</b><br> For version 5.0 it is recommended to use with VAE (to improve generation quality and get rid of artifacts): https://huggingface.co/stabilityai/sd-vae-ft-mse-original<br> <b>You can support me directly on Boosty - https://boosty.to/sg_161222</b><br> <hr/> <b>The recommended negative prompt:</b> (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck<br> <b>OR</b><br> (deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime, mutated hands and fingers:1.4), (deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, disconnected limbs, mutation, mutated, ugly, disgusting, amputation <b>Euler A or DPM++ 2M Karras<br> CFG Scale 3,5 - 7<br> Hires. fix with 4x-UltraSharp upscaler<br> 0 Hires steps and Denoising strength 0.25-0.7<br> Upscale by 1.1-2.0</b>
tanishq26/distilbert-base-multilingual-cased-toxicity-onnx
tanishq26
"2024-04-19T07:08:10Z"
4,550
0
transformers
[ "transformers", "onnx", "distilbert", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-classification
"2024-04-19T07:07:12Z"
Entry not found
LLM360/CrystalCoder
LLM360
"2024-06-24T16:56:09Z"
4,548
68
transformers
[ "transformers", "pytorch", "crystalcoder", "text-generation", "llm", "code", "custom_code", "en", "arxiv:2312.06550", "license:apache-2.0", "autotrain_compatible", "region:us" ]
text-generation
"2023-12-08T19:34:58Z"
--- license: apache-2.0 language: - en pipeline_tag: text-generation library_name: transformers tags: - llm - code --- # CrystalCoder <center><img src="crystalcoder_logo.jpg" alt="crystal coder logo" width="300"/></center> CrystalCoder is a 7B parameter language model, distinctively trained on the SlimPajama and StarCoder datasets. This model excels in balancing natural language processing and coding capabilities. Despite being trained on a smaller dataset of 1.4 trillion tokens—compared to LLaMA 2's 2 trillion—CrystalCoder surpasses LLaMA 2 in some challenging English and coding tasks. It demonstrates superior performance in benchmarks like MMLU, HumanEval, and MBPP. By comparing CrystalCoder with other similar work, CrystalCoder is quite balanced on language and coding tasks. <center><img src="performance_in_benchmarks.png" alt="performance in benchmarks" /></center> | Performance on Standard Benchmarks | |------------------------------------------------| <center><img src="performance_radarchart.png" alt="performance radar chart" /></center> **Notes** - We compute all evaluation metrics ourselves. - Language benchmarks are computed following the convention of [the Huggingface Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), which means AI2 Reasoning Challenge in 25-shot, HellaSwag in 10-shot, MMLU computed in 5-shot, TruthfulQA in 0-shot. - As reported in prior work, the choice of temperature affects the programming metrics a lot, so we evaluate all models with the following temperature: - Scores for HumanEval are computed with a temperature of 0.2 - Scores for MBPP are computed with a temperature of 0.1 - For detailed token breakdown of CrystalCoder dataset, refer to the [CrystalCoder dataset repository](https://huggingface.co/datasets/LLM360/CrystalCoderDatasets). 
## About LLM360 LLM360 is an initiative for comprehensive and fully open-sourced LLMs, where all training details, model checkpoints, intermediate results, and additional analyses are made available to the community. Our goal is to advance the field by inviting the community to deepen the understanding of LLMs together. As the first step of the project LLM360, we release all intermediate model checkpoints, our fully-prepared pre-training dataset, all source code and configurations, and training details. We are committed to continually pushing the boundaries of LLMs through this open-source effort. Get access now at [LLM360 site](https://www.llm360.ai/) ## 🟣 Model Description - **Model type:** Language model with the same architecture as LLaMA-7B - **Language(s) (NLP):** English - **License:** Apache 2.0 - **Resources for more information:** - [Training Code](https://github.com/LLM360/crystalcoder-train) - [Data Preparation](https://github.com/LLM360/crystalcoder-data-prep) - [Metrics](https://github.com/LLM360/Analysis360) - [Fully processed CrystalCoder pretraining data](https://huggingface.co/datasets/LLM360/CrystalCoderDatasets) # 🟣 Model Architecture CrystalCoder leverages a GPT-like architecture, akin to LLaMA, but with the addition of maximal update parameterization (**muP**). Key modifications introduced by muP include: 1. Input embeddings are scaled by `mup_embeddings_scale`. 2. Output logits are scaled by `mup_output_alpha` * `mup_width_scale`. 3. Attention weights scaling is refined to division by the hidden dimension size (`(QK^T)/d`) instead of its square root (`(QK^T)/sqrt(d)`). 4. Learning rates and weight decay are optimized for different parameter groups: - Embedding layer: LR=`BASE_LR`, WD=`BASE_WD`. - Normalization layers: LR=`BASE_LR`, WD=0. - Other Parameters: LR=`BASE_LR` * `mup_width_scale`, WD=`BASE_WD`. 5. Initialization ranges are determined based on muP hyperparameters. 
The muP hyperparameters are set as follows:
- `mup_embeddings_scale`: 14.6
- `mup_output_alpha`: 2.22
- `mup_width_scale`: 0.0625

For other architecture choices:
- We use `LayerNorm` instead of `RMSNorm`.
- Rotary position embeddings applied to only the first `25%` of hidden dimensions.
- Training sequence length is `2048`.
- Embedding dimension is `32032`.

# 🟣 Tokenization

Our tokenizer is based on the LLaMA tokenizer, with 22 additional special tokens for the following usage:
- 4 filling-in-middle (FIM) tokens such as `<|fim_prefix|>` to support FIM inference.
- 14 special tokens such as `<|filename|>`, `<|jupyter_start|>`, `<|reponame|>` to support metadata for the code dataset, following StarCoder's method.
- 4 special tokens such as `<|sys_start|>`, `<|im_start|>` to support instruction tuning.

Therefore, we extended the LLaMA tokenizer vocabulary size from `32000` to `32032`. Some token ids are reserved and not used.

# 🟣 Training

Our training has 3 stages:
- Stage 1: Pretraining on first half of SlimPajama (50% x 690B = 345B).
- Stage 2: Pretraining on the other half of SlimPajama (50% x 690B = 345B), plus two epochs of StarCoder Data (2 x 291B).
- Stage 3: Pretraining on `100B` additional Python and web-related data (HTML, JavaScript, CSS) sampled from StarCoder Data, and `10B` tokens sampled from SlimPajama.

For details of the training dataset for each stage, please refer to the Dataset section and our CrystalCoder Data Card.

For hyperparameters used in each stage, please refer to the following table:

<center><img src="hyperparameters.png" alt="hyperparameter table" /></center>

For more details of training, please refer to [our paper](https://arxiv.org/pdf/2312.06550.pdf).

# 🟣 Dataset

Our tokenized datasets for all phases are available at [CrystalCoderDatasets](https://huggingface.co/datasets/LLM360/CrystalCoderDatasets).

# 🟣 Model Usage

To load a specific checkpoint, use the revision argument as shown below, for example, `CrystalCoder_phase1_checkpoint_055500`.
All the revisions can be seen from the branch dropdown in the "Files and versions" tab. If no revision argument is provided, it will load the phase 3 final checkpoint `CrystalCoder_phase3_checkpoint_027728`. ```python from transformers import AutoModelForCausalLM, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( "LLM360/CrystalCoder", revision="CrystalCoder_phase1_checkpoint_055500", trust_remote_code=True ) model = AutoModelForCausalLM.from_pretrained( "LLM360/CrystalCoder", revision="CrystalCoder_phase1_checkpoint_055500", trust_remote_code=True ) prompt = 'int add(int x, int y) {' input_ids = tokenizer(prompt, return_tensors="pt").input_ids gen_tokens = model.generate(input_ids, do_sample=True, max_length=400) print("-"*20 + "Output for model" + 20 * '-') print(tokenizer.batch_decode(gen_tokens)[0]) ``` ## 🟣 Completion Example: ### prompt: ```python from typing import List def has_close_elements(numbers: List[float], threshold: float) -> bool: """ Check if in given list of numbers, are any two numbers closer to each other than given threshold. >>> has_close_elements([1.0, 2.0, 3.0], 0.5) False >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3) True """ ``` ### response: ```python from typing import List def has_close_elements(numbers: List[float], threshold: float) -> bool: """ Check if in given list of numbers, are any two numbers closer to each other than given threshold. >>> has_close_elements([1.0, 2.0, 3.0], 0.5) False >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3) True """ pass # Fill in this function. It should return the index into `numbers` where the closest pair should be inserted. def closest_pair(numbers: List[float], threshold: float) -> int: """ Find the closest pair in a given list ofalso numbers. Assumes all the numbers are numbers in the list are positive. Returns the correct index into `numbers` where the closest pair should be inserted. This number is the *first* element of the closest pair. 
>>> closest_pair([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.25) 1 >>> closest_pair([12.8, 12.0], 0.0) 0 >>> closest_pair([12.8, 12.0, 12.5, 12.1], 0.0) 1 >>> closest_pair([12.8, 11.5, 12.0, 12.5, 12.1], 0.0) 2 """ pass <unk> import torch import numpy as np ``` # 🟣 Training Logs and Evaluation Results Please refer to our [W&B project page](https://wandb.ai/llm360/CrystalCoder) for complete training logs and evaluation results. Selected Metrics are displayed below. |HumanEval | MBPP | |-----------------------------------------------------|-----------------------------------------------------------| |<img src="cc-humaneval-1.png" alt="humaneval" width="400"/> | <img src="cc-mbpp-1.png" alt="mbpp" width="400"/> | | ARC | HellaSwag | |------------------------------------------------------|------------------------------------------------------------| | <img src="cc-arc-1.png" alt="arc" width="400"/> | <img src="cc-hellaswag-1.png" alt="hellaswag" width="400"/> | |MMLU | TruthfulQA | |-----------------------------------------------------|-----------------------------------------------------------| |<img src="cc-mmlu-1.png" alt="mmlu" width="400"/> | <img src="cc-truthful-1.png" alt="truthfulqa" width="400"/> | # 🟣 CrystalCoder-Instruct We also have instruction tuned versions of CrystalCoder, based on stage 2 and stage 3 final checkpoints. The Instruct version will be released later. # 🟣 Citation **BibTeX:** ```bibtex @misc{liu2023llm360, title={LLM360: Towards Fully Transparent Open-Source LLMs}, author={Zhengzhong Liu and Aurick Qiao and Willie Neiswanger and Hongyi Wang and Bowen Tan and Tianhua Tao and Junbo Li and Yuqi Wang and Suqi Sun and Omkar Pangarkar and Richard Fan and Yi Gu and Victor Miller and Yonghao Zhuang and Guowei He and Haonan Li and Fajri Koto and Liping Tang and Nikhil Ranjan and Zhiqiang Shen and Xuguang Ren and Roberto Iriondo and Cun Mu and Zhiting Hu and Mark Schulze and Preslav Nakov and Tim Baldwin and Eric P. 
Xing}, year={2023}, eprint={2312.06550}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
DeepMount00/Gemma_QA_ITA_v3
DeepMount00
"2024-04-13T06:26:45Z"
4,543
2
transformers
[ "transformers", "safetensors", "gemma", "text-generation", "question-answering", "it", "dataset:DeepMount00/gquad_it", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
question-answering
"2024-03-29T10:17:13Z"
--- library_name: transformers datasets: - DeepMount00/gquad_it pipeline_tag: question-answering license: apache-2.0 language: - it --- ## How to Use How to use Gemma Q&A ```python import transformers from transformers import TextStreamer, AutoTokenizer import torch model_name = "DeepMount00/Gemma_QA_ITA_v3" tokenizer = AutoTokenizer.from_pretrained(model_name) model = transformers.GemmaForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto").eval() def stream(contesto, domanda): device = "cuda:0" prefix_text = 'Di seguito ti verrà fornito un contesto e poi una domanda. Il tuo compito è quello di rispondere alla domanda basandoti esclusivamente sul contesto.\n\n' prompt = f"""{prefix_text}##CONTESTO: {contesto}\n##DOMANDA: {domanda}""" inputs = tokenizer([prompt], return_tensors="pt").to(device) streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) _ = model.generate(**inputs, streamer=streamer, max_new_tokens=150, temperature=0.01, repetition_penalty=1.0, eos_token_id=107, do_sample=True, num_return_sequences=1) contesto = """Seneca seguì molto intensamente gli insegnamenti dei maestri, che esercitarono su di lui un profondo influsso sia con la parola sia con l'esempio di una vita vissuta in coerenza con gli ideali professati. Da Attalo imparò i principi dello stoicismo e l'abitudine alle pratiche ascetiche. Da Sozione, oltre ad apprendere i principi delle dottrine di Pitagora, fu avviato per qualche tempo verso la pratica vegetariana; venne distolto però dal padre che non amava la filosofia e dal fatto che l'imperatore Tiberio proibisse di seguire consuetudini di vita non romane.""" domanda = "Chi è Seneca?" stream(contesto, domanda) ```
FallenMerick/Smart-Lemon-Cookie-7B-GGUF
FallenMerick
"2024-05-06T04:05:11Z"
4,542
1
null
[ "gguf", "quantized", "4-bit", "6-bit", "8-bit", "GGUF", "merge", "mistral", "text-generation", "base_model:FallenMerick/Smart-Lemon-Cookie-7B", "region:us" ]
text-generation
"2024-04-30T04:59:51Z"
--- base_model: - FallenMerick/Smart-Lemon-Cookie-7B model_name: Smart-Lemon-Cookie-7B model_type: mistral pipeline_tag: text-generation tags: - quantized - 4-bit - 6-bit - 8-bit - GGUF - merge - mistral - text-generation --- # Smart-Lemon-Cookie-7B These are GGUF quants for the following model: https://huggingface.co/FallenMerick/Smart-Lemon-Cookie-7B
RichardErkhov/FinancialSupport_-_saiga-7b-gguf
RichardErkhov
"2024-06-15T22:41:36Z"
4,538
0
null
[ "gguf", "region:us" ]
null
"2024-06-15T16:50:30Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) saiga-7b - GGUF - Model creator: https://huggingface.co/FinancialSupport/ - Original model: https://huggingface.co/FinancialSupport/saiga-7b/ | Name | Quant method | Size | | ---- | ---- | ---- | | [saiga-7b.Q2_K.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q2_K.gguf) | Q2_K | 2.53GB | | [saiga-7b.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.IQ3_XS.gguf) | IQ3_XS | 2.81GB | | [saiga-7b.IQ3_S.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.IQ3_S.gguf) | IQ3_S | 2.96GB | | [saiga-7b.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q3_K_S.gguf) | Q3_K_S | 2.95GB | | [saiga-7b.IQ3_M.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.IQ3_M.gguf) | IQ3_M | 3.06GB | | [saiga-7b.Q3_K.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q3_K.gguf) | Q3_K | 3.28GB | | [saiga-7b.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q3_K_M.gguf) | Q3_K_M | 3.28GB | | [saiga-7b.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q3_K_L.gguf) | Q3_K_L | 3.56GB | | [saiga-7b.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.IQ4_XS.gguf) | IQ4_XS | 3.67GB | | [saiga-7b.Q4_0.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q4_0.gguf) | Q4_0 | 3.83GB | | [saiga-7b.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.IQ4_NL.gguf) | IQ4_NL | 3.87GB | | 
[saiga-7b.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q4_K_S.gguf) | Q4_K_S | 3.86GB | | [saiga-7b.Q4_K.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q4_K.gguf) | Q4_K | 4.07GB | | [saiga-7b.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q4_K_M.gguf) | Q4_K_M | 4.07GB | | [saiga-7b.Q4_1.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q4_1.gguf) | Q4_1 | 4.24GB | | [saiga-7b.Q5_0.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q5_0.gguf) | Q5_0 | 4.65GB | | [saiga-7b.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q5_K_S.gguf) | Q5_K_S | 4.65GB | | [saiga-7b.Q5_K.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q5_K.gguf) | Q5_K | 4.78GB | | [saiga-7b.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q5_K_M.gguf) | Q5_K_M | 4.78GB | | [saiga-7b.Q5_1.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q5_1.gguf) | Q5_1 | 5.07GB | | [saiga-7b.Q6_K.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q6_K.gguf) | Q6_K | 5.53GB | | [saiga-7b.Q8_0.gguf](https://huggingface.co/RichardErkhov/FinancialSupport_-_saiga-7b-gguf/blob/main/saiga-7b.Q8_0.gguf) | Q8_0 | 7.17GB | Original model description: --- language: - it license: apache-2.0 model-index: - name: saiga-7b results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 63.14 name: normalized accuracy source: url: 
https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=FinancialSupport/saiga-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 83.14 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=FinancialSupport/saiga-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 61.66 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=FinancialSupport/saiga-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 54.99 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=FinancialSupport/saiga-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 79.01 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=FinancialSupport/saiga-7b name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 45.11 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=FinancialSupport/saiga-7b name: Open LLM Leaderboard --- il saiga è uno strano incrocio di antilopi che vive nelle steppe siberiane. 
Il nome deriva dal fatto che è un parente di fauno/camoscio e un lontano cugino di cerbero (altri modelli open source ita). E' un progetto portato avanti nei weekend con pochi soldi/tempo a disposizione ![image/png](https://cdn-uploads.huggingface.co/production/uploads/648cca46d38113f34bf7cb72/nqYw-P2uPLsNI8FMnLHtN.png) # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_FinancialSupport__saiga-7b) | Metric |Value| |---------------------------------|----:| |Avg. |64.51| |AI2 Reasoning Challenge (25-Shot)|63.14| |HellaSwag (10-Shot) |83.14| |MMLU (5-Shot) |61.66| |TruthfulQA (0-shot) |54.99| |Winogrande (5-shot) |79.01| |GSM8k (5-shot) |45.11|
mradermacher/Emo-AI-3B-GGUF
mradermacher
"2024-06-22T18:05:05Z"
4,538
0
transformers
[ "transformers", "gguf", "text-generation-inference", "unsloth", "gemma", "trl", "sft", "en", "base_model:Klevin/Emo-AI-3B", "license:apache-2.0", "endpoints_compatible", "region:us" ]
null
"2024-06-22T17:55:49Z"
--- base_model: Klevin/Emo-AI-3B language: - en library_name: transformers license: apache-2.0 quantized_by: mradermacher tags: - text-generation-inference - transformers - unsloth - gemma - trl - sft --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: --> static quants of https://huggingface.co/Klevin/Emo-AI-3B <!-- provided-files --> weighted/imatrix quants seem not to be available (by me) at this time. If they do not show up a week or so after the static ones, I have probably not planned for them. Feel free to request them by opening a Community Discussion. ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q2_K.gguf) | Q2_K | 1.3 | | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.IQ3_XS.gguf) | IQ3_XS | 1.3 | | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q3_K_S.gguf) | Q3_K_S | 1.4 | | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.IQ3_S.gguf) | IQ3_S | 1.4 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.IQ3_M.gguf) | IQ3_M | 1.4 | | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q3_K_M.gguf) | Q3_K_M | 1.5 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q3_K_L.gguf) | Q3_K_L | 1.6 | | | 
[GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.IQ4_XS.gguf) | IQ4_XS | 1.6 | | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q4_K_S.gguf) | Q4_K_S | 1.7 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q4_K_M.gguf) | Q4_K_M | 1.7 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q5_K_S.gguf) | Q5_K_S | 1.9 | | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q5_K_M.gguf) | Q5_K_M | 1.9 | | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q6_K.gguf) | Q6_K | 2.2 | very good quality | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.Q8_0.gguf) | Q8_0 | 2.8 | fast, best quality | | [GGUF](https://huggingface.co/mradermacher/Emo-AI-3B-GGUF/resolve/main/Emo-AI-3B.f16.gguf) | f16 | 5.1 | 16 bpw, overkill | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. <!-- end -->
RichardErkhov/wons_-_mistral-7B-v0.1-gguf
RichardErkhov
"2024-06-03T16:29:26Z"
4,537
0
null
[ "gguf", "region:us" ]
null
"2024-06-03T12:58:40Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) mistral-7B-v0.1 - GGUF - Model creator: https://huggingface.co/wons/ - Original model: https://huggingface.co/wons/mistral-7B-v0.1/ | Name | Quant method | Size | | ---- | ---- | ---- | | [mistral-7B-v0.1.Q2_K.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q2_K.gguf) | Q2_K | 2.53GB | | [mistral-7B-v0.1.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.IQ3_XS.gguf) | IQ3_XS | 2.81GB | | [mistral-7B-v0.1.IQ3_S.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.IQ3_S.gguf) | IQ3_S | 2.96GB | | [mistral-7B-v0.1.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q3_K_S.gguf) | Q3_K_S | 2.95GB | | [mistral-7B-v0.1.IQ3_M.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.IQ3_M.gguf) | IQ3_M | 3.06GB | | [mistral-7B-v0.1.Q3_K.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q3_K.gguf) | Q3_K | 3.28GB | | [mistral-7B-v0.1.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q3_K_M.gguf) | Q3_K_M | 3.28GB | | [mistral-7B-v0.1.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q3_K_L.gguf) | Q3_K_L | 3.56GB | | [mistral-7B-v0.1.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.IQ4_XS.gguf) | IQ4_XS | 3.67GB | | [mistral-7B-v0.1.Q4_0.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q4_0.gguf) | Q4_0 | 3.83GB | | 
[mistral-7B-v0.1.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.IQ4_NL.gguf) | IQ4_NL | 3.87GB | | [mistral-7B-v0.1.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q4_K_S.gguf) | Q4_K_S | 3.86GB | | [mistral-7B-v0.1.Q4_K.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q4_K.gguf) | Q4_K | 4.07GB | | [mistral-7B-v0.1.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q4_K_M.gguf) | Q4_K_M | 4.07GB | | [mistral-7B-v0.1.Q4_1.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q4_1.gguf) | Q4_1 | 4.24GB | | [mistral-7B-v0.1.Q5_0.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q5_0.gguf) | Q5_0 | 4.65GB | | [mistral-7B-v0.1.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q5_K_S.gguf) | Q5_K_S | 4.65GB | | [mistral-7B-v0.1.Q5_K.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q5_K.gguf) | Q5_K | 4.78GB | | [mistral-7B-v0.1.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q5_K_M.gguf) | Q5_K_M | 4.78GB | | [mistral-7B-v0.1.Q5_1.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q5_1.gguf) | Q5_1 | 5.07GB | | [mistral-7B-v0.1.Q6_K.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q6_K.gguf) | Q6_K | 5.53GB | | [mistral-7B-v0.1.Q8_0.gguf](https://huggingface.co/RichardErkhov/wons_-_mistral-7B-v0.1-gguf/blob/main/mistral-7B-v0.1.Q8_0.gguf) | Q8_0 | 7.17GB | Original model description: Entry not found
ItsJayQz/Classic_Telltale_Diffusion
ItsJayQz
"2023-01-28T01:05:22Z"
4,534
13
diffusers
[ "diffusers", "safetensors", "stable-diffusion", "text-to-image", "telltale", "game", "en", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
text-to-image
"2022-12-19T15:34:39Z"
---
language:
- en
license: creativeml-openrail-m
tags:
- stable-diffusion
- text-to-image
- diffusers
- telltale
- game
inference: true
---

### Classic Telltale Diffusion

This model was trained on arts from gameplay footage across most Telltale classic games, and some game advertisements.

The art style can essentially be described as 2D comic arts but in 3D.

The model can do portraits, landscapes, and cars, though I have yet to try generating animals.

To reference the art style, use the token: telltale style

### Gradio

We support a [Gradio](https://github.com/gradio-app/gradio) Web UI to run Classic_Telltale_Diffusion:

[![Open In Spaces](https://camo.githubusercontent.com/00380c35e60d6b04be65d3d94a58332be5cc93779f630bcdfc18ab9a3a7d3388/68747470733a2f2f696d672e736869656c64732e696f2f62616467652f25463025394625413425393725323048756767696e67253230466163652d5370616365732d626c7565)](https://huggingface.co/spaces/ItsJayQz/Classic_Telltale_Diffusion)

Here are some samples

**Portraits**

![tt1.png](https://s3.amazonaws.com/moonup/production/uploads/1671464989091-635eafb49f24f6db0a1eafd1.png)
![tt2.png](https://s3.amazonaws.com/moonup/production/uploads/1671464988907-635eafb49f24f6db0a1eafd1.png)

**Landscapes**

![tt3.png](https://s3.amazonaws.com/moonup/production/uploads/1671464988984-635eafb49f24f6db0a1eafd1.png)

**Others**

![tt4.png](https://s3.amazonaws.com/moonup/production/uploads/1671464988976-635eafb49f24f6db0a1eafd1.png)

**Disclaimers**

- I'm in no way affiliated with Telltale Games, or any entities relating to the ownership of the game artworks.
- The phrase Telltale is simply a reference for accessibility.
- This was created entirely for research and entertainment purposes.
- I did not plan, and am not planning, on turning this model into a commercial product, or using it for commercial purposes.
- I do not condone the usage of the model for making counterfeit products that might infringe on Telltale Games's copyrights/trademarks.
**License**

- This model is under Creative OpenRAIL-M.
- This means the model can be used royalty-free, with flexible model usage, such as redistribution of the model, or of any derivatives of the model.
- However, there are restrictions on the openness of the license. More info on the restrictions can be found [here](https://huggingface.co/spaces/CompVis/stable-diffusion-license).

**Responsibilities**

- By using/downloading the model, you are responsible for:
  - All outputs/usage of the model.
  - Understanding the Disclaimers.
  - Upholding the terms of the license.

Thanks for checking out the model!
redponike/Llama-3-8B-UltraMedical-GGUF
redponike
"2024-06-20T14:15:03Z"
4,531
0
null
[ "gguf", "region:us" ]
null
"2024-06-20T12:57:11Z"
GGUF quants of [TsinghuaC3I/Llama-3-8B-UltraMedical](https://huggingface.co/TsinghuaC3I/Llama-3-8B-UltraMedical)
Riksarkivet/trocr-base-handwritten-swe
Riksarkivet
"2024-04-17T13:53:32Z"
4,527
0
transformers
[ "transformers", "tensorboard", "safetensors", "vision-encoder-decoder", "trocr", "image-to-text", "sv", "dataset:Riksarkivet/test_images_demo", "arxiv:1910.09700", "license:mit", "endpoints_compatible", "region:us" ]
image-to-text
"2024-04-17T13:45:06Z"
--- tags: - trocr - image-to-text widget: - src: https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg example_title: Note 1 - src: >- https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcSoolxi9yWGAT5SLZShv8vVd0bz47UWRzQC19fDTeE8GmGv_Rn-PCF1pP1rrUx8kOjA4gg&usqp=CAU example_title: Note 2 - src: >- https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcRNYtTuSBpZPV_nkBYPMFwVVD9asZOPgHww4epu9EqWgDmXW--sE2o8og40ZfDGo87j5w&usqp=CAU example_title: Note 3 license: mit datasets: - Riksarkivet/test_images_demo language: - sv metrics: - cer library_name: transformers pipeline_tag: image-to-text --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** [More Information Needed] - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** [More Information Needed] - **Language(s) (NLP):** [More Information Needed] - **License:** [More Information Needed] - **Finetuned from model [optional]:** [More Information Needed] ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** [More Information Needed] - **Paper [optional]:** [More Information Needed] - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. 
--> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. 
--> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** [More Information Needed] **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
teknium/OpenHermes-13B
teknium
"2023-09-24T11:07:28Z"
4,524
53
transformers
[ "transformers", "pytorch", "llama", "text-generation", "llama-2", "instruct", "finetune", "alpaca", "gpt4", "synthetic data", "distillation", "en", "dataset:teknium/openhermes", "base_model:NousResearch/Llama-2-13b-hf", "license:mit", "autotrain_compatible", "endpoints_compatible", "text-generation-inference", "region:us" ]
text-generation
"2023-09-06T04:36:29Z"
--- base_model: NousResearch/Llama-2-13b-hf tags: - llama-2 - instruct - finetune - alpaca - gpt4 - synthetic data - distillation datasets: - teknium/openhermes model-index: - name: openhermes-13b results: [] license: mit language: - en --- # OpenHermes-13B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ovkrkIIUwJ9azhPtW6dAb.png) ## Model description OpenHermes 13B is the first fine tune of the Hermes dataset that has a fully open source dataset! OpenHermes was trained on 242,000 entries of primarily GPT-4 generated data, from open datasets across the AI landscape, including: - GPTeacher - General Instruct, Roleplay v1, Roleplay v2, and Code Instruct Datasets, by Teknium - WizardLM (v1, evol_instruct 70k), by WizardLM Team/nlpxucan - Airoboros GPT-4 (v1.0), by JonDurbin - Camel-AI's domain expert datasets, by the Camel-AI Team - CodeAlpaca, by Sahil2801 - GPT4-LLM and Unnatural Instructions, by Microsoft Filtering included removal of OpenAI refusals, disclaimers, and "As an AI" type examples and more The base dataset mix the model was trained on is identical to Nous-Hermes', minus the Nous-Instruct and PDACTL datasets which were private datasets. The WANDB Project is public and can be examined at this link: https://wandb.ai/teknium1/openhermes/runs/openhermes-v2-fullft-13b Huge thank you to [main_horse](https://twitter.com/main_horse) for compute access and a16z for sponsoring my work, and all the dataset creators and other people who's work has contributed to this project! 
## Example Outputs ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/wMSeFqUSBwCNefm7s6G1-.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/00iVenvEOMWIO9X6EY2EZ.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/o7hHbCbtwMLitDy-FWDAg.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/-9ZT1FBSE2BJhDowoh6Gj.png) ## Benchmark Information ## Benchmark Results GPT-4All Benchmark Set ``` | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.5009|± |0.0146| | | |acc_norm|0.5247|± |0.0146| |arc_easy | 0|acc |0.8127|± |0.0080| | | |acc_norm|0.7854|± |0.0084| |boolq | 1|acc |0.8153|± |0.0068| |hellaswag | 0|acc |0.6126|± |0.0049| | | |acc_norm|0.7995|± |0.0040| |openbookqa | 0|acc |0.3660|± |0.0216| | | |acc_norm|0.4600|± |0.0223| |piqa | 0|acc |0.7922|± |0.0095| | | |acc_norm|0.8112|± |0.0091| |winogrande | 0|acc |0.7293|± |0.0125| Average: 0.7036 ``` AGI-Eval ``` | Task |Version| Metric |Value | |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.2008|± |0.0252| | | |acc_norm|0.2126|± |0.0257| |agieval_logiqa_en | 0|acc |0.3410|± |0.0186| | | |acc_norm|0.3564|± |0.0188| |agieval_lsat_ar | 0|acc |0.2261|± |0.0276| | | |acc_norm|0.2174|± |0.0273| |agieval_lsat_lr | 0|acc |0.3725|± |0.0214| | | |acc_norm|0.3373|± |0.0210| |agieval_lsat_rc | 0|acc |0.4684|± |0.0305| | | |acc_norm|0.4572|± |0.0304| |agieval_sat_en | 0|acc |0.6553|± |0.0332| | | |acc_norm|0.5971|± |0.0343| |agieval_sat_en_without_passage| 0|acc |0.4515|± |0.0348| | | |acc_norm|0.4029|± |0.0343| |agieval_sat_math | 0|acc |0.3273|± |0.0317| | | |acc_norm|0.2636|± |0.0298| Average: 0.3556 ``` BigBench Reasoning Test ``` | Task |Version| Metric |Value | |Stderr| 
|------------------------------------------------|------:|---------------------|-----:|---|-----:|
|bigbench_causal_judgement | 0|multiple_choice_grade|0.5368|± |0.0363|
|bigbench_date_understanding | 0|multiple_choice_grade|0.7127|± |0.0236|
|bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3023|± |0.0286|
|bigbench_geometric_shapes | 0|multiple_choice_grade|0.1003|± |0.0159|
| | |exact_str_match |0.0000|± |0.0000|
|bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.2720|± |0.0199|
|bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.1986|± |0.0151|
|bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.4500|± |0.0288|
|bigbench_movie_recommendation | 0|multiple_choice_grade|0.2880|± |0.0203|
|bigbench_navigate | 0|multiple_choice_grade|0.5000|± |0.0158|
|bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.5390|± |0.0111|
|bigbench_ruin_names | 0|multiple_choice_grade|0.3906|± |0.0231|
|bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.1844|± |0.0123|
|bigbench_snarks | 0|multiple_choice_grade|0.5249|± |0.0372|
|bigbench_sports_understanding | 0|multiple_choice_grade|0.5335|± |0.0159|
|bigbench_temporal_sequences | 0|multiple_choice_grade|0.2980|± |0.0145|
|bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2048|± |0.0114|
|bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1297|± |0.0080|
|bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.4500|± |0.0288|

Average: 36.75
```

This is a slight improvement on GPT4ALL Suite and BigBench Suite, with a degradation in AGIEval compared to the original hermes.
Average Score Comparison between Nous-Hermes Llama-2 and OpenHermes Llama-2: ``` | Bench | Nous-Hermes | OpenHermes | Change | |------------------------------|------------:|------------|--------| |GPT4All | 70.00| 70.36| +0.36| |------------------------------------------------------------------| |BigBench | 36.57| 36.75| +0.18| |------------------------------------------------------------------| |AGI Eval | 37.20| 35.56| -1.64| ``` ## Training procedure ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/pqQ6MrMVy80hHEKSfqIX2.png) ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 2 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 8 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 300 - num_epochs: 3
mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF
mradermacher
"2024-06-14T09:53:37Z"
4,524
1
transformers
[ "transformers", "gguf", "en", "base_model:Casual-Autopsy/L3-Umbral-Mind-RP-v2-8B", "endpoints_compatible", "region:us" ]
null
"2024-06-14T05:22:37Z"
--- base_model: Casual-Autopsy/L3-Umbral-Mind-RP-v2-8B language: - en library_name: transformers quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/Casual-Autopsy/L3-Umbral-Mind-RP-v2-8B <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | 
IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | 
| [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-Umbral-Mind-RP-v2-8B-i1-GGUF/resolve/main/L3-Umbral-Mind-RP-v2-8B.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
mradermacher/L3-Nymeria-8B-i1-GGUF
mradermacher
"2024-06-23T11:57:30Z"
4,523
2
transformers
[ "transformers", "gguf", "mergekit", "merge", "roleplay", "sillytavern", "llama3", "not-for-all-audiences", "en", "base_model:tannedbum/L3-Nymeria-8B", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
null
"2024-06-15T14:26:49Z"
--- base_model: tannedbum/L3-Nymeria-8B language: - en library_name: transformers license: cc-by-nc-4.0 quantized_by: mradermacher tags: - mergekit - merge - roleplay - sillytavern - llama3 - not-for-all-audiences --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/tannedbum/L3-Nymeria-8B <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/L3-Nymeria-8B-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | 
[GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/L3-Nymeria-8B-i1-GGUF/resolve/main/L3-Nymeria-8B.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | 
practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
naver/splade-v3
naver
"2024-03-12T08:15:13Z"
4,522
15
transformers
[ "transformers", "pytorch", "bert", "fill-mask", "splade", "en", "arxiv:2403.06789", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
fill-mask
"2024-03-08T15:40:21Z"
--- license: cc-by-nc-sa-4.0 language: - en tags: - splade --- ## SPLADE-v3 #### SPLADE-v3 is the latest series of SPLADE models. This checkpoint corresponds to a model that starts from SPLADE++SelfDistil (`naver/splade-cocondenser-selfdistil`), and is trained with a mix of KL-Div and MarginMSE, with 8 negatives per query sampled from SPLADE++SelfDistil. We used the original MS MARCO collection **without the titles**. For more details, see our arXiv companion book: https://arxiv.org/abs/2403.06789 To use SPLADE, please visit our GitHub repository: https://github.com/naver/splade ## Performance | | MRR@10 (MS MARCO dev) | avg nDCG@10 (BEIR-13) | | --- | --- | --- | | `naver/splade-v3` | 40.2 | 51.7 | ## Citation If you use our checkpoint, please cite our work: ``` @misc{lassance2024spladev3, title={SPLADE-v3: New baselines for SPLADE}, author={Carlos Lassance and Hervé Déjean and Thibault Formal and Stéphane Clinchant}, year={2024}, eprint={2403.06789}, archivePrefix={arXiv}, primaryClass={cs.IR}, copyright = {Creative Commons Attribution Non Commercial Share Alike 4.0 International} } ```
mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF
mradermacher
"2024-06-14T16:08:18Z"
4,522
0
transformers
[ "transformers", "gguf", "en", "base_model:felfri/Llama-Guard-2-8B-German-v0.2", "endpoints_compatible", "region:us" ]
null
"2024-06-14T12:16:16Z"
--- base_model: felfri/Llama-Guard-2-8B-German-v0.2 language: - en library_name: transformers quantized_by: mradermacher tags: [] --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/felfri/Llama-Guard-2-8B-German-v0.2 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | 
[GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | 
[GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-Guard-2-8B-German-v0.2-i1-GGUF/resolve/main/Llama-Guard-2-8B-German-v0.2.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
duyntnet/llama-3-cat-8b-instruct-v1-imatrix-GGUF
duyntnet
"2024-06-10T05:50:43Z"
4,517
0
transformers
[ "transformers", "gguf", "imatrix", "llama-3-cat-8b-instruct-v1", "text-generation", "en", "license:other", "region:us" ]
text-generation
"2024-06-10T01:40:55Z"
--- license: other language: - en pipeline_tag: text-generation inference: false tags: - transformers - gguf - imatrix - llama-3-cat-8b-instruct-v1 --- Quantizations of https://huggingface.co/TheSkullery/llama-3-cat-8b-instruct-v1 # From original readme Cat-llama3-instruct is a llama 3 8b finetuned model focusing on system prompt fidelity, helpfulness and character engagement. The model aims to respect system prompt to an extreme degree, provide helpful information regardless of situations, and offer maximum character immersion (Role Play) in given scenes.
facebook/xglm-7.5B
facebook
"2023-01-24T16:35:48Z"
4,515
55
transformers
[ "transformers", "pytorch", "xglm", "text-generation", "multilingual", "en", "ru", "zh", "de", "es", "fr", "ja", "it", "pt", "el", "ko", "fi", "id", "tr", "ar", "vi", "th", "bg", "ca", "hi", "et", "bn", "ta", "ur", "sw", "te", "eu", "my", "ht", "qu", "arxiv:2112.10668", "license:mit", "autotrain_compatible", "region:us" ]
text-generation
"2022-03-02T23:29:05Z"
--- language: - multilingual - en - ru - zh - de - es - fr - ja - it - pt - el - ko - fi - id - tr - ar - vi - th - bg - ca - hi - et - bn - ta - ur - sw - te - eu - my - ht - qu license: mit thumbnail: https://huggingface.co/front/thumbnails/facebook.png inference: false --- # XGLM-7.5B XGLM-7.5B is a multilingual autoregressive language model (with 7.5 billion parameters) trained on a balanced corpus of a diverse set of languages totaling 500 billion sub-tokens. It was introduced in the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin\*, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li\* (\*Equal Contribution). The original implementation was released in [this repository](https://github.com/pytorch/fairseq/tree/main/examples/xglm). ## Training Data Statistics The training data statistics of XGLM-7.5B is shown in the table below. 
| ISO-639-1| family | name | # tokens | ratio | ratio w/ lowRes upsampling | |:--------|:-----------------|:------------------------|-------------:|------------:|-------------:| | en | Indo-European | English | 803526736124 | 0.489906 | 0.3259 | | ru | Indo-European | Russian | 147791898098 | 0.0901079 | 0.0602 | | zh | Sino-Tibetan | Chinese | 132770494630 | 0.0809494 | 0.0483 | | de | Indo-European | German | 89223707856 | 0.0543992 | 0.0363 | | es | Indo-European | Spanish | 87303083105 | 0.0532282 | 0.0353 | | fr | Indo-European | French | 77419639775 | 0.0472023 | 0.0313 | | ja | Japonic | Japanese | 66054364513 | 0.040273 | 0.0269 | | it | Indo-European | Italian | 41930465338 | 0.0255648 | 0.0171 | | pt | Indo-European | Portuguese | 36586032444 | 0.0223063 | 0.0297 | | el | Indo-European | Greek (modern) | 28762166159 | 0.0175361 | 0.0233 | | ko | Koreanic | Korean | 20002244535 | 0.0121953 | 0.0811 | | fi | Uralic | Finnish | 16804309722 | 0.0102455 | 0.0681 | | id | Austronesian | Indonesian | 15423541953 | 0.00940365 | 0.0125 | | tr | Turkic | Turkish | 12413166065 | 0.00756824 | 0.0101 | | ar | Afro-Asiatic | Arabic | 12248607345 | 0.00746791 | 0.0099 | | vi | Austroasiatic | Vietnamese | 11199121869 | 0.00682804 | 0.0091 | | th | Tai–Kadai | Thai | 10842172807 | 0.00661041 | 0.044 | | bg | Indo-European | Bulgarian | 9703797869 | 0.00591635 | 0.0393 | | ca | Indo-European | Catalan | 7075834775 | 0.0043141 | 0.0287 | | hi | Indo-European | Hindi | 3448390110 | 0.00210246 | 0.014 | | et | Uralic | Estonian | 3286873851 | 0.00200399 | 0.0133 | | bn | Indo-European | Bengali, Bangla | 1627447450 | 0.000992245 | 0.0066 | | ta | Dravidian | Tamil | 1476973397 | 0.000900502 | 0.006 | | ur | Indo-European | Urdu | 1351891969 | 0.000824241 | 0.0055 | | sw | Niger–Congo | Swahili | 907516139 | 0.000553307 | 0.0037 | | te | Dravidian | Telugu | 689316485 | 0.000420272 | 0.0028 | | eu | Language isolate | Basque | 105304423 | 6.42035e-05 | 0.0043 | | my | 
Sino-Tibetan | Burmese | 101358331 | 6.17976e-05 | 0.003 | | ht | Creole | Haitian, Haitian Creole | 86584697 | 5.27902e-05 | 0.0035 | | qu | Quechuan | Quechua | 3236108 | 1.97304e-06 | 0.0001 | ## Model card For intended usage of the model, please refer to the [model card](https://github.com/pytorch/fairseq/blob/main/examples/xglm/model_card.md) released by the XGLM-7.5B development team. ## Example (COPA) The following snippet shows how to evaluate our models (GPT-3 style, zero-shot) on the Choice of Plausible Alternatives (COPA) task, using examples in English, Chinese and Hindi. ```python import torch import torch.nn.functional as F from transformers import XGLMTokenizer, XGLMForCausalLM tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-7.5B") model = XGLMForCausalLM.from_pretrained("facebook/xglm-7.5B") data_samples = { 'en': [ { "premise": "I wanted to conserve energy.", "choice1": "I swept the floor in the unoccupied room.", "choice2": "I shut off the light in the unoccupied room.", "question": "effect", "label": "1" }, { "premise": "The flame on the candle went out.", "choice1": "I blew on the wick.", "choice2": "I put a match to the wick.", "question": "cause", "label": "0" } ], 'zh': [ { "premise": "我想节约能源。", "choice1": "我在空着的房间里扫了地板。", "choice2": "我把空房间里的灯关了。", "question": "effect", "label": "1" }, { "premise": "蜡烛上的火焰熄灭了。", "choice1": "我吹灭了灯芯。", "choice2": "我把一根火柴放在灯芯上。", "question": "cause", "label": "0" } ], 'hi': [ { "premise": "M te vle konsève enèji.", "choice1": "Mwen te fin baleye chanm lib la.", "choice2": "Mwen te femen limyè nan chanm lib la.", "question": "effect", "label": "1" }, { "premise": "Flam bouji a te etenn.", "choice1": "Mwen te soufle bouji a.", "choice2": "Mwen te limen mèch bouji a.", "question": "cause", "label": "0" } ] } def get_logprobs(prompt): inputs = tokenizer(prompt, return_tensors="pt") input_ids, output_ids = inputs["input_ids"], inputs["input_ids"][:, 1:] outputs = model(**inputs, labels=input_ids) logits = 
outputs.logits
    logprobs = torch.gather(F.log_softmax(logits, dim=2), 2, output_ids.unsqueeze(2))
    return logprobs

# Zero-shot evaluation for the Choice of Plausible Alternatives (COPA) task.
# A return value of 0 indicates that the first alternative is more plausible,
# while 1 indicates that the second alternative is more plausible.
def COPA_eval(prompt, alternative1, alternative2):
    lprob1 = get_logprobs(prompt + "\n" + alternative1).sum()
    lprob2 = get_logprobs(prompt + "\n" + alternative2).sum()
    return 0 if lprob1 > lprob2 else 1

for lang in data_samples:
    for idx, example in enumerate(data_samples[lang]):
        predict = COPA_eval(example["premise"], example["choice1"], example["choice2"])
        print(f'{lang}-{idx}', predict, example['label'])

# en-0 1 1
# en-1 0 0
# zh-0 1 1
# zh-1 0 0
# hi-0 1 1
# hi-1 0 0
```
mradermacher/Jett-w26-abliterated-i1-GGUF
mradermacher
"2024-06-02T07:24:51Z"
4,515
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:giannisan/Jett-w26-abliterated", "endpoints_compatible", "region:us" ]
null
"2024-06-02T05:55:24Z"
--- base_model: giannisan/Jett-w26-abliterated language: - en library_name: transformers quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/giannisan/Jett-w26-abliterated <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Jett-w26-abliterated-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ1_S.gguf) | i1-IQ1_S | 1.7 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ1_M.gguf) | i1-IQ1_M | 1.9 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.1 | | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.3 | | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ2_S.gguf) | i1-IQ2_S | 2.4 | | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ2_M.gguf) | i1-IQ2_M | 2.6 | | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q2_K.gguf) | i1-Q2_K | 2.8 | IQ3_XXS probably better | | 
[GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 2.9 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.1 | | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.3 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ3_S.gguf) | i1-IQ3_S | 3.3 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ3_M.gguf) | i1-IQ3_M | 3.4 | | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q3_K_M.gguf) | i1-Q3_K_M | 3.6 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q3_K_L.gguf) | i1-Q3_K_L | 3.9 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.0 | | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q4_0.gguf) | i1-Q4_0 | 4.2 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.2 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q4_K_M.gguf) | i1-Q4_K_M | 4.5 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.1 | | | 
[GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.2 | | | [GGUF](https://huggingface.co/mradermacher/Jett-w26-abliterated-i1-GGUF/resolve/main/Jett-w26-abliterated.i1-Q6_K.gguf) | i1-Q6_K | 6.0 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF
mradermacher
"2024-06-06T21:49:32Z"
4,515
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "en", "base_model:Locutusque/Llama-3-Yggdrasil-2.0-8B", "endpoints_compatible", "region:us" ]
null
"2024-06-05T13:46:41Z"
--- base_model: Locutusque/Llama-3-Yggdrasil-2.0-8B language: - en library_name: transformers quantized_by: mradermacher tags: - mergekit - merge --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/Locutusque/Llama-3-Yggdrasil-2.0-8B <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | 
[GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | 
fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Llama-3-Yggdrasil-2.0-8B-i1-GGUF/resolve/main/Llama-3-Yggdrasil-2.0-8B.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
baichuan-inc/Baichuan-13B-Chat
baichuan-inc
"2024-01-09T07:56:42Z"
4,514
634
transformers
[ "transformers", "pytorch", "baichuan", "text-generation", "custom_code", "zh", "en", "arxiv:2104.09864", "arxiv:2108.12409", "arxiv:2009.03300", "autotrain_compatible", "endpoints_compatible", "region:us" ]
text-generation
"2023-07-08T05:58:27Z"
--- language: - zh - en pipeline_tag: text-generation inference: false --- # Baichuan-13B-Chat <!-- Provide a quick summary of what the model is/does. --> ## 介绍 Baichuan-13B-Chat为Baichuan-13B系列模型中对齐后的版本,预训练模型可见[Baichuan-13B-Base](https://huggingface.co/baichuan-inc/Baichuan-13B-Base)。 [Baichuan-13B](https://github.com/baichuan-inc/Baichuan-13B) 是由百川智能继 [Baichuan-7B](https://github.com/baichuan-inc/baichuan-7B) 之后开发的包含 130 亿参数的开源可商用的大规模语言模型,在权威的中文和英文 benchmark 上均取得同尺寸最好的效果。本次发布包含有预训练 ([Baichuan-13B-Base](https://huggingface.co/baichuan-inc/Baichuan-13B-Base)) 和对齐 ([Baichuan-13B-Chat](https://huggingface.co/baichuan-inc/Baichuan-13B-Chat)) 两个版本。Baichuan-13B 有如下几个特点: 1. **更大尺寸、更多数据**:Baichuan-13B 在 [Baichuan-7B](https://github.com/baichuan-inc/baichuan-7B) 的基础上进一步扩大参数量到 130 亿,并且在高质量的语料上训练了 1.4 万亿 tokens,超过 LLaMA-13B 40%,是当前开源 13B 尺寸下训练数据量最多的模型。支持中英双语,使用 ALiBi 位置编码,上下文窗口长度为 4096。 2. **同时开源预训练和对齐模型**:预训练模型是适用开发者的“基座”,而广大普通用户对有对话功能的对齐模型具有更强的需求。因此本次开源我们同时发布了对齐模型(Baichuan-13B-Chat),具有很强的对话能力,开箱即用,几行代码即可简单的部署。 3. **更高效的推理**:为了支持更广大用户的使用,我们本次同时开源了 int8 和 int4 的量化版本,相对非量化版本在几乎没有效果损失的情况下大大降低了部署的机器资源门槛,可以部署在如 Nvidia 3090 这样的消费级显卡上。 4. **开源免费可商用**:Baichuan-13B 不仅对学术研究完全开放,开发者也仅需邮件申请并获得官方商用许可后,即可以免费商用。 Baichuan-13B-Chat is the aligned version in the Baichuan-13B series of models, and the pre-trained model can be found at [Baichuan-13B-Base](https://huggingface.co/baichuan-inc/Baichuan-13B-Base). [Baichuan-13B](https://github.com/baichuan-inc/Baichuan-13B) is an open-source, commercially usable large-scale language model developed by Baichuan Intelligence, following [Baichuan-7B](https://github.com/baichuan-inc/baichuan-7B). With 13 billion parameters, it achieves the best performance in standard Chinese and English benchmarks among models of its size. This release includes two versions: pre-training (Baichuan-13B-Base) and alignment (Baichuan-13B-Chat). Baichuan-13B has the following features: 1. 
**Larger size, more data**: Baichuan-13B further expands the parameter volume to 13 billion based on [Baichuan-7B](https://github.com/baichuan-inc/baichuan-7B), and has trained 1.4 trillion tokens on high-quality corpora, exceeding LLaMA-13B by 40%. It is currently the model with the most training data in the open-source 13B size. It supports both Chinese and English, uses ALiBi position encoding, and has a context window length of 4096. 2. **Open-source pre-training and alignment models simultaneously**: The pre-training model is a "base" suitable for developers, while the general public has a stronger demand for alignment models with dialogue capabilities. Therefore, in this open-source release, we also released the alignment model (Baichuan-13B-Chat), which has strong dialogue capabilities and is ready to use. It can be easily deployed with just a few lines of code. 3. **More efficient inference**: To support a wider range of users, we have open-sourced the INT8 and INT4 quantized versions. The model can be conveniently deployed on consumer GPUs like the Nvidia 3090 with almost no performance loss. 4. **Open-source, free, and commercially usable**: Baichuan-13B is not only fully open to academic research, but developers can also use it for free commercially after applying for and receiving official commercial permission via email. 
## 使用方式 如下是一个使用Baichuan-13B-Chat进行对话的示例,正确输出为"乔戈里峰。世界第二高峰———乔戈里峰西方登山者称其为k2峰,海拔高度是8611米,位于喀喇昆仑山脉的中巴边境上" ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation.utils import GenerationConfig tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-13B-Chat", use_fast=False, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("baichuan-inc/Baichuan-13B-Chat", device_map="auto", torch_dtype=torch.float16, trust_remote_code=True) model.generation_config = GenerationConfig.from_pretrained("baichuan-inc/Baichuan-13B-Chat") messages = [] messages.append({"role": "user", "content": "世界上第二高的山峰是哪座"}) response = model.chat(tokenizer, messages) print(response) ``` Here is an example of a conversation using Baichuan-13B-Chat, the correct output is "K2. The world's second highest peak - K2, also known as Mount Godwin-Austen or Chhogori, with an altitude of 8611 meters, is located on the China-Pakistan border in the Karakoram Range." 
```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation.utils import GenerationConfig tokenizer = AutoTokenizer.from_pretrained("baichuan-inc/Baichuan-13B-Chat", use_fast=False, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("baichuan-inc/Baichuan-13B-Chat", device_map="auto", torch_dtype=torch.float16, trust_remote_code=True) model.generation_config = GenerationConfig.from_pretrained("baichuan-inc/Baichuan-13B-Chat") messages = [] messages.append({"role": "user", "content": "Which mountain is the second highest one in the world?"}) response = model.chat(tokenizer, messages) print(response) ``` ## 量化部署 Baichuan-13B 支持 int8 和 int4 量化,用户只需在推理代码中简单修改两行即可实现。请注意,如果是为了节省显存而进行量化,应加载原始精度模型到 CPU 后再开始量化;避免在 `from_pretrained` 时添加 `device_map='auto'` 或者其它会导致把原始精度模型直接加载到 GPU 的行为的参数。 Baichuan-13B supports int8 and int4 quantization, users only need to make a simple two-line change in the inference code to implement it. Please note, if quantization is done to save GPU memory, the original precision model should be loaded onto the CPU before starting quantization. Avoid adding parameters such as `device_map='auto'` or others that could cause the original precision model to be loaded directly onto the GPU when executing `from_pretrained`. 使用 int8 量化 (To use int8 quantization): ```python model = AutoModelForCausalLM.from_pretrained("baichuan-inc/Baichuan-13B-Chat", torch_dtype=torch.float16, trust_remote_code=True) model = model.quantize(8).cuda() ``` 同样的,如需使用 int4 量化 (Similarly, to use int4 quantization): ```python model = AutoModelForCausalLM.from_pretrained("baichuan-inc/Baichuan-13B-Chat", torch_dtype=torch.float16, trust_remote_code=True) model = model.quantize(4).cuda() ``` ## 模型详情 ### 模型描述 <!-- Provide a longer summary of what this model is. 
--> - **Developed by:** 百川智能(Baichuan Intelligent Technology) - **Email**: [email protected] - **Language(s) (NLP):** Chinese/English - **License:** 【Community License for Baichuan-13B Model】([ZH](Baichuan-13B%20模型社区许可协议.pdf)| [EN](Community%20License%20for%20Baichuan-13B%20Model.pdf)) **商业用途(For commercial use):** 请通过 [Email](mailto:[email protected]) 联系申请书面授权。(Contact us via [Email](mailto:[email protected]) above to apply for written authorization.) ### 模型结构 <!-- Provide the basic links for the model. --> 整体模型基于Baichuan-7B,为了获得更好的推理性能,Baichuan-13B 使用了 ALiBi 线性偏置技术,相对于 Rotary Embedding 计算量更小,对推理性能有显著提升;与标准的 LLaMA-13B 相比,生成 2000 个 tokens 的平均推理速度 (tokens/s),实测提升 31.6%: | Model | tokens/s | |-------------|----------| | LLaMA-13B | 19.4 | | Baichuan-13B| 25.4 | 具体参数和见下表 | 模型名称 | 隐含层维度 | 层数 | 头数 |词表大小 | 总参数量 | 训练数据(tokens) | 位置编码 | 最大长度 | |-------------------------|-------|------------|------------|-----------------|--------|--------|----------------|---------| | Baichuan-7B | 4,096 | 32 | 32 | 64,000 | 7,000,559,616 | 1.2万亿 | [RoPE](https://arxiv.org/abs/2104.09864) | 4,096 | | Baichuan-13B | 5,120 | 40 | 40 | 64,000 | 13,264,901,120 | 1.4万亿 | [ALiBi](https://arxiv.org/abs/2108.12409) | 4,096 The overall model is based on Baichuan-7B. In order to achieve better inference performance, Baichuan-13B uses ALiBi linear bias technology, which has a smaller computational load compared to Rotary Embedding, and significantly improves inference performance. 
Compared with the standard LLaMA-13B, the average inference speed (tokens/s) for generating 2000 tokens has been tested to increase by 31.6%: | Model | tokens/s | |-------------|----------| | LLaMA-13B | 19.4 | | Baichuan-13B| 25.4 | The specific parameters are as follows: | Model Name | Hidden Size | Num Layers | Num Attention Heads |Vocab Size | Total Params | Training Data (tokens) | Position Embedding | Max Length | |-------------------------|-------|------------|------------|-----------------|--------|--------|----------------|---------| | Baichuan-7B | 4,096 | 32 | 32 | 64,000 | 7,000,559,616 | 1.2 trillion | [RoPE](https://arxiv.org/abs/2104.09864) | 4,096 | | Baichuan-13B | 5,120 | 40 | 40 | 64,000 | 13,264,901,120 | 1.4 trillion | [ALiBi](https://arxiv.org/abs/2108.12409) | 4,096 ## 使用须知 <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### 免责声明 我们在此声明,我们的开发团队并未基于 Baichuan-13B 模型开发任何应用,无论是在 iOS、Android、网页或任何其他平台。我们强烈呼吁所有使用者,不要利用 Baichuan-13B 模型进行任何危害国家社会安全或违法的活动。另外,我们也要求使用者不要将 Baichuan-13B 模型用于未经适当安全审查和备案的互联网服务。我们希望所有的使用者都能遵守这个原则,确保科技的发展能在规范和合法的环境下进行。 我们已经尽我们所能,来确保模型训练过程中使用的数据的合规性。然而,尽管我们已经做出了巨大的努力,但由于模型和数据的复杂性,仍有可能存在一些无法预见的问题。因此,如果由于使用 Baichuan-13B 开源模型而导致的任何问题,包括但不限于数据安全问题、公共舆论风险,或模型被误导、滥用、传播或不当利用所带来的任何风险和问题,我们将不承担任何责任。 We hereby declare that our development team has not developed any applications based on the Baichuan-13B model, whether on iOS, Android, the web, or any other platform. We strongly urge all users not to use the Baichuan-13B model for any activities that harm national social security or are illegal. In addition, we also ask users not to use the Baichuan-13B model for internet services that have not undergone appropriate security review and filing. We hope that all users will adhere to this principle to ensure that technological development takes place in a regulated and legal environment. 
We have done our utmost to ensure the compliance of the data used in the model training process. However, despite our great efforts, due to the complexity of the model and data, there may still be some unforeseen issues. Therefore, we will not take any responsibility for any issues arising from the use of the Baichuan-13B open-source model, including but not limited to data security issues, public opinion risks, or any risks and problems arising from the model being misled, misused, disseminated, or improperly exploited. ## 训练详情 训练具体设置参见[Baichuan-13B](https://github.com/baichuan-inc/Baichuan-13B)。 For specific training settings, please refer to [Baichuan-13B](https://github.com/baichuan-inc/Baichuan-13B). ## 测评结果 ## [C-Eval](https://cevalbenchmark.com/index.html#home) | Model 5-shot | STEM | Social Sciences | Humanities | Others | Average | |-------------------------|:-----:|:---------------:|:----------:|:------:|:-------:| | Baichuan-7B | 38.2 | 52.0 | 46.2 | 39.3 | 42.8 | | Chinese-Alpaca-Plus-13B | 35.2 | 45.6 | 40.0 | 38.2 | 38.8 | | Vicuna-13B | 30.5 | 38.2 | 32.5 | 32.5 | 32.8 | | Chinese-LLaMA-Plus-13B | 30.3 | 38.0 | 32.9 | 29.1 | 32.1 | | Ziya-LLaMA-13B-Pretrain | 27.6 | 34.4 | 32.0 | 28.6 | 30.0 | | LLaMA-13B | 27.0 | 33.6 | 27.7 | 27.6 | 28.5 | | moss-moon-003-base (16B)| 27.0 | 29.1 | 27.2 | 26.9 | 27.4 | | **Baichuan-13B-Base** | **45.9** | **63.5** | **57.2** | **49.3** | **52.4** | | **Baichuan-13B-Chat** | **43.7** | **64.6** | **56.2** | **49.2** | **51.5** | ## [MMLU](https://arxiv.org/abs/2009.03300) | Model 5-shot | STEM | Social Sciences | Humanities | Others | Average | |-------------------------|:-----:|:---------------:|:----------:|:------:|:-------:| | Vicuna-13B | 40.4 | 60.5 | 49.5 | 58.4 | 52.0 | | LLaMA-13B | 36.1 | 53.0 | 44.0 | 52.8 | 46.3 | | Chinese-Alpaca-Plus-13B | 36.9 | 48.9 | 40.5 | 50.5 | 43.9 | | Ziya-LLaMA-13B-Pretrain | 35.6 | 47.6 | 40.1 | 49.4 | 42.9 | | Baichuan-7B | 35.6 | 48.9 | 38.4 | 48.1 | 42.3 | | 
Chinese-LLaMA-Plus-13B | 33.1 | 42.8 | 37.0 | 44.6 | 39.2 | | moss-moon-003-base (16B)| 22.4 | 22.8 | 24.2 | 24.4 | 23.6 | | **Baichuan-13B-Base** | **41.6** | **60.9** | **47.4** | **58.5** | **51.6** | | **Baichuan-13B-Chat** | **40.9** | **60.9** | **48.8** | **59.0** | **52.1** | > 说明:我们采用了 MMLU 官方的[评测方案](https://github.com/hendrycks/test)。 ## [CMMLU](https://github.com/haonan-li/CMMLU) | Model 5-shot | STEM | Humanities | Social Sciences | Others | China Specific | Average | |-------------------------|:-----:|:----------:|:---------------:|:------:|:--------------:|:-------:| | Baichuan-7B | 34.4 | 47.5 | 47.6 | 46.6 | 44.3 | 44.0 | | Vicuna-13B | 31.8 | 36.2 | 37.6 | 39.5 | 34.3 | 36.3 | | Chinese-Alpaca-Plus-13B | 29.8 | 33.4 | 33.2 | 37.9 | 32.1 | 33.4 | | Chinese-LLaMA-Plus-13B | 28.1 | 33.1 | 35.4 | 35.1 | 33.5 | 33.0 | | Ziya-LLaMA-13B-Pretrain | 29.0 | 30.7 | 33.8 | 34.4 | 31.9 | 32.1 | | LLaMA-13B | 29.2 | 30.8 | 31.6 | 33.0 | 30.5 | 31.2 | | moss-moon-003-base (16B)| 27.2 | 30.4 | 28.8 | 32.6 | 28.7 | 29.6 | | **Baichuan-13B-Base** | **41.7** | **61.1** | **59.8** | **59.0** | **56.4** | **55.3** | | **Baichuan-13B-Chat** | **42.8** | **62.6** | **59.7** | **59.0** | **56.1** | **55.8** | > 说明:CMMLU 是一个综合性的中文评估基准,专门用于评估语言模型在中文语境下的知识和推理能力。我们采用了其官方的[评测方案](https://github.com/haonan-li/CMMLU)。 ## 微信群组 ![WeChat](https://github.com/baichuan-inc/Baichuan-13B/blob/main/media/wechat.jpeg?raw=true)
RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf
RichardErkhov
"2024-06-03T17:10:20Z"
4,513
0
null
[ "gguf", "region:us" ]
null
"2024-06-03T13:27:01Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) unraveled-7b-a1 - GGUF - Model creator: https://huggingface.co/dustydecapod/ - Original model: https://huggingface.co/dustydecapod/unraveled-7b-a1/ | Name | Quant method | Size | | ---- | ---- | ---- | | [unraveled-7b-a1.Q2_K.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q2_K.gguf) | Q2_K | 2.53GB | | [unraveled-7b-a1.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.IQ3_XS.gguf) | IQ3_XS | 2.81GB | | [unraveled-7b-a1.IQ3_S.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.IQ3_S.gguf) | IQ3_S | 2.96GB | | [unraveled-7b-a1.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q3_K_S.gguf) | Q3_K_S | 2.95GB | | [unraveled-7b-a1.IQ3_M.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.IQ3_M.gguf) | IQ3_M | 3.06GB | | [unraveled-7b-a1.Q3_K.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q3_K.gguf) | Q3_K | 3.28GB | | [unraveled-7b-a1.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q3_K_M.gguf) | Q3_K_M | 3.28GB | | [unraveled-7b-a1.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q3_K_L.gguf) | Q3_K_L | 3.56GB | | [unraveled-7b-a1.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.IQ4_XS.gguf) | IQ4_XS | 3.67GB | | [unraveled-7b-a1.Q4_0.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q4_0.gguf) | Q4_0 | 
3.83GB | | [unraveled-7b-a1.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.IQ4_NL.gguf) | IQ4_NL | 3.87GB | | [unraveled-7b-a1.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q4_K_S.gguf) | Q4_K_S | 3.86GB | | [unraveled-7b-a1.Q4_K.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q4_K.gguf) | Q4_K | 4.07GB | | [unraveled-7b-a1.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q4_K_M.gguf) | Q4_K_M | 4.07GB | | [unraveled-7b-a1.Q4_1.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q4_1.gguf) | Q4_1 | 4.24GB | | [unraveled-7b-a1.Q5_0.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q5_0.gguf) | Q5_0 | 4.65GB | | [unraveled-7b-a1.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q5_K_S.gguf) | Q5_K_S | 4.65GB | | [unraveled-7b-a1.Q5_K.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q5_K.gguf) | Q5_K | 4.78GB | | [unraveled-7b-a1.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q5_K_M.gguf) | Q5_K_M | 4.78GB | | [unraveled-7b-a1.Q5_1.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q5_1.gguf) | Q5_1 | 5.07GB | | [unraveled-7b-a1.Q6_K.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q6_K.gguf) | Q6_K | 5.53GB | | [unraveled-7b-a1.Q8_0.gguf](https://huggingface.co/RichardErkhov/dustydecapod_-_unraveled-7b-a1-gguf/blob/main/unraveled-7b-a1.Q8_0.gguf) | Q8_0 | 7.17GB | Original model description: --- license: apache-2.0 base_model: 
NousResearch/Yarn-Mistral-7b-128k tags: - generated_from_trainer model-index: - name: unraveled-7b-dpo-lora results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # unraveled-7b-dpo-lora This model is a fine-tuned version of [NousResearch/Yarn-Mistral-7b-128k](https://huggingface.co/NousResearch/Yarn-Mistral-7b-128k), following the Zephyr alignment protocol. It achieves the following results on the evaluation set: - Loss: 0.5895 - Rewards/chosen: 0.1439 - Rewards/rejected: -0.1833 - Rewards/accuracies: 0.6880 - Rewards/margins: 0.3272 - Logps/rejected: -221.8329 - Logps/chosen: -266.1414 - Logits/rejected: -1.9675 - Logits/chosen: -2.0859 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-07 - train_batch_size: 2 - eval_batch_size: 4 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - gradient_accumulation_steps: 32 - total_train_batch_size: 256 - total_eval_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rewards/chosen | Rewards/rejected | Rewards/accuracies | Rewards/margins | Logps/rejected | Logps/chosen | Logits/rejected | Logits/chosen | |:-------------:|:-----:|:----:|:---------------:|:--------------:|:----------------:|:------------------:|:---------------:|:--------------:|:------------:|:---------------:|:-------------:| | 0.6313 | 1.0 | 242 | 0.6318 | 0.1228 | -0.0304 | 0.6600 | 0.1532 | -220.3036 | -266.3521 | -1.9863 | -2.1062 | | 0.6013 | 2.0 | 484 | 0.5983 | 0.1484 | -0.1334 | 0.6760 | 0.2819 | 
-221.3338 | -266.0959 | -1.9723 | -2.0914 | | 0.5889 | 3.0 | 726 | 0.5895 | 0.1439 | -0.1833 | 0.6880 | 0.3272 | -221.8329 | -266.1414 | -1.9675 | -2.0859 | ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cu118 - Datasets 2.14.6 - Tokenizers 0.14.1
mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF
mradermacher
"2024-06-11T22:15:59Z"
4,510
1
transformers
[ "transformers", "gguf", "generated_from_trainer", "Healthcare & Lifesciences", "BioMed", "Medical", "CollAIborate", "en", "dataset:collaiborateorg/BioMedData", "base_model:collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v2", "license:llama3", "endpoints_compatible", "region:us" ]
null
"2024-06-10T12:49:57Z"
--- base_model: collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v2 datasets: - collaiborateorg/BioMedData language: - en library_name: transformers license: llama3 quantized_by: mradermacher tags: - generated_from_trainer - Healthcare & Lifesciences - BioMed - Medical - CollAIborate --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/collaiborateorg/Collaiborator-MEDLLM-Llama-3-8B-v2 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | 
[GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q4_0.gguf) | 
i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Collaiborator-MEDLLM-Llama-3-8B-v2-i1-GGUF/resolve/main/Collaiborator-MEDLLM-Llama-3-8B-v2.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF
mradermacher
"2024-06-04T05:50:02Z"
4,509
0
transformers
[ "transformers", "gguf", "mergekit", "merge", "not-for-all-audiences", "en", "base_model:Hastagaras/Anjay-8B-Llama3-CrestRoot", "license:llama3", "endpoints_compatible", "region:us" ]
null
"2024-06-03T09:40:02Z"
--- base_model: Hastagaras/Anjay-8B-Llama3-CrestRoot language: - en library_name: transformers license: llama3 quantized_by: mradermacher tags: - mergekit - merge - not-for-all-audiences --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/Hastagaras/Anjay-8B-Llama3-CrestRoot <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | 
[GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q4_K_M.gguf) 
| i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Anjay-8B-Llama3-CrestRoot-i1-GGUF/resolve/main/Anjay-8B-Llama3-CrestRoot.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF
mradermacher
"2024-06-05T08:44:28Z"
4,507
0
transformers
[ "transformers", "gguf", "en", "base_model:AwanLLM/Awanllm-Llama-3-8B-Dolfin-v1.0", "license:llama3", "endpoints_compatible", "region:us" ]
null
"2024-06-04T10:03:48Z"
--- base_model: AwanLLM/Awanllm-Llama-3-8B-Dolfin-v1.0 language: - en library_name: transformers license: llama3 quantized_by: mradermacher --- ## About <!-- ### quantize_version: 2 --> <!-- ### output_tensor_quantised: 1 --> <!-- ### convert_type: hf --> <!-- ### vocab_type: --> <!-- ### tags: nicoboss --> weighted/imatrix quants of https://huggingface.co/AwanLLM/Awanllm-Llama-3-8B-Dolfin-v1.0 <!-- provided-files --> static quants are available at https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-GGUF ## Usage If you are unsure how to use GGUF files, refer to one of [TheBloke's READMEs](https://huggingface.co/TheBloke/KafkaLM-70B-German-V0.1-GGUF) for more details, including on how to concatenate multi-part files. ## Provided Quants (sorted by size, not necessarily quality. IQ-quants are often preferable over similar sized non-IQ quants) | Link | Type | Size/GB | Notes | |:-----|:-----|--------:|:------| | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ1_S.gguf) | i1-IQ1_S | 2.1 | for the desperate | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ1_M.gguf) | i1-IQ1_M | 2.3 | mostly desperate | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ2_XXS.gguf) | i1-IQ2_XXS | 2.5 | | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ2_XS.gguf) | i1-IQ2_XS | 2.7 | | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ2_S.gguf) | i1-IQ2_S | 2.9 | | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ2_M.gguf) | i1-IQ2_M | 3.0 | | | 
[GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q2_K.gguf) | i1-Q2_K | 3.3 | IQ3_XXS probably better | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ3_XXS.gguf) | i1-IQ3_XXS | 3.4 | lower quality | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ3_XS.gguf) | i1-IQ3_XS | 3.6 | | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q3_K_S.gguf) | i1-Q3_K_S | 3.8 | IQ3_XS probably better | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ3_S.gguf) | i1-IQ3_S | 3.8 | beats Q3_K* | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ3_M.gguf) | i1-IQ3_M | 3.9 | | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q3_K_M.gguf) | i1-Q3_K_M | 4.1 | IQ3_S probably better | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q3_K_L.gguf) | i1-Q3_K_L | 4.4 | IQ3_M probably better | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-IQ4_XS.gguf) | i1-IQ4_XS | 4.5 | | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q4_0.gguf) | i1-Q4_0 | 4.8 | fast, low quality | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q4_K_S.gguf) | i1-Q4_K_S | 4.8 | optimal size/speed/quality | | 
[GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q4_K_M.gguf) | i1-Q4_K_M | 5.0 | fast, recommended | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q5_K_S.gguf) | i1-Q5_K_S | 5.7 | | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q5_K_M.gguf) | i1-Q5_K_M | 5.8 | | | [GGUF](https://huggingface.co/mradermacher/Awanllm-Llama-3-8B-Dolfin-v1.0-i1-GGUF/resolve/main/Awanllm-Llama-3-8B-Dolfin-v1.0.i1-Q6_K.gguf) | i1-Q6_K | 6.7 | practically like static Q6_K | Here is a handy graph by ikawrakow comparing some lower-quality quant types (lower is better): ![image.png](https://www.nethype.de/huggingface_embed/quantpplgraph.png) And here are Artefact2's thoughts on the matter: https://gist.github.com/Artefact2/b5f810600771265fc1e39442288e8ec9 ## FAQ / Model Request See https://huggingface.co/mradermacher/model_requests for some answers to questions you might have and/or if you want some other model quantized. ## Thanks I thank my company, [nethype GmbH](https://www.nethype.de/), for letting me use its servers and providing upgrades to my workstation to enable this work in my free time. Additional thanks to [@nicoboss](https://huggingface.co/nicoboss) for giving me access to his hardware for calculating the imatrix for these quants. <!-- end -->
RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf
RichardErkhov
"2024-06-05T10:42:27Z"
4,507
0
null
[ "gguf", "region:us" ]
null
"2024-06-05T09:06:48Z"
Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) llama-7b-sft-float32 - GGUF - Model creator: https://huggingface.co/argsearch/ - Original model: https://huggingface.co/argsearch/llama-7b-sft-float32/ | Name | Quant method | Size | | ---- | ---- | ---- | | [llama-7b-sft-float32.Q2_K.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q2_K.gguf) | Q2_K | 2.36GB | | [llama-7b-sft-float32.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.IQ3_XS.gguf) | IQ3_XS | 2.6GB | | [llama-7b-sft-float32.IQ3_S.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.IQ3_S.gguf) | IQ3_S | 2.75GB | | [llama-7b-sft-float32.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q3_K_S.gguf) | Q3_K_S | 2.75GB | | [llama-7b-sft-float32.IQ3_M.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.IQ3_M.gguf) | IQ3_M | 2.9GB | | [llama-7b-sft-float32.Q3_K.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q3_K.gguf) | Q3_K | 3.07GB | | [llama-7b-sft-float32.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q3_K_M.gguf) | Q3_K_M | 3.07GB | | [llama-7b-sft-float32.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q3_K_L.gguf) | Q3_K_L | 3.35GB | | [llama-7b-sft-float32.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.IQ4_XS.gguf) | IQ4_XS | 3.4GB | | 
[llama-7b-sft-float32.Q4_0.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q4_0.gguf) | Q4_0 | 3.56GB | | [llama-7b-sft-float32.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.IQ4_NL.gguf) | IQ4_NL | 3.58GB | | [llama-7b-sft-float32.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q4_K_S.gguf) | Q4_K_S | 3.59GB | | [llama-7b-sft-float32.Q4_K.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q4_K.gguf) | Q4_K | 3.8GB | | [llama-7b-sft-float32.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q4_K_M.gguf) | Q4_K_M | 3.8GB | | [llama-7b-sft-float32.Q4_1.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q4_1.gguf) | Q4_1 | 3.95GB | | [llama-7b-sft-float32.Q5_0.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q5_0.gguf) | Q5_0 | 4.33GB | | [llama-7b-sft-float32.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q5_K_S.gguf) | Q5_K_S | 4.33GB | | [llama-7b-sft-float32.Q5_K.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q5_K.gguf) | Q5_K | 4.45GB | | [llama-7b-sft-float32.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q5_K_M.gguf) | Q5_K_M | 4.45GB | | [llama-7b-sft-float32.Q5_1.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q5_1.gguf) | Q5_1 | 4.72GB | | 
[llama-7b-sft-float32.Q6_K.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q6_K.gguf) | Q6_K | 5.15GB | | [llama-7b-sft-float32.Q8_0.gguf](https://huggingface.co/RichardErkhov/argsearch_-_llama-7b-sft-float32-gguf/blob/main/llama-7b-sft-float32.Q8_0.gguf) | Q8_0 | 6.67GB | Original model description: --- tags: - generated_from_trainer model-index: - name: tmp_trainer results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # tmp_trainer This model was trained from scratch on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3.0 ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.1+cu121 - Datasets 2.15.0 - Tokenizers 0.15.0