{
"results": {
"retrieval": {
"mrr": 0.386455960516325,
"map": 0.37688876233864843
},
"generation": {
"em": 0.002277904328018223,
"f1": 0.3787448936861267,
"rouge1": 0.34038227335702076,
"rouge2": 0.1898058362852231,
"rougeL": 0.23622836359261534,
"accuracy": 0.40689066059225515,
"completeness": 0.5954968944099379,
"hallucination": 0.07920792079207921,
"utilization": 0.5117027501462844,
"numerical_accuracy": 0.3050397877984085
}
},
"config": {
"eval_name": "e5-mistral-7b_qwen2-72b",
"generative_model": "Qwen/Qwen2.5-72B-Instruct",
"generative_model_args": {
"name": "Qwen/Qwen2.5-72B-Instruct",
"num_params": 72.7,
"open_source": true
},
"retrieval_model": "intfloat/e5-mistral-7b-instruct",
"retrieval_model_args": {
"name": "intfloat/e5-mistral-7b-instruct",
"num_params": 7.11,
"open_source": true
}
}
}