OmniEval/eval-results/omnieval-human/e5-mistral-7b_qwen2-72b/results_2023-12-08 15:46:20.425378.json
{
    "results": {
        "retrieval": {
            "mrr": 0.303246013667426,
            "map": 0.2960516324981017
        },
        "generation": {
            "em": 0.002277904328018223,
            "f1": 0.3705164550873997,
            "rouge1": 0.3270311806826159,
            "rouge2": 0.17476659877087528,
            "rougeL": 0.22225645997479143,
            "accuracy": 0.385250569476082,
            "completeness": 0.5877535101404057,
            "hallucination": 0.0924956369982548,
            "utilization": 0.4793244030285381,
            "numerical_accuracy": 0.28622540250447226
        }
    },
    "config": {
        "eval_name": "e5-mistral-7b_qwen2-72b",
        "generative_model": "Qwen/Qwen2.5-72B-Instruct",
        "generative_model_args": {
            "name": "Qwen/Qwen2.5-72B-Instruct",
            "num_params": 72.7,
            "open_source": true
        },
        "retrieval_model": "intfloat/e5-mistral-7b-instruct",
        "retrieval_model_args": {
            "name": "intfloat/e5-mistral-7b-instruct",
            "num_params": 7.11,
            "open_source": true
        }
    }
}
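
A minimal loading sketch (not part of the original results file), assuming the JSON is checked out at the relative repository path shown in the header above; it uses Python's standard json module to print the eval name and each metric group.

import json

# Relative path taken from the header above (an assumption about where the
# file lives in your local checkout; adjust as needed).
path = "eval-results/omnieval-human/e5-mistral-7b_qwen2-72b/results_2023-12-08 15:46:20.425378.json"

with open(path, "r", encoding="utf-8") as f:
    data = json.load(f)

print("eval_name:", data["config"]["eval_name"])
for group, metrics in data["results"].items():  # "retrieval" and "generation"
    for metric, value in metrics.items():
        print(f"{group}/{metric}: {value:.4f}")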