# Source: lingo_judge_metric/lingo_judge_metric.py (Hugging Face Space, commit 6f69903)
# Inspired by: https://huggingface.co/spaces/evaluate-metric/bleurt/blob/main/bleurt.py
import datasets
import evaluate
import torch
from judge import LingoJudge
_CITATION = """
@article{marcu2023lingoqa,
title={LingoQA: Video Question Answering for Autonomous Driving},
author={Ana-Maria Marcu and Long Chen and Jan Hünermann and Alice Karnsund and Benoit Hanotte and Prajwal Chidananda and Saurabh Nair and Vijay Badrinarayanan and Alex Kendall and Jamie Shotton and Oleg Sinavski},
journal={arXiv preprint arXiv:2312.14115},
year={2023},
}
"""
_DESCRIPTION = """
Lingo-Judge is an evaluation metric that aligns closely with human judgement on the LingoQA evaluation suite.
See the project's README at https://github.com/wayveai/LingoQA for more information.
"""
_KWARGS_DESCRIPTION = """
Lingo-Judge Score.
Args:
'questions' (list of str): Input questions.
`predictions` (list of str): Model predictions.
`references` (list of list of str): Multiple references per question.
Returns:
`scores` (list of float): Score indicating truthfulness.
"""
@evaluate.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class LingoJudgeMetric(evaluate.Metric):
    """Hugging Face `evaluate` wrapper around the LingoJudge scorer."""

    def _info(self):
        """Return metric metadata: description, citation, and input schema."""
        feature_schema = datasets.Features(
            {
                "questions": datasets.Value("string"),
                "predictions": datasets.Value("string"),
                "references": datasets.Sequence(datasets.Value("string")),
            }
        )
        return evaluate.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=feature_schema,
        )

    def _download_and_prepare(self, dl_manager):
        """Instantiate the LingoJudge model in eval mode on GPU if available."""
        cuda_available = torch.cuda.is_available()
        self.device = "cuda" if cuda_available else "cpu"
        self.scorer = LingoJudge().eval().to(self.device)

    def _compute(self, questions, predictions, references):
        """Score each prediction against its references for the given questions.

        NOTE(review): arguments are forwarded as (questions, references,
        predictions) — presumably `LingoJudge.compute`'s expected order;
        verify against judge.py.
        """
        return self.scorer.compute(questions, references, predictions)