# -*- coding: utf-8 -*-
""" Official evaluation script for v1.0 of the TriviaQA dataset.
Extended from the evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
import os
import sys
# Get the directory containing this script
current_dir = os.path.dirname(os.path.abspath(__file__))
# Build the relative path to the project root
relative_path = os.path.join(current_dir, '..')
# Add the relative path to sys.path so the local utils package can be imported
sys.path.append(relative_path)
from collections import Counter
import string
import re
import argparse
import utils.dataset_utils
import utils.utils
import json
import csv
# Global accumulators shared across calls to f1_score and evaluate_triviaqa
f1 = exact_match = common = Wrong = 0
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
# print(s)
    # json.dumps coerces non-string inputs (CSV rows, answer objects) to a JSON
    # string; the quotes it adds are stripped again by handle_punc below.
    s = json.dumps(s)
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def handle_punc(text):
exclude = set(string.punctuation + "".join([u"‘", u"’", u"´", u"`"]))
return ''.join(ch if ch not in exclude else ' ' for ch in text)
def lower(text):
return text.lower()
def replace_underscore(text):
return text.replace('_', ' ')
# print(white_space_fix(remove_articles(handle_punc(lower(replace_underscore(s))))).strip())
return white_space_fix(remove_articles(handle_punc(lower(replace_underscore(s))))).strip()
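# A minimal, uncalled sanity-check sketch of normalize_answer. The helper name
# (prefixed with "_") and the example strings are invented for illustration and
# are not part of the dataset or the original script.
def _normalize_answer_examples():
    # json.dumps wraps the value in quotes, handle_punc strips all punctuation,
    # remove_articles drops "the", and white_space_fix collapses whitespace.
    assert normalize_answer("The_Beatles!") == "beatles"
    # Lists are stringified too, so every element contributes tokens.
    assert normalize_answer(["The Beatles", "Beatles"]) == "beatles beatles"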
def f1_score(prediction, ground_truth):
global Wrong
prediction_tokens = normalize_answer(prediction).split()
print(f"规范化预测:{normalize_answer(prediction)}")
# print(f"预测token数:{len(prediction_tokens)}")
ground_truth_tokens = normalize_answer(ground_truth).split()
print(f"规范化答案:{normalize_answer(ground_truth)}")
# print(f"答案token数:{len(ground_truth_tokens) }")
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
print(common)
num_same = sum(common.values())
print(num_same)
if num_same == 0:
Wrong+=1
return 0
precision = 1.0 * num_same / len(prediction_tokens)
# print(f"预测率:{precision}")
recall = 1.0 * num_same / len(ground_truth_tokens)
# print(f"召回率:{recall}")
f1 = (2 * precision * recall) / (precision + recall)
return f1
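# A minimal, uncalled worked example of the token-overlap F1 above. The helper
# name and the example strings are invented for illustration.
def _f1_score_example():
    # After normalization the prediction tokens are [united, states, of, america]
    # and the ground-truth tokens are [united, states]: overlap = 2,
    # precision = 2/4 = 0.5, recall = 2/2 = 1.0, f1 = 2*0.5*1.0/1.5 ≈ 0.667.
    return f1_score("the united states of america", "United States")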
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Unlike the original TriviaQA/SQuAD script, this variant applies the metric
    # once to the whole answer object (normalize_answer stringifies it) instead
    # of looping over each alias and taking the max.
    scores_for_ground_truths = []
    score = metric_fn(prediction, ground_truths)
    scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)
def is_exact_match(answer_object, prediction):
ground_truths = get_ground_truths(answer_object)
for ground_truth in ground_truths:
if exact_match_score(prediction, ground_truth):
return True
return False
def has_exact_match(ground_truths, candidates):
for ground_truth in ground_truths:
if ground_truth in candidates:
return True
return False
def get_ground_truths(answer):
return answer['NormalizedAliases'] + [normalize_answer(ans) for ans in answer.get('HumanAnswers', [])]
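# get_ground_truths assumes a TriviaQA-style answer object; a sketch of the
# assumed shape (field values invented for illustration, 'HumanAnswers' is an
# optional field):
#   {"Value": "The Beatles",
#    "Aliases": ["The Beatles", "Beatles"],
#    "NormalizedAliases": ["the beatles", "beatles"],
#    "HumanAnswers": ["the beatles"]}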
def get_oracle_score(ground_truth, predicted_answers, i=None, mute=False,maxline=1000):
exact_match = common = 0
common += 1
prediction = normalize_answer(predicted_answers[i])
ground_truths = ground_truth[i]
print(f"预测:{prediction}")
print(f"事实{ground_truths}")
em_for_this_question = has_exact_match(ground_truths, prediction)
exact_match += int(em_for_this_question)
exact_match = 100.0 * exact_match / maxline
return {'oracle_exact_match': exact_match, 'common': common, 'denominator': maxline,"Wrong":Wrong,
'pred_len': len(predicted_answers), 'gold_len': len(ground_truth)}
def evaluate_triviaqa(ground_truth, predicted_answers, i=None, mute=False, maxline=None):
    global f1, exact_match, common
    common += 1  # count this evaluated example
    prediction = predicted_answers[i]
    ground_truths = ground_truth[i]["Data"]["Answer"]
    # print(f"Prediction: {prediction}")
    # print(f"Ground truth: {ground_truths}")
em_for_this_question = metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
if em_for_this_question == 0 and not mute:
print("em=0:", prediction, ground_truths)
exact_match += em_for_this_question
f1_for_this_question = metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
f1 += f1_for_this_question
print(f"当前轮次:{i+1}")
print(f"本轮F1率:{f1_for_this_question}")
print(f"累加F1率:{f1}")
print(f"本轮exact:{em_for_this_question}")
print(f"累加exact:{exact_match}")
exact_match_mean = exact_match / (i+1)
f1_mean = f1 / (i+1)
print(f"平均F1率:{f1_mean}")
print(f"平均exact率:{exact_match_mean}")
return {'exact_match': exact_match_mean, 'f1': f1_mean, 'common': common, 'denominator': i+1,"Wrong":Wrong,
'pred_len': len(predicted_answers), 'gold_len': len(ground_truth)}
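# The dict returned above carries running means and counters; an illustrative
# (not measured) example of its shape after 1000 examples:
#   {'exact_match': 0.62, 'f1': 0.71, 'common': 1000, 'denominator': 1000,
#    'Wrong': 285, 'pred_len': 1000, 'gold_len': 1000}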
def get_args():
parser = argparse.ArgumentParser(
description='Evaluation for TriviaQA {}'.format(expected_version))
parser.add_argument('--dataset_file',default="C:/Users/94427/kashiwa/DISC-Assignment/Experiment/TriviaQA/TriviaQA_test_format1k.jsonl", help='Dataset file')
parser.add_argument('--prediction_file',default="C:/Users/94427/kashiwa/DISC-Assignment/Experiment/TriviaQA/result/TriviaQA_GPT3.5_answers1k.csv", help='Prediction File')
args = parser.parse_args()
return args
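# Example invocation (the script filename is assumed; the Windows default paths
# above point at a local checkout and will usually need to be overridden):
#   python triviaqa_evaluation.py \
#       --dataset_file path/to/TriviaQA_test_format1k.jsonl \
#       --prediction_file path/to/TriviaQA_GPT3.5_answers1k.csv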
if __name__ == '__main__':
expected_version = 1.0
args = get_args()
# dataset_json = utils.dataset_utils.read_triviaqa_data(args.dataset_file)
dataset_json = args.dataset_file
prediction_json = args.prediction_file
# dataset_dict = json.loads(dataset_json)
dataset_dict = []
prediction_dict = []
    # Open the JSONL dataset file and read it line by line
with open(args.dataset_file, 'r',encoding="utf-8") as file:
for line in file:
json_data = json.loads(line)
dataset_dict.append(json_data)
    # Print the loaded data
    # Open the prediction CSV file and read it
with open(args.prediction_file, newline='',encoding="utf-8") as csvfile:
reader = csv.reader(csvfile)
        # Iterate over every row
for row in reader:
prediction_dict.append(row)
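    # Note: each CSV row is kept as a list of cells, and evaluate_triviaqa passes
    # the whole row as the prediction; normalize_answer stringifies it via
    # json.dumps. (The column layout of the prediction CSV is not specified by
    # this script.)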
# print(prediction_dict)
# if dataset_json['Version'] != expected_version:
# print('Evaluation expects v-{} , but got dataset with v-{}'.format(expected_version,dataset_json['Version']),
# file=sys.stderr)
for i in range(0,1000):
# print(dataset_dict)
# print(dataset_dict[i])
# print(dataset_dict[i]["Data"])
print(f"当前行数:{i}")
key_to_ground_truth = dataset_dict
predictions = prediction_dict
eval_dict = evaluate_triviaqa(key_to_ground_truth, predictions,i=i,maxline=1000)
print(eval_dict)