# -*- coding: utf-8 -*-
""" Official evaluation script for v1.0 of the TriviaQA dataset.
Extended from the evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
import os
import sys
# Directory containing this script.
current_dir = os.path.dirname(os.path.abspath(__file__))
# Path to its parent directory.
relative_path = os.path.join(current_dir, '..')
# Add the parent directory to sys.path so the sibling `utils` package imports.
sys.path.append(relative_path)
from collections import Counter
import string
import re
import argparse
import utils.dataset_utils
import utils.utils
import json
import csv

# Global accumulators shared across the scoring functions below.
f1 = exact_match = common = Wrong = 0

def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    # Coerce to a string so non-string inputs don't crash tokenization; avoid
    # json.dumps here, which would add surrounding quotes and \uXXXX escapes
    # and corrupt the normalization of non-ASCII answers.
    s = str(s)

    def remove_articles(text):
        return re.sub(r'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return ' '.join(text.split())

    def handle_punc(text):
        exclude = set(string.punctuation + "".join([u"‘", u"’", u"´", u"`"]))
        return ''.join(ch if ch not in exclude else ' ' for ch in text)

    def lower(text):
        return text.lower()

    def replace_underscore(text):
        return text.replace('_', ' ')

    return white_space_fix(remove_articles(handle_punc(lower(replace_underscore(s))))).strip()
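# Hedged, made-up examples of the normalization (not from the dataset):
#   normalize_answer("The Eiffel_Tower!")    -> "eiffel tower"
#   normalize_answer("a  quick   brown fox") -> "quick brown fox"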

def f1_score(prediction, ground_truth):
    global Wrong
    normalized_prediction = normalize_answer(prediction)
    normalized_ground_truth = normalize_answer(ground_truth)
    print(f"normalized prediction: {normalized_prediction}")
    print(f"normalized ground truth: {normalized_ground_truth}")
    prediction_tokens = normalized_prediction.split()
    ground_truth_tokens = normalized_ground_truth.split()
    # Multiset intersection of the tokens shared by prediction and ground truth.
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    print(f"shared tokens: {common}, count: {num_same}")
    if num_same == 0:
        Wrong += 1
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
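# Worked example with hypothetical strings: prediction "Paris France" vs
# ground truth "Paris" shares one token, so precision = 1/2, recall = 1/1,
# and F1 = 2 * 0.5 * 1.0 / 1.5 ≈ 0.667.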

def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
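# e.g. exact_match_score("the Eiffel Tower", "Eiffel  Tower!") -> True,
# since both sides normalize to "eiffel tower".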

def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    # Score the prediction against each ground-truth answer in turn and keep
    # the best score, as in the SQuAD evaluation script this file extends.
    scores_for_ground_truths = []
    for ground_truth in ground_truths:
        score = metric_fn(prediction, ground_truth)
        scores_for_ground_truths.append(score)
    return max(scores_for_ground_truths)
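# Example with made-up aliases:
#   metric_max_over_ground_truths(exact_match_score, "JFK",
#                                 ["John F. Kennedy", "JFK"])  -> True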

def is_exact_match(answer_object, prediction):
    ground_truths = get_ground_truths(answer_object)
    for ground_truth in ground_truths:
        if exact_match_score(prediction, ground_truth):
            return True
    return False

def has_exact_match(ground_truths, candidates):
    for ground_truth in ground_truths:
        if ground_truth in candidates:
            return True
    return False
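# Note: when `candidates` is a string (as in get_oracle_score below), the `in`
# test is a substring check rather than list membership.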

def get_ground_truths(answer):
    return answer['NormalizedAliases'] + [normalize_answer(ans) for ans in answer.get('HumanAnswers', [])]
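# Assumes the TriviaQA answer-object schema, e.g. (abridged, hypothetical values):
#   {"Value": "Paris", "NormalizedAliases": ["paris"],
#    "HumanAnswers": ["Paris, France"]}
# for which get_ground_truths(...) returns ["paris", "paris france"].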

def get_oracle_score(ground_truth, predicted_answers, i=None, mute=False, maxline=1000):
    exact_match = common = 0
    common += 1
    prediction = normalize_answer(predicted_answers[i])
    ground_truths = ground_truth[i]
    print(f"prediction: {prediction}")
    print(f"ground truth: {ground_truths}")
    em_for_this_question = has_exact_match(ground_truths, prediction)
    exact_match += int(em_for_this_question)
    exact_match = 100.0 * exact_match / maxline
    return {'oracle_exact_match': exact_match, 'common': common, 'denominator': maxline, 'Wrong': Wrong,
            'pred_len': len(predicted_answers), 'gold_len': len(ground_truth)}

def evaluate_triviaqa(ground_truth, predicted_answers, i=None, mute=False, maxline=None):
    global f1, exact_match, common
    common += 1  # number of questions evaluated so far
    prediction = predicted_answers[i]
    # Assumes each gold record keeps its acceptable answers as a list of
    # strings under ["Data"]["Answer"].
    ground_truths = ground_truth[i]["Data"]["Answer"]
    em_for_this_question = metric_max_over_ground_truths(
        exact_match_score, prediction, ground_truths)
    if em_for_this_question == 0 and not mute:
        print("em=0:", prediction, ground_truths)
    exact_match += em_for_this_question
    f1_for_this_question = metric_max_over_ground_truths(
        f1_score, prediction, ground_truths)
    f1 += f1_for_this_question
    print(f"current round: {i + 1}")
    print(f"F1 this round: {f1_for_this_question}")
    print(f"cumulative F1: {f1}")
    print(f"exact match this round: {em_for_this_question}")
    print(f"cumulative exact match: {exact_match}")
    exact_match_mean = exact_match / (i + 1)
    f1_mean = f1 / (i + 1)
    print(f"mean F1: {f1_mean}")
    print(f"mean exact match: {exact_match_mean}")
    return {'exact_match': exact_match_mean, 'f1': f1_mean, 'common': common, 'denominator': i + 1, 'Wrong': Wrong,
            'pred_len': len(predicted_answers), 'gold_len': len(ground_truth)}
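# The returned summary looks like (values hypothetical):
#   {'exact_match': 0.61, 'f1': 0.68, 'common': 500, 'denominator': 500,
#    'Wrong': 120, 'pred_len': 1000, 'gold_len': 1000}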

def get_args():
    parser = argparse.ArgumentParser(
        description='Evaluation for TriviaQA {}'.format(expected_version))
    parser.add_argument('--dataset_file', default="C:/Users/94427/kashiwa/DISC-Assignment/Experiment/TriviaQA/TriviaQA_test_format1k.jsonl", help='Dataset file')
    parser.add_argument('--prediction_file', default="C:/Users/94427/kashiwa/DISC-Assignment/Experiment/TriviaQA/result/TriviaQA_GPT3.5_answers1k.csv", help='Prediction File')
    args = parser.parse_args()
    return args

if __name__ == '__main__':
    expected_version = 1.0
    args = get_args()

    dataset_dict = []
    prediction_dict = []
    # Read the gold data: a JSONL file with one JSON record per line.
    with open(args.dataset_file, 'r', encoding="utf-8") as file:
        for line in file:
            dataset_dict.append(json.loads(line))
    # Read the predictions: csv.reader yields each row as a list of fields,
    # so each entry of prediction_dict is a row, not a bare string.
    with open(args.prediction_file, newline='', encoding="utf-8") as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            prediction_dict.append(row)

    # Evaluate the first 1,000 examples (matching the *1k input files).
    for i in range(0, 1000):
        print(f"current line: {i}")
        key_to_ground_truth = dataset_dict
        predictions = prediction_dict
        eval_dict = evaluate_triviaqa(key_to_ground_truth, predictions, i=i, maxline=1000)
    print(eval_dict)
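# Example invocation (the script name here is hypothetical; the defaults above
# already point at the 1k-example files, so both flags may be omitted):
#   python triviaqa_evaluation.py \
#       --dataset_file TriviaQA_test_format1k.jsonl \
#       --prediction_file TriviaQA_GPT3.5_answers1k.csv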