Datasets:
Tasks: Text Retrieval
Modalities: Text
Formats: json
Sub-tasks: document-retrieval
Languages: Slovak
Size: 10K - 100K
Tags: text-retrieval
DOI:
License:
"""
Michal Stromko Automating Evaluation Script Annotated Dataset
for task Semantic Evaluation
"""
import sys
import json

from metrics import recall, mrr, map, ndcg
from filtering_parsing import filtering_annotated, filtering_predicted


def load_jsonl_file(file_path: str):
    """
    Load a JSONL file.

    :param file_path: path to the file
    :return: list of parsed JSON records, one per line
    """
    data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            data.append(json.loads(line))
    return data


def load_json_file(file_path: str):
    """
    Load a JSON file.

    :param file_path: path to the file
    :return: parsed JSON data
    """
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    return data


if __name__ == '__main__':
    # get file names from the command-line arguments
    f_annotated_final = sys.argv[1]
    f_predicted_final = sys.argv[2]

    # load files
    print("Loading datasets")
    final_annotated = load_jsonl_file(f_annotated_final)
    predicted = load_json_file(f_predicted_final)
    print("Loaded datasets")

    # filter and parse the annotated dataset
    filtered_annotated_dataset = filtering_annotated.first_filtering_annotated(final_annotated)
    filtering_annotated.counting_count_results(filtered_annotated_dataset)
    actual_results = filtering_annotated.second_filtering_annotated(filtered_annotated_dataset)

    # filter and parse the predicted dataset
    predicted_results = filtering_predicted.prediction_filtering_dataset(predicted)

    # prepare the datasets for NDCG evaluation
    correct_answers_ndcg = ndcg.filtering_annotated_dataset_for_eval_ndcg(filtered_annotated_dataset)
    predicted_ndcg = ndcg.filter_and_parsing_assignment_article_id_to_category(predicted_results, correct_answers_ndcg)
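    # Note: judging by the function name above, this step presumably assigns each
    # predicted article id to its annotated relevance category, which NDCG needs as
    # graded relevance; the exact behaviour is defined in the ndcg module.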
print("\n")
count_results = [5, 10, 15]
# Evaluation Dataset
print("Start evaluation")

    # compute Recall
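    # Recall@k (standard definition; the exact implementation lives in metrics.recall):
    # the fraction of a question's relevant documents that appear among the top-k
    # retrieved results, averaged over all questions.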
    for k in count_results:
        print(f"Recall for the top {k} results:")
        recall_value = recall.count_recall(actual_results, predicted_results, k)
        print(f"\nMean Recall over all questions:\n{recall_value}")
        print("\n")
    print("---------------------------------------------------------")

    # compute MRR
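    # MRR@k (standard definition; the exact implementation lives in metrics.mrr):
    # the mean over all questions of 1 / rank of the first relevant document within
    # the top-k results, taken as 0 when no relevant document is retrieved.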
    for k in count_results:
        print(f"MRR for the top {k} results:")
        mrr.count_mrr(actual_results, predicted_results, k)
        print("\n")
    print("---------------------------------------------------------")

    # compute MAP
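    # MAP@k (standard definition; the exact implementation lives in metrics.map):
    # the mean over all questions of the average precision accumulated at the ranks
    # of the relevant documents within the top-k results.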
    for k in count_results:
        print(f"MAP for the top {k} results:")
        results, total_mAP = map.count_map(actual_results, predicted_results, k)
        print("Results for individual questions:")
        print(results)
        print(f"Total mAP: {total_mAP}")
        print("\n")
    print("---------------------------------------------------------")

    # compute NDCG
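    # NDCG@k (standard definition; the exact implementation lives in metrics.ndcg):
    # DCG@k of the ranked results divided by the ideal DCG@k, where, in the common
    # formulation, DCG@k = sum_{i=1..k} rel_i / log2(i + 1) and rel_i is the graded
    # relevance of the result at rank i.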
    for k in count_results:
        print(f"NDCG for the top {k} results:")
        ndcg_res = ndcg.count_ndcg(predicted_ndcg, k)
        print(ndcg_res)
        print("\n")
    print("---------------------------------------------------------")
print("Finish evaluation")