Tasks: Text Retrieval
Modalities: Text
Formats: json
Sub-tasks: document-retrieval
Languages: Slovak
Size: 10K - 100K
Tags: text-retrieval
""" | |
Michal Stromko Automating Evaluation Script Annotated Dataset | |
for task Semantic Evaluation | |
""" | |
import sys
import json

from metrics import recall, mrr, map, ndcg
from filtering_parsing import filtering_annotated, filtering_predicted


def load_jsonl_file(file_path: str):
    """
    Load jsonl file
    :param file_path: path to the file
    :return: json data
    """
    data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            data.append(json.loads(line))
    return data


def load_json_file(file_path: str):
    """
    Load json file
    :param file_path: path to the file
    :return: json data
    """
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    return data


if __name__ == '__main__':
    # get file names
    f_annotated_final = sys.argv[1]
    f_predicted_final = sys.argv[2]

    # load files
    print("Loading datasets")
    final_annotated = load_jsonl_file(f_annotated_final)
    predicted = load_json_file(f_predicted_final)
    print("Loaded datasets")
    # filtering and parsing the annotated dataset
    filtered_annotated_dataset = filtering_annotated.first_filtering_annotated(final_annotated)
    filtering_annotated.counting_count_results(filtered_annotated_dataset)
    actual_results = filtering_annotated.second_filtering_annotated(filtered_annotated_dataset)

    # filtering and parsing the predicted dataset
    predicted_results = filtering_predicted.prediction_filtering_dataset(predicted)

    # prepare the dataset for NDCG evaluation
    correct_answers_ndcg = ndcg.filtering_annotated_dataset_for_eval_ndcg(filtered_annotated_dataset)
    predicted_ndcg = ndcg.filter_and_parsing_assignment_article_id_to_category(predicted_results, correct_answers_ndcg)
    print("\n")

    # cut-off points (top-k) at which each metric is evaluated
    count_results = [5, 10, 15]
    # evaluation of the dataset
    print("Start evaluation")

    # Recall
    for k in count_results:
        print(f"Recall metric for the top {k} results:")
        recall_value = recall.count_recall(actual_results, predicted_results, k)
        print(f"\nMean Recall across all questions: \n{recall_value}")
        print("\n")
    print("---------------------------------------------------------")

    # MRR
    for k in count_results:
        print(f"MRR metric for the top {k} results:")
        mrr.count_mrr(actual_results, predicted_results, k)
        print("\n")
    print("---------------------------------------------------------")

    # MAP
    for k in count_results:
        print(f"MAP metric for the top {k} results:")
        results, total_mAP = map.count_map(actual_results, predicted_results, k)
        print("Results for individual questions:")
        print(results)
        print(f"Total mAP: {total_mAP}")
        print("\n")
    print("---------------------------------------------------------")

    # NDCG
    for k in count_results:
        print(f"NDCG metric for the top {k} results:")
        ndcg_res = ndcg.count_ndcg(predicted_ndcg, k)
        print(ndcg_res)
        print("\n")
    print("---------------------------------------------------------")

    print("Finish evaluation")