Datasets:
Tasks: Text Retrieval
Modalities: Text
Formats: json
Sub-tasks: document-retrieval
Languages: Slovak
Size: 10K - 100K
Tags: text-retrieval
DOI:
License:
Upload 8 files
code for evaluation
- .gitattributes +1 -0
- automatic_evaluation.py +100 -0
- metrics/__init__.py +0 -0
- metrics/map.py +66 -0
- metrics/mrr.py +38 -0
- metrics/ndcg.py +121 -0
- metrics/recall.py +74 -0
- requirements.txt +2 -0
- semantic_search_results.json +3 -0
.gitattributes
CHANGED
@@ -54,3 +54,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
indexing.jsonl filter=lfs diff=lfs merge=lfs -text
+semantic_search_results.json filter=lfs diff=lfs merge=lfs -text
automatic_evaluation.py
ADDED
@@ -0,0 +1,100 @@
"""
Michal Stromko
Automatic evaluation script for the annotated dataset,
used for the semantic search evaluation task.
"""
import sys
import json

from metrics import recall, mrr, map, ndcg
from filtering_parsing import filtering_annotated, filtering_predicted


def load_jsonl_file(file_path: str):
    """
    Load a JSONL file.
    :param file_path: path to the file
    :return: list of parsed JSON objects
    """
    data = []
    with open(file_path, "r", encoding="utf-8") as f:
        for line in f:
            data.append(json.loads(line))
    return data


def load_json_file(file_path: str):
    """
    Load a JSON file.
    :param file_path: path to the file
    :return: parsed JSON data
    """
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    return data


if __name__ == '__main__':
    # get file names from the command line
    f_annotated_final = sys.argv[1]
    f_predicted_final = sys.argv[2]

    # load files
    print("Loading datasets")
    final_annotated = load_jsonl_file(f_annotated_final)
    predicted = load_json_file(f_predicted_final)
    print("Loaded datasets")

    # filter and parse the annotated dataset
    filtered_annotated_dataset = filtering_annotated.first_filtering_annotated(final_annotated)

    filtering_annotated.counting_count_results(filtered_annotated_dataset)
    actual_results = filtering_annotated.second_filtering_annotated(filtered_annotated_dataset)

    # filter and parse the predicted dataset
    predicted_results = filtering_predicted.prediction_filtering_dataset(predicted)

    # prepare the dataset for NDCG evaluation
    correct_answers_ndcg = ndcg.filtering_annotated_dataset_for_eval_ndcg(filtered_annotated_dataset)
    predicted_ndcg = ndcg.filter_and_parsing_assignment_article_id_to_category(predicted_results, correct_answers_ndcg)

    print("\n")

    count_results = [5, 10, 15]

    # evaluate the dataset
    print("Start evaluation")

    # Recall
    for k in count_results:
        print(f"Count Recall Metric for {k} results:")
        recall_value = recall.count_recall(actual_results, predicted_results, k)
        print(f"\nMean recall for each result count: \n{recall_value}")
        print("\n")
        print("---------------------------------------------------------")

    # MRR
    for k in count_results:
        print(f"Count MRR Metric for {k} results:")
        mrr.count_mrr(actual_results, predicted_results, k)
        print("\n")
        print("---------------------------------------------------------")

    # MAP
    for k in count_results:
        print(f"Count MAP Metric for {k} results:")
        results, total_mAP = map.count_map(actual_results, predicted_results, k)
        print("Results for individual questions:")
        print(results)

        print("\n")
        print("---------------------------------------------------------")

    # NDCG
    for k in count_results:
        print(f"Count NDCG Metric for {k}:")
        ndcg_res = ndcg.count_ndcg(predicted_ndcg, k)
        print("\n")
        print("---------------------------------------------------------")

    print("Finish evaluation")
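As written, the script takes two positional arguments (the annotated JSONL file and the semantic search results JSON, e.g. python automatic_evaluation.py annotated.jsonl semantic_search_results.json) and imports a filtering_parsing package that is not part of this upload. Below is a minimal sketch, with hypothetical article IDs, of the data shape the metric modules expect once that filtering has produced ranked ID lists per question; it assumes you run it from the repository root so the metrics package is importable.

# Hypothetical toy data; real inputs come from the annotated JSONL and
# semantic_search_results.json after the filtering_parsing helpers run.
from metrics import recall, mrr, map as map_metric

# one entry per question: relevant article IDs from the annotated dataset
actual_results = [[11, 42, 7], [3, 99]]
# one entry per question: ranked article IDs returned by the search engine
predicted_results = [
    [42, 5, 11, 8, 7],
    [99, 3, 10, 20, 30],
]

recall.count_recall(actual_results, predicted_results, 5)                    # Recall@5
mrr.count_mrr(actual_results, predicted_results, 5)                          # MRR@5
df, total_map = map_metric.count_map(actual_results, predicted_results, 5)   # MAP@5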
metrics/__init__.py
ADDED
File without changes
metrics/map.py
ADDED
@@ -0,0 +1,66 @@
import pandas as pd


def count_map(actual_list, predicted_list, count_results):
    """
    Compute the MAP metric.
    :param actual_list: relevant results per question from the annotated dataset
    :param predicted_list: ranked results per question from the search engine
    :param count_results: number of top results (k) to evaluate
    :return: DataFrame with per-question results, total MAP value
    """

    # initialize empty lists for the results
    questions = []
    k_stops = []
    frequencies = []
    mAPs = []

    # for every question
    for q, (actual, predicted) in enumerate(zip(actual_list, predicted_list), 1):
        k_stop = None  # rank at which the loop can stop for this question
        ap_sum = 0     # running AP sum for this question
        count = 0      # how many times k_stop was reached

        # loop over the top-k predicted values
        for x, pred_value in enumerate(predicted[:count_results], 1):
            act_set = set(actual)
            pred_set = set(predicted[:x])
            precision_at_k = len(act_set & pred_set) / x

            if pred_value in actual:
                rel_k = 1
            else:
                rel_k = 0

            ap_sum += precision_at_k * rel_k

            # if all relevant values have been found and k_stop is not set yet, record it
            if len(act_set) == ap_sum and k_stop is None:
                k_stop = x
                count += 1

        # if k_stop was not reached within count_results, set it to count_results
        if k_stop is None:
            k_stop = count_results

        # average precision for this question
        ap_q = ap_sum / len(actual)

        # save the results
        questions.append(q)
        k_stops.append(k_stop)
        frequencies.append(count)
        mAPs.append(round(ap_q, 2))

    # create a DataFrame from the results
    df_results = pd.DataFrame({'Question': questions, 'k_stop': k_stops, 'Frequency': frequencies, 'mAP': mAPs})

    # total MAP
    total_mAP = round(df_results['mAP'].mean(), 2)

    print(f"MAP Metric for {count_results} is: {total_mAP}")

    k_stop_counts = df_results['k_stop'].value_counts()
    print(f"Count of K_stop \n{k_stop_counts}")

    return df_results, round(total_mAP, 2)
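A quick sanity check of count_map on two hypothetical questions (toy IDs, run from the repository root):

from metrics import map as map_metric

actual = [[1, 2], [7]]                           # relevant IDs per question
predicted = [[1, 9, 2, 8, 5], [3, 7, 4, 6, 2]]   # ranked IDs per question

df, total_map = map_metric.count_map(actual, predicted, 5)
print(df)         # per-question AP plus the k_stop and Frequency columns
print(total_map)  # mean of the per-question AP values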
metrics/mrr.py
ADDED
@@ -0,0 +1,38 @@
def count_mrr(actual_list, predicted_list, count_results):
    """
    Compute the MRR metric.
    :param actual_list: relevant results per question from the annotated dataset
    :param predicted_list: ranked results per question from the search engine
    :param count_results: number of top results (k) to evaluate
    :return: None
    """
    # number of queries
    Q = len(actual_list)
    # running sum of reciprocal ranks
    cumulative_reciprocal = 0
    # count of first relevant hits per rank position
    relevant_results_count = {i: 0 for i in range(1, count_results + 1)}

    for i in range(Q):
        actual = actual_list[i]
        pred = predicted_list[i][:count_results]
        reciprocal_rank = 0
        for j, result in enumerate(pred, 1):
            if result in actual:
                reciprocal_rank = 1 / j
                relevant_results_count[j] += 1  # first relevant result for this query is at rank j
                break
        cumulative_reciprocal += reciprocal_rank
        # print(f"query #{i+1} = {1}/{j} = {reciprocal_rank}")

    # calculate MRR
    mrr = cumulative_reciprocal / Q

    # print the result
    print(f"MRR Metric for {count_results} is: {round(mrr, 2)}")

    # print the table of first-hit positions
    print("Table of Relevant Results Count:")
    print("Position | Count")

    for position, count in relevant_results_count.items():
        print(f"{position:^9} | {count:^5}")
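A small usage sketch for count_mrr with invented IDs; the function prints MRR and the table of first-hit positions and returns None:

from metrics import mrr

actual = [[4, 8], [15]]
predicted = [[16, 4, 23, 42, 8], [15, 16, 23, 42, 108]]

# first relevant hit at rank 2 and rank 1, so MRR = (1/2 + 1) / 2 = 0.75
mrr.count_mrr(actual, predicted, 5)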
metrics/ndcg.py
ADDED
@@ -0,0 +1,121 @@
from math import log2
import pandas as pd


def filtering_annotated_dataset_for_eval_ndcg(filtered_annotated_dataset):
    """
    Extract only the article id and category from the filtered annotated dataset;
    each question becomes a dict {answer_id: category}, and all dicts are collected in a list.
    :param filtered_annotated_dataset: annotated dataset in JSON format
    :return: list of dicts used to prepare the data for NDCG evaluation
    """

    correct_answers = []

    for i in range(0, len(filtered_annotated_dataset)):
        correct_answer = {}
        line = filtered_annotated_dataset[i]
        for sublist in line["results"]:
            correct_answer[sublist['answer_id']] = sublist['category']

        correct_answers.append(correct_answer)

    return correct_answers


def filter_and_parsing_assignment_article_id_to_category(predicted_list, correct_answers):
    """
    Map each predicted article id to its relevance grade for NDCG evaluation.
    :param predicted_list: search results to evaluate
    :param correct_answers: correct answers from the annotated dataset
    :return: list of relevance grades in the predicted order
    """

    predictions = []

    # loop over the searched dataset
    for i in range(0, len(predicted_list)):
        prediction = []

        # parse the data into temporary variables
        line = predicted_list[i]
        correct_answer = correct_answers[i]

        # take each id and look it up in the dict built from the annotated dataset
        for j in range(0, len(line)):

            # if the article id is among the correct answers
            if line[j] in correct_answer:
                # relevance grades:
                #   category 1 -> gain 3
                #   category 2 -> gain 2
                #   category 3 -> gain 1
                #   otherwise  -> gain 0
                if correct_answer[line[j]] == 1:
                    prediction.append(3)
                elif correct_answer[line[j]] == 2:
                    prediction.append(2)
                else:
                    # correct_answer[line[j]] == 3
                    prediction.append(1)
            else:
                prediction.append(0)

        # save the collected values
        predictions.append(prediction)

    return predictions


def count_ndcg(relevance, count_results):
    """
    Compute the NDCG metric.
    :param relevance: search results expressed as relevance grades
    :param count_results: number of top results (k) to evaluate
    :return: mean NDCG value
    """

    df = pd.DataFrame()
    df['Count Results'] = range(1, count_results + 1)
    df['SUM NDCG'] = [0.00] * count_results

    K = count_results

    for line in relevance:

        # sort the items in 'relevance' from most to least relevant
        ideal_relevance = sorted(line, reverse=True)

        dcg = 0
        idcg = 0

        for k in range(1, K + 1):
            # rel_k values
            rel_k = line[k - 1]
            ideal_rel_k = ideal_relevance[k - 1]

            # accumulate DCG and IDCG
            dcg += rel_k / log2(1 + k)
            idcg += ideal_rel_k / log2(1 + k)

            if dcg == 0.00 and idcg == 0.00:
                ndcg = 0

            else:
                # NDCG at this cutoff
                ndcg = dcg / idcg

            df.at[k - 1, 'SUM NDCG'] += ndcg

    # create a new "NDCG" column in the DataFrame
    df['NDCG'] = round(df['SUM NDCG'] / len(relevance), 2)
    print_ndcg = round(df.at[count_results - 1, 'NDCG'], 2)
    print(f"NDCG Metric for {count_results} is: {print_ndcg} \n")
    print(df)

    sum_ndcg = df['NDCG'].sum()
    ndcg_mean = sum_ndcg / K

    return round(ndcg_mean, 2)
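The NDCG path needs the two preparation steps above. A toy walk-through with a single hypothetical question, where category 1 answers receive gain 3, category 2 gain 2 and category 3 gain 1:

from metrics import ndcg

annotated = [
    {"results": [{"answer_id": 10, "category": 1},
                 {"answer_id": 11, "category": 2},
                 {"answer_id": 12, "category": 3}]},
]
predicted = [[11, 10, 99, 12, 98]]  # ranked article IDs for the question

gold = ndcg.filtering_annotated_dataset_for_eval_ndcg(annotated)
relevance = ndcg.filter_and_parsing_assignment_article_id_to_category(predicted, gold)
# relevance == [[2, 3, 0, 1, 0]]
ndcg_mean = ndcg.count_ndcg(relevance, 5)  # prints NDCG@5 and the per-k table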
metrics/recall.py
ADDED
@@ -0,0 +1,74 @@
import pandas as pd


# recall@k function
def recall(actual, predicted, k):
    """
    Compute recall for one result set.
    :param actual: relevant results for the question from the annotated dataset
    :param predicted: ranked results for the question from the search engine
    :param k: maximum number of results in the set
    :return: recall value
    """

    # correct results
    act_set = actual
    # search results, cut to the top k
    pred_set = predicted[:k]

    # count the matching items
    common_elements = 0
    for item in act_set:
        if item in pred_set:
            common_elements += 1

    result = round(common_elements / float(len(act_set)), 2)

    return result


def count_recall(actual_list, predicted_list, count_results):
    """
    Compute recall for the search engine.
    :param actual_list: relevant results per question from the annotated dataset
    :param predicted_list: ranked results per question from the search engine
    :param count_results: number of top results (k) to evaluate
    :return: average recall value for each k
    """

    # values for parameter k
    k_start = 3
    k_end = count_results + 1

    # initialize an empty DataFrame
    df_recall = pd.DataFrame(index=range(3, count_results + 1))

    # loop over every predicted question
    for i, predicted_val in enumerate(predicted_list, 1):

        recalls = []

        # recall for this question at each k
        for k in range(k_start, k_end):
            recall_val = recall(actual_list[i - 1], predicted_val, k)
            recalls.append(recall_val)

        df_temp = pd.DataFrame({f"Question {i}": recalls}, index=range(3, count_results + 1))
        df_recall = pd.concat([df_recall, df_temp], axis=1)

        df_recall[f"Question {i}"] = recalls

    # average recall value for each k
    average_recall = df_recall.mean(axis=1)

    # print the recall values for each question separately
    # print("Recall values for every question:")
    # print(df_recall)

    # show results with two decimal places
    pd.set_option('display.float_format', '{:.2f}'.format)

    # print the overall mean recall
    # print(f"\nAll Mean Recall for {count_results} results :{round(average_recall.mean(), 2)}")

    print(f"Recall Metric for {count_results} is: {round(average_recall.iloc[-1], 2)}")
    return average_recall
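A usage sketch for count_recall with toy IDs; note that recall is computed for k from 3 up to count_results, so count_results should be at least 3:

from metrics import recall

actual = [[2, 4, 6], [1, 3]]
predicted = [[2, 9, 4, 7, 6], [5, 1, 8, 3, 0]]

avg = recall.count_recall(actual, predicted, 5)  # prints Recall@5
print(avg)  # average recall across questions for k = 3, 4, 5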
requirements.txt
ADDED
@@ -0,0 +1,2 @@
pandas
tqdm
semantic_search_results.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d1b8e3c6094c40a4f91506f180bd990b847ee248e10b8346e0347bc9ab239f4
size 30758787