Tasks: Text Retrieval
Sub-tasks: document-retrieval
Modalities: Text
Formats: json
Languages: Slovak
Size: 10K - 100K
Tags: text-retrieval
from math import log2

import pandas as pd

def filtering_annotated_dataset_for_eval_ndcg(filtered_annotated_dataset):
    """
    Extract only the article id and category from the annotated dataset.
    Each query's answers are stored in a dict and all dicts are collected in a list.
    :param filtered_annotated_dataset: annotated dataset loaded from JSON
    :return: list of dicts mapping answer_id -> category, used to prepare data for NDCG evaluation
    """
    correct_answers = []
    for line in filtered_annotated_dataset:
        correct_answer = {}
        for sublist in line["results"]:
            correct_answer[sublist['answer_id']] = sublist['category']
        correct_answers.append(correct_answer)
    return correct_answers

def filter_and_parsing_assignment_article_id_to_category(predicted_list, correct_answers):
    """
    Map each retrieved article id to its relevance grade for NDCG evaluation.
    :param predicted_list: search results (lists of article ids), one list per query
    :param correct_answers: per-query dicts of correct answers from the annotated dataset
    :return: list of relevance lists, one per query, in retrieval order
    """
    predictions = []
    # iterate over the search results query by query
    for line, correct_answer in zip(predicted_list, correct_answers):
        prediction = []
        # look up each retrieved article id in the annotated answers
        for article_id in line:
            if article_id in correct_answer:
                # map the annotated category to an NDCG relevance grade:
                # category 1 -> 3, category 2 -> 2, category 3 -> 1;
                # ids that are not in the annotated answers get 0
                if correct_answer[article_id] == 1:
                    prediction.append(3)
                elif correct_answer[article_id] == 2:
                    prediction.append(2)
                else:  # correct_answer[article_id] == 3
                    prediction.append(1)
            else:
                prediction.append(0)
        predictions.append(prediction)
    return predictions
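
# A small, made-up illustration of the mapping above (the ids and categories are invented):
# one query whose annotated answers are {12: 1, 33: 2, 44: 3} and whose ranked results
# are [12, 7, 44, 33, 5] yields the relevance list [3, 0, 1, 2, 0].
correct_answers_example = [{12: 1, 33: 2, 44: 3}]
search_results_example = [[12, 7, 44, 33, 5]]
print(filter_and_parsing_assignment_article_id_to_category(search_results_example, correct_answers_example))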

def count_ndcg(relevance, count_results):
    """
    Compute NDCG for cut-offs k = 1..count_results, where
    DCG@k = sum(rel_i / log2(i + 1) for i = 1..k) and NDCG@k = DCG@k / IDCG@k.
    :param relevance: per-query lists of relevance grades in retrieval order
    :param count_results: number of top results to evaluate (the cut-off K)
    :return: NDCG averaged over all queries and over all cut-offs 1..K, rounded to two decimals
    """
    df = pd.DataFrame()
    df['Count Results'] = range(1, count_results + 1)
    df['SUM NDCG'] = [0.00] * count_results
    K = count_results
    for line in relevance:
        # the ideal ranking sorts the relevance grades from most to least relevant
        ideal_relevance = sorted(line, reverse=True)
        dcg = 0
        idcg = 0
        for k in range(1, K + 1):
            # relevance of the result at position k in the actual and the ideal ranking
            rel_k = line[k - 1]
            ideal_rel_k = ideal_relevance[k - 1]
            # accumulate DCG and IDCG
            dcg += rel_k / log2(1 + k)
            idcg += ideal_rel_k / log2(1 + k)
            if dcg == 0.00 and idcg == 0.00:
                ndcg = 0
            else:
                # NDCG@k for this query
                ndcg = dcg / idcg
            df.at[k - 1, 'SUM NDCG'] += ndcg
    # average over queries: column 'NDCG' holds the mean NDCG@k for every cut-off k
    df['NDCG'] = round(df['SUM NDCG'] / len(relevance), 2)
    print_ndcg = round(df.at[count_results - 1, 'NDCG'], 2)
    print(f"NDCG Metric for {count_results} is: {print_ndcg} \n")
    print(df)
    sum_ndcg = df['NDCG'].sum()
    ndcg_mean = sum_ndcg / K
    return round(ndcg_mean, 2)
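
# A minimal end-to-end sketch of how the helpers above can be chained together.
# The annotated records and the ranked search results below are invented and only
# illustrate the expected input shapes; the real data comes from the dataset's JSON files.
annotated = [
    {"results": [{"answer_id": 12, "category": 1}, {"answer_id": 33, "category": 2}]},
    {"results": [{"answer_id": 7, "category": 1}, {"answer_id": 44, "category": 3}]},
]
search_results = [
    [12, 7, 33, 5, 91],
    [44, 18, 7, 3, 9],
]
correct_answers = filtering_annotated_dataset_for_eval_ndcg(annotated)
relevance = filter_and_parsing_assignment_article_id_to_category(search_results, correct_answers)
print(count_ndcg(relevance, count_results=5))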