from clddp.retriever import Retriever, RetrieverConfig, Pooling, SimilarityFunction
from clddp.dm import Separator, Query, Passage
from typing import Dict
import torch
import pytrec_eval
import numpy as np
from datasets import load_dataset
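
# Define the DRAGON+ retriever: a bi-encoder with separate query/passage encoders,
# CLS pooling, and dot-product similarity.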
class DRAGONPlus(Retriever):
    def __init__(self) -> None:
        config = RetrieverConfig(
            query_model_name_or_path="facebook/dragon-plus-query-encoder",
            passage_model_name_or_path="facebook/dragon-plus-context-encoder",
            shared_encoder=False,
            sep=Separator.blank,
            pooling=Pooling.cls,
            similarity_function=SimilarityFunction.dot_product,
            query_max_length=512,
            passage_max_length=512,
        )
        super().__init__(config)
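
# Load the ConditionalQA corpus, queries, and relevance judgments (qrels) from DAPR: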
passages = load_dataset("kwang2049/dapr", "ConditionalQA-corpus", split="test")
queries = load_dataset("kwang2049/dapr", "ConditionalQA-queries", split="test")
qrels_rows = load_dataset("kwang2049/dapr", "ConditionalQA-qrels", split="test")
qrels: Dict[str, Dict[str, float]] = {}
for qrel_row in qrels_rows:
    qid = qrel_row["query_id"]
    pid = qrel_row["corpus_id"]
    rel = qrel_row["score"]
    qrels.setdefault(qid, {})
    qrels[qid][pid] = rel
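
# Build the retriever and encode all queries and passages (no gradients needed at inference):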
retriever = DRAGONPlus()
retriever.eval()
queries = [Query(query_id=query["_id"], text=query["text"]) for query in queries]
passages = [
    Passage(passage_id=passage["_id"], text=passage["text"]) for passage in passages
]
with torch.no_grad():
    query_embeddings = retriever.encode_queries(queries)
    passage_embeddings, passage_mask = retriever.encode_passages(passages)
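
# Score every query against every passage by dot product and keep the top-10 passages per query: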
similarity_scores = torch.matmul(query_embeddings, passage_embeddings.t())
topk = torch.topk(similarity_scores, k=10)
topk_values: torch.Tensor = topk[0]
topk_indices: torch.LongTensor = topk[1]
topk_value_lists = topk_values.tolist()
topk_index_lists = topk_indices.tolist()
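
# Convert the top-10 results into pytrec_eval's run format ({query_id: {passage_id: score}})
# and compute nDCG@10 against the qrels: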
retrieval_scores: Dict[str, Dict[str, float]] = {}
for query_i, (values, indices) in enumerate(zip(topk_value_lists, topk_index_lists)):
    query_id = queries[query_i].query_id
    retrieval_scores.setdefault(query_id, {})
    for value, passage_i in zip(values, indices):
        passage_id = passages[passage_i].passage_id
        retrieval_scores[query_id][passage_id] = value
evaluator = pytrec_eval.RelevanceEvaluator(
    query_relevance=qrels, measures=["ndcg_cut_10"]
)
query_performances: Dict[str, Dict[str, float]] = evaluator.evaluate(retrieval_scores)
ndcg = np.mean([score["ndcg_cut_10"] for score in query_performances.values()])
print(ndcg)
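# The printed value is the nDCG@10 averaged over the ConditionalQA test queries.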