File size: 2,703 Bytes
ef0ca20
cf05905
 
 
 
 
 
ef0ca20
cf05905
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ef0ca20
 
 
 
cf05905
 
 
 
 
ef0ca20
 
cf05905
 
ef0ca20
cf05905
 
 
 
 
 
 
 
 
ef0ca20
 
cf05905
 
 
ef0ca20
 
 
cf05905
 
 
 
 
 
 
 
 
ef0ca20
 
 
cf05905
ef0ca20
 
cf05905
 
ef0ca20
cf05905
b35f8ea
cf05905
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
from datasets import load_dataset, Features, Value, Sequence
from dataclasses import dataclass, field
import logging
from transformers import HfArgumentParser
from tqdm import tqdm
from typing import Dict, List
import json
import numpy as np

logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(
    logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
)
logger.handlers = [console_handler]


@dataclass
class ConversionAgruments:
    """Command-line arguments for this conversion script (parsed by HfArgumentParser).

    NOTE(review): the class name misspells "Arguments"; kept as-is because
    main() references it by this exact name.
    """

    # Destination directory for the generated {split}.jsonl files.
    out: str = field(metadata={"help": "Output path"})


@dataclass
class QRel:
    """A single relevance judgement for one (query, document) pair."""

    # Corpus document id the judgement refers to.
    doc: int
    # Relevance score: > 0 is treated as positive, == 0 as negative
    # (see process_raw).
    score: int


def load_msmarco(path: str, split) -> Dict[int, str]:
    """Load one MSMARCO table and return it as an id -> text mapping.

    The BeIR datasets use the same name for the config and the split
    (e.g. "queries" or "corpus"), hence `split` is passed twice.
    """
    rows = load_dataset(path, split, split=split)
    return {
        int(row["_id"]): row["text"]
        for row in tqdm(rows, desc=f"loading {path} split={split}")
    }


def load_qrel(path: str, split: str) -> Dict[int, List[QRel]]:
    """Load a qrels split and group relevance judgements by query id.

    Args:
        path: HF dataset path (e.g. "BeIR/msmarco-qrels").
        split: dataset split name ("train", "test", or "validation").

    Returns:
        Mapping of query id -> list of QRel(doc, score) judgements.
    """
    # NOTE: a leftover debug `print(dataset.features)` was removed here.
    dataset = load_dataset(path, split=split)
    cache: Dict[int, List[QRel]] = {}
    for row in tqdm(dataset, desc=f"loading {path} split={split}"):
        qid = int(row["query-id"])
        qrel = QRel(int(row["corpus-id"]), int(row["score"]))
        # setdefault replaces the explicit "qid in cache" branch.
        cache.setdefault(qid, []).append(qrel)
    return cache


def process_raw(
    qrels: Dict[int, List[QRel]], queries: Dict[int, str], corpus: Dict[int, str]
) -> List[Dict]:
    """Join qrels with query/corpus text into per-query training groups.

    For each query, judged documents found in the corpus are partitioned
    into positives (score > 0) and negatives (score == 0). Documents
    missing from the corpus — and judgements with a negative score — are
    dropped. Raises KeyError if a query id is absent from `queries`,
    matching the original behaviour.
    """
    groups: List[Dict] = []
    for qid, judgements in tqdm(qrels.items(), desc="processing split"):
        positives: List[str] = []
        negatives: List[str] = []
        # Single pass over the judgements instead of two filtered scans.
        for judgement in judgements:
            if judgement.doc not in corpus:
                continue
            text = corpus[judgement.doc]
            if judgement.score > 0:
                positives.append(text)
            elif judgement.score == 0:
                negatives.append(text)
        groups.append(
            {"query": queries[qid], "positive": positives, "negative": negatives}
        )
    return groups


def main():
    """Entry point: dump MSMARCO train/test/dev qrels as JSONL group files.

    Writes one "{split}.jsonl" file per qrels split into args.out, each line
    a {"query", "positive", "negative"} group produced by process_raw().
    """
    # Original wrote HfArgumentParser((ConversionAgruments)) — the extra
    # parens are not a tuple, just misleading; a bare dataclass is accepted.
    parser = HfArgumentParser(ConversionAgruments)
    (args,) = parser.parse_args_into_dataclasses()
    # Use the logger configured at module level instead of bare print().
    logger.info("Args: %s", args)
    # The "dev" split is published under the name "validation" upstream.
    qrels = {
        "train": load_qrel("BeIR/msmarco-qrels", split="train"),
        "test": load_qrel("BeIR/msmarco-qrels", split="test"),
        "dev": load_qrel("BeIR/msmarco-qrels", split="validation"),
    }
    queries = load_msmarco("BeIR/msmarco", split="queries")
    corpus = load_msmarco("BeIR/msmarco", split="corpus")
    logger.info("processing done")
    for split, data in qrels.items():
        dataset = process_raw(data, queries, corpus)
        # Explicit UTF-8: the default text encoding is platform-dependent.
        with open(f"{args.out}/{split}.jsonl", "w", encoding="utf-8") as out:
            for item in dataset:
                json.dump(item, out)
                out.write("\n")
    logger.info("done")


if __name__ == "__main__":
    main()