from utils import read_json_file, write_jsonl_file, parse
import os


def preprocess(args, split):
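    """Convert one CoQA split into the repo's multi-turn dialog JSONL format.

    Each passage becomes a single example: the passage text goes into the
    knowledge field, and the dialog alternates between each question (USER)
    and every available candidate answer for it (SYSTEM).
    """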
    path = os.path.join(args.input_dir, f"coqa-{split}-v1.0.json")
    data = read_json_file(path)

    outfile = os.path.join(args.output_dir, f"{split}.jsonl")

    data = data["data"]

    turns = []
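    # Build one dialog example per CoQA passage.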
    for i in range(len(data)):
        t = {
            "turn": "multi",
            "locale": "en",
            "dialog": [],
            "knowledge": {
                "type": "dict",
                "value": {"source": data[i]["source"], "passage": data[i]["story"]},
            },
        }

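        # Start from the primary answers (one per question).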
        cand_answers = [[a["input_text"] for a in data[i]["answers"]]]

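        # Sanity check: the train split is expected to carry only the primary
        # answers; the dev split adds extra annotator sets under "additional_answers".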
        assert split != "train" or "additional_answers" not in data[i]
        if "additional_answers" in data[i]:
            for answers in data[i]["additional_answers"].values():
                cand_answers.append([a["input_text"] for a in answers])

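        # Transpose so cand_answers[j] holds every candidate answer for question j.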
        cand_answers = list(zip(*cand_answers))

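        # Emit a USER turn for each question, followed by one SYSTEM turn per candidate answer.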
        for q, answers in zip(data[i]["questions"], cand_answers):
            dq = {"roles": ["USER"], "utterance": q["input_text"]}

            t["dialog"].append(dq)
            for answer in answers:
                da = {"roles": ["SYSTEM"], "utterance": answer}
                t["dialog"].append(da)

        turns.append(t)

    write_jsonl_file(turns, outfile)


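# Unused variant (kept commented out): merges the primary and additional
# annotator answers into a single answer turn per question.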
# def preprocess_gold(args, file):
#     path = os.path.join(args.input_dir, f"{file}.json")
#     data = read_json_file(path)
#     data = data["data"]

#     turns = []
#     for i in range(len(data)):
#         t = {
#             "turn": "multi",
#             "locale": "en",
#             "title": {
#                 "name": data[i]["name"]
#             },
#             "dialog": [],
#             "knowledge": {
#                 "type": "text",
#                 "value": data[i]["story"]
#             }
#         }
#         for q, a0, a1, a2, a3 in zip(
#             data[i]["questions"],
#             data[i]["answers"],
#             data[i]["additional_answers"]["0"],
#             data[i]["additional_answers"]["1"],
#             data[i]["additional_answers"]["2"]
#         ):
#             t = deepcopy(t)
#             dq = {
#                 "role": "question",
#                 "utterance": q["input_text"]
#             }
#             da = {
#                 "role": "answer",
#                 "utterance": "\n".join([a0["input_text"], a1["input_text"], a2["input_text"], a3["input_text"]])
#             }
#             t["dialog"].append(dq)
#             t["dialog"].append(da)

#         turns.append(t)

#     write_jsonl_file(turns, args.output_dir + "/" + file + ".jsonl")


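# parse() from utils is assumed to provide input_dir and output_dir; both CoQA splits are converted.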
if __name__ == "__main__":
    args = parse()
    preprocess(args, "train")
    preprocess(args, "dev")