File size: 4,838 Bytes
10a171d
 
 
 
 
 
a04f566
 
10a171d
 
 
 
 
 
 
 
 
 
 
a04f566
 
 
 
 
 
 
 
 
 
343756c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a04f566
343756c
 
 
 
 
 
 
 
 
 
a04f566
343756c
 
 
a04f566
 
 
10a171d
 
 
 
 
 
 
 
 
 
 
 
343756c
10a171d
343756c
10a171d
343756c
10a171d
343756c
10a171d
343756c
10a171d
a04f566
 
343756c
a04f566
 
 
10a171d
a04f566
10a171d
 
343756c
10a171d
 
 
a04f566
10a171d
a04f566
 
10a171d
 
 
 
 
 
 
 
 
 
a04f566
 
10a171d
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import os
import json
import tqdm
import functools
import collections
import multiprocessing
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel


def extract_domains(filename):
    """Collect the distinct ``"domain"`` values from one JSONL file.

    Each line of *filename* is a JSON object with (at least) a
    ``"domain"`` key.  Returns ``(filename, domains)`` so that results
    can be matched back to their source file when consumed through
    ``Pool.imap_unordered``.
    """
    seen = set()
    with open(filename) as handle:
        for raw in handle:
            record = json.loads(raw.strip())
            seen.add(record["domain"])
    return filename, list(seen)


def filter_valid(questions):
    """Drop questions whose answer duplicates an earlier one.

    The first question carrying a given ``"answer"`` value is kept;
    every later question with the same answer is discarded.  Relative
    order of the kept questions is preserved.
    """
    kept = []
    seen_answers = set()
    for item in questions:
        ans = item["answer"]
        if ans not in seen_answers:
            kept.append(item)
            seen_answers.add(ans)
    return kept


# def format_to_valid(questions):
#     answers_txt = [e["answer"] for e in questions]
#     questions_txt = [e["question"] for e in questions]
#     vectorizer = TfidfVectorizer()
#     vectorizer.fit(answers_txt + questions_txt)
#     answer_vectors = vectorizer.transform(answers_txt)
#     for i, question in enumerate(questions):
#         similarities = linear_kernel(answer_vectors[[i]], answer_vectors).flatten()
#         answer_scores = [(j, sim) for j, sim in enumerate(similarities) if sim != 1]
#         answer_scores = sorted(answer_scores, key=lambda x: x[1], reverse=True)
#         sorted_answers = [questions[j]["answer"] for j, _ in answer_scores if questions[j]["answer"] != question["answer"]]
#         negative_answer = sorted_answers[len(sorted_answers) // 2]
#         assert question["answer"] not in sorted_answers
#         question["candidates"] = [question["answer"]] + sorted_answers
#         question["negative_example"] = negative_answer
#     return questions


def format_to_valid(questions):
    """Attach a ``"candidates"`` list to every question in place.

    A question's candidates are its own answer first, followed by the
    answers of all other questions in their original order (occurrences
    equal to the own answer are excluded).  Mutates and returns
    *questions*.
    """
    pool = [q["answer"] for q in questions]
    for q in questions:
        own = q["answer"]
        q["candidates"] = [own] + [a for a in pool if a != own]
    return questions


def format_to_train(questions):
    """Assign each question a ``"negative"`` answer for training.

    The negative for question *i* is the answer of question *i + 1*,
    wrapping around so the last question gets the first answer.  With a
    single question the negative equals its own answer (callers pass
    whole domains, which normally contain several questions).

    Mutates and returns *questions*.  An empty list is returned
    unchanged — the original raised ``IndexError`` on empty input
    because the rotation indexed element 0 unconditionally.
    """
    if not questions:
        return questions
    answers = [q["answer"] for q in questions]
    rotated = answers[1:] + answers[:1]  # shift left by one, with wrap-around
    for question, negative in zip(questions, rotated):
        question["negative"] = negative
    return questions


def _route_domain(domain_data, domain, mapping, train, valid):
    """Route one finished domain's questions into *train* or *valid* in place.

    A domain goes to train when any of these hold (checked in order):
    it appears in more than one language (``mapping``), the valid split
    is already large enough (> 2000), it spans more than one page, or it
    has fewer than 15 questions (before or after answer deduplication).
    Otherwise its deduplicated questions go to valid with candidate
    lists attached.
    """
    form_questions = format_to_train(domain_data["questions"])
    if len(mapping[domain]) > 1:
        train.extend(form_questions)
    elif len(valid) > 2000:
        train.extend(form_questions)
    elif len(domain_data["pages"]) > 1:
        train.extend(form_questions)
    elif len(domain_data["questions"]) < 15:
        train.extend(form_questions)
    else:
        questions = filter_valid(domain_data["questions"])
        if len(questions) < 15:
            train.extend(form_questions)
        else:
            valid.extend(format_to_valid(questions))


def valid_train_split(filename, mapping=None):
    """Split one JSONL file's questions into train and valid sets.

    Lines are grouped by consecutive ``"domain"`` value; each finished
    group is routed by :func:`_route_domain`.  Returns
    ``(train, valid, filename)`` so results can be matched back to their
    source under ``Pool.imap_unordered``.

    *mapping* maps each domain to the list of languages it appears in;
    it is required in practice (``None`` would fail on the first lookup).
    """
    previous_domain = ""
    train = []
    valid = []
    domain_data = {"questions": [], "pages": set()}
    with open(filename) as f:
        for line_txt in f:
            line = json.loads(line_txt.strip())
            domain = line["domain"]
            if domain != previous_domain and previous_domain != "":
                _route_domain(domain_data, previous_domain, mapping, train, valid)
                domain_data = {"questions": [], "pages": set()}
            domain_data["questions"].append(line)
            domain_data["pages"].add(line["domain_index"])
            previous_domain = domain
    # Bug fix: flush the final domain, which the original loop silently
    # dropped (a commented-out partial flush hinted at this).
    if domain_data["questions"]:
        _route_domain(domain_data, previous_domain, mapping, train, valid)
    return train, valid, filename


def main():
    """Build train/valid splits for every ``*.json`` JSONL file under data/."""
    data = [f"data/{e}" for e in os.listdir("data") if e.endswith(".json")]

    # Map each domain to the list of languages it appears in; domains
    # spanning multiple languages are forced into the train split later.
    domain_count = collections.defaultdict(list)
    # with multiprocessing.Pool(os.cpu_count()) as p:
    with multiprocessing.Pool(1) as p:
        for filename, domains in tqdm.tqdm(p.imap_unordered(extract_domains, data)):
            # Assumes filenames look like data/<name>.<lang>.json, so the
            # middle dot-separated field is the language — TODO confirm.
            language = filename.split(".")[1]
            for domain in domains:
                domain_count[domain].append(language)

    # Make sure the output directories exist before any worker result
    # is written (the original crashed if they were missing).
    os.makedirs("data/train", exist_ok=True)
    os.makedirs("data/valid", exist_ok=True)

    with multiprocessing.Pool(os.cpu_count()) as p:
        fn = functools.partial(valid_train_split, mapping=domain_count)
        for train, valid, filename in tqdm.tqdm(p.imap_unordered(fn, data)):
            train_lines = [json.dumps(e, ensure_ascii=False) for e in train]
            valid_lines = [json.dumps(e, ensure_ascii=False) for e in valid]
            with open(filename.replace("data/", "data/train/"), "w+") as f:
                f.write("\n".join(train_lines))
            with open(filename.replace("data/", "data/valid/"), "w+") as f:
                f.write("\n".join(valid_lines))


# Guard is required: multiprocessing re-imports this module in child
# processes under the spawn start method (Windows/macOS default); without
# it the pools above would be created recursively at import time.
if __name__ == "__main__":
    main()