File size: 2,988 Bytes
9c8ab32
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113


# Input: the tab-separated MultiNLI training file, scanned line-by-line in
# pass 1 below. NOTE(review): neither handle opened here is explicitly
# closed anywhere visible except at interpreter exit — consider `with`.
fi = open("multinli_1.0_train.txt", "r")
# Output: the selected examples, copied verbatim from the .jsonl version of
# the corpus during pass 2 at the bottom of the script.
output_file = open("multinli_const.txt", "w")
# Line indices (into the .txt file, header included in the numbering) of
# examples whose filtered hypothesis string is a constituent of the premise
# parse; consumed by pass 2 to pick lines out of the .jsonl file.
output_index_list = []
# Per-label tallies of matched examples, reported at the end.
count_entailment = 0
count_neutral = 0
count_contradiction = 0

def parse_phrase_list(parse, phrases):
    """Collect every parenthesized constituent of *parse* into *phrases*.

    *parse* is a space-tokenized parse string whose brackets appear as the
    standalone tokens "(" and ")".  Innermost constituents are extracted
    first: each sweep records every deepest "( ... )" span (joined with
    spaces, brackets stripped) and rebuilds the string with that span's
    brackets removed, then the sweep repeats on the rebuilt string until it
    is empty.  Tokens outside any bracket at the end of a sweep are dropped,
    matching the original recursive formulation.

    The *phrases* list is extended in place and also returned.
    """
    while parse:
        pending = []   # current innermost candidate, "(" sentinel first
        rebuilt = []   # tokens carried over to the next sweep
        for token in parse.split():
            if token == "(":
                # A new (possibly deeper) open bracket: flush whatever was
                # accumulating and start a fresh candidate.
                rebuilt += pending
                pending = ["("]
            elif token == ")" and pending and pending[0] == "(":
                # Close of an innermost span: record it and re-emit its
                # contents without the surrounding brackets.
                phrases.append(" ".join(pending[1:]))
                rebuilt += pending[1:]
                pending = []
            elif token == ")":
                # Close bracket with no matching innermost open: keep it
                # (and any loose tokens) for a later sweep.
                rebuilt += pending
                rebuilt.append(")")
                pending = []
            else:
                pending.append(token)
        # Note: a trailing `pending` with no close bracket is intentionally
        # discarded, as in the original recursion.
        parse = " ".join(rebuilt)
    return phrases

# Pass 1: walk the tab-separated training file and record the index of every
# example whose (lower-cased, punctuation-stripped) hypothesis occurs
# verbatim as a constituent of the premise's constituency parse.
skip_header = True

lines_seen = 0
for line_index, line in enumerate(fi):
    lines_seen += 1

    # The first line of the .txt file is a column header, not an example.
    if skip_header:
        skip_header = False
        continue

    fields = line.strip().split("\t")

    # Column layout of multinli_1.0_train.txt: label first, then the
    # premise parse, with the raw sentences at columns 5 and 6.
    label = fields[0]
    parse = fields[1]
    premise = fields[5]
    hypothesis = fields[6]

    punctuation = (".", "?", "!")

    # Lower-case the parse tokens and drop standalone sentence punctuation
    # before extracting constituents.
    cleaned_parse = " ".join(
        tok.lower() for tok in parse.split() if tok not in punctuation
    )
    all_phrases = parse_phrase_list(cleaned_parse, [])

    # Normalize both sentences the same way: drop standalone punctuation
    # tokens, lower-case, and strip embedded . ? ! characters.
    prem_filtered = " ".join(
        w.lower().replace(".", "").replace("?", "").replace("!", "")
        for w in premise.split()
        if w not in punctuation
    )
    hyp_filtered = " ".join(
        w.lower().replace(".", "").replace("?", "").replace("!", "")
        for w in hypothesis.split()
        if w not in punctuation
    )

    if hyp_filtered in all_phrases:
        # Labels are mutually exclusive strings, so an elif chain is
        # equivalent to the separate checks.
        if label == "entailment":
            count_entailment += 1
        elif label == "neutral":
            count_neutral += 1
            print(premise, hypothesis, label)
        elif label == "contradiction":
            count_contradiction += 1
            print(premise, hypothesis, label)
        output_index_list.append(line_index)
# Pass 2: copy the selected examples out of the JSON-lines version of the
# corpus, matching by line index.
# NOTE(review): the indices were collected from the .txt file, whose first
# line is a header; if multinli_1.0_train.jsonl has no header line, every
# index is shifted by one relative to it — confirm against the data files.
selected_indices = set(output_index_list)  # O(1) membership; a list scan
                                           # per jsonl line would be O(n*m)
with open('multinli_1.0_train.jsonl', 'r') as f:
    for line_index, line in enumerate(f):
        if line_index in selected_indices:
            output_file.write(line)

# Close the handles opened at the top of the script so buffered output is
# guaranteed to be flushed to disk.
output_file.close()
fi.close()

# Summary of how many matched examples carried each label.
print("Entailment:", count_entailment)
print("Contradiction:", count_contradiction)
print("Neutral:", count_neutral)