rominaoji committed on
Commit
412f2f0
1 Parent(s): 1bc0874

Upload split.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. split.py +178 -0
split.py ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # want data from all documents
3
+ # want data from all classes
4
+ #
5
+
6
+ file_names = [
7
+ "adjudications.txt",
8
+ "blog.txt",
9
+ "books.txt",
10
+ "emails.txt",
11
+ "fbl.txt",
12
+ "laws.txt",
13
+ "mbl.txt",
14
+ "radio_tv_news.txt",
15
+ "school_essays.txt",
16
+ "scienceweb.txt",
17
+ "webmedia.txt",
18
+ "websites.txt",
19
+ "written-to-be-spoken.txt"
20
+ ]
21
+
22
+ def read_file(file_name):
23
+ data = []
24
+ sentence = []
25
+ with open(file_name) as fh:
26
+ for line in fh.readlines():
27
+ if not line.strip() and sentence:
28
+ data.append(sentence)
29
+ sentence = []
30
+ continue
31
+ parts = line.strip().split()
32
+ if len(parts) >= 2:
33
+ w, t = parts[0], parts[1] # Take the first two items only
34
+ sentence.append((w, t))
35
+ return data
36
+
37
+
38
+ from collections import defaultdict
39
+ def calc_stats(data):
40
+ stats = defaultdict(int)
41
+ for sent in data:
42
+ stats["n_sentences"] += 1
43
+ for token, label in sent:
44
+ stats[label] += 1
45
+ return stats
46
+
47
+
48
+ import pprint
49
+ def get_total_stats():
50
+ total_stats = defaultdict(int)
51
+ for file_name in file_names:
52
+ d = read_file("data/"+file_name)
53
+ stats = calc_stats(d)
54
+ #print(f"--- [{file_name}]---")
55
+ #pprint.pprint(stats)
56
+ for k, v in stats.items():
57
+ total_stats[k] += v
58
+ #print("---- TOTAL ---- ")
59
+ #pprint.pprint(total_stats)
60
+ return total_stats
61
+
62
+ import random
63
+ random.seed(1)
64
+
65
+ def check_if_not_done(stats, total_stats, target):
66
+ for k, v in total_stats.items():
67
+ if v * target > stats[k]:
68
+ return True
69
+ return False
70
+
71
+ def create_splits(train=0.8, test=0.1, dev=0.1):
72
+ train_data = []
73
+ test_data = []
74
+ dev_data = []
75
+
76
+ total_stats = get_total_stats()
77
+
78
+ for file_name in file_names:
79
+ train_stats = defaultdict(int)
80
+ test_stats = defaultdict(int)
81
+ dev_stats = defaultdict(int)
82
+
83
+ d = read_file("data/"+file_name)
84
+ stats = calc_stats(d)
85
+ random.shuffle(d)
86
+
87
+ file_train = []
88
+ file_test = []
89
+ file_dev = []
90
+
91
+ for sent in d:
92
+ if check_if_not_done(test_stats, stats, test):
93
+ # TEST data
94
+ use = False
95
+ for token in sent:
96
+ w, tag = token
97
+ if tag == 'O':
98
+ continue
99
+ if test_stats[tag] < test * stats[tag] - 5:
100
+ use = True
101
+ if test_stats['n_sentences'] < test * stats['n_sentences'] - 5:
102
+ use = True
103
+ if use:
104
+ file_test.append(sent)
105
+ test_stats['n_sentences'] += 1
106
+ for w, t in sent:
107
+ test_stats[t] += 1
108
+ elif check_if_not_done(dev_stats, stats, dev):
109
+ # DEV DATA
110
+ use = False
111
+ for token in sent:
112
+ w, tag = token
113
+ if tag == 'O':
114
+ continue
115
+ if dev_stats[tag] < dev * stats[tag] - 5:
116
+ use = True
117
+ if dev_stats['n_sentences'] < dev * stats['n_sentences'] - 5:
118
+ use = True
119
+ if use:
120
+ file_dev.append(sent)
121
+ dev_stats['n_sentences'] += 1
122
+ for w, t in sent:
123
+ dev_stats[t] += 1
124
+ else:
125
+ file_train.append(sent)
126
+ train_stats['n_sentences'] += 1
127
+ for w, t in sent:
128
+ train_stats[t] += 1
129
+ else:
130
+ file_train.append(sent)
131
+ train_stats['n_sentences'] += 1
132
+ for w, t in sent:
133
+ train_stats[t] += 1
134
+ try:
135
+ assert len(d) == len(file_train) + len(file_dev) + len(file_test)
136
+ except:
137
+ import pdb; pdb.set_trace()
138
+ train_data += file_train
139
+ test_data += file_test
140
+ dev_data += file_dev
141
+
142
+ return train_data, test_data, dev_data
143
+
144
+ train, test, dev = create_splits()
145
+
146
+ total_stats = get_total_stats()
147
+ print("---- total -----")
148
+ pprint.pprint(total_stats)
149
+ print("----- test ----")
150
+ test_stats = calc_stats(test)
151
+ pprint.pprint(test_stats)
152
+ print("----- dev ----")
153
+ dev_stats = calc_stats(dev)
154
+ pprint.pprint(dev_stats)
155
+ print("----- train ----")
156
+ train_stats = calc_stats(train)
157
+ pprint.pprint(train_stats)
158
+
159
+
160
+ with open("train.txt", "w") as outf:
161
+ for sent in train:
162
+ for w, t in sent:
163
+ outf.writelines(f"{w} {t}\n")
164
+ outf.writelines("\n")
165
+
166
+
167
+ with open("test.txt", "w") as outf:
168
+ for sent in test:
169
+ for w, t in sent:
170
+ outf.writelines(f"{w} {t}\n")
171
+ outf.writelines("\n")
172
+
173
+
174
+ with open("dev.txt", "w") as outf:
175
+ for sent in dev:
176
+ for w, t in sent:
177
+ outf.writelines(f"{w} {t}\n")
178
+ outf.writelines("\n")