beyondzero committed
Commit 9c20382 · verified · 1 Parent(s): 97cf954
Files changed (1)
  1. preprocess_marc_ko.py +252 -0
preprocess_marc_ko.py ADDED
@@ -0,0 +1,252 @@
+ import random
+ import string
+ import warnings
+ from typing import Dict, List, Optional, Union
+
+ import datasets as ds
+ import pandas as pd
+
+
+ class MarcKoConfig(ds.BuilderConfig):
+     def __init__(
+         self,
+         name: str = "MARC-ko",
+         is_han_to_zen: bool = False,
+         max_instance_num: Optional[int] = None,
+         max_char_length: int = 500,
+         remove_netural: bool = True,
+         train_ratio: float = 0.94,
+         val_ratio: float = 0.03,
+         test_ratio: float = 0.03,
+         output_testset: bool = False,
+         filter_review_id_list_valid: bool = True,
+         label_conv_review_id_list_valid: bool = True,
+         version: Optional[Union[ds.utils.Version, str]] = ds.utils.Version("0.0.0"),
+         data_dir: Optional[str] = None,
+         data_files: Optional[ds.data_files.DataFilesDict] = None,
+         description: Optional[str] = None,
+     ) -> None:
+         super().__init__(
+             name=name,
+             version=version,
+             data_dir=data_dir,
+             data_files=data_files,
+             description=description,
+         )
+         # Compare with a small tolerance so floating-point rounding does not
+         # reject valid ratio combinations such as 0.94 + 0.03 + 0.03.
+         if abs(train_ratio + val_ratio + test_ratio - 1.0) > 1e-8:
+             raise ValueError(
+                 "train_ratio + val_ratio + test_ratio should be 1.0, "
+                 f"but got {train_ratio} + {val_ratio} + {test_ratio} = {train_ratio + val_ratio + test_ratio}"
+             )
+
+         self.train_ratio = train_ratio
+         self.val_ratio = val_ratio
+         self.test_ratio = test_ratio
+
+         self.is_han_to_zen = is_han_to_zen
+         self.max_instance_num = max_instance_num
+         self.max_char_length = max_char_length
+         self.remove_netural = remove_netural
+         self.output_testset = output_testset
+
+         self.filter_review_id_list_valid = filter_review_id_list_valid
+         self.label_conv_review_id_list_valid = label_conv_review_id_list_valid
+
+
+ def get_label(rating: int, remove_netural: bool = False) -> Optional[str]:
+     # Map a 1-5 star rating to a sentiment label; 3-star reviews are either
+     # labelled "neutral" or dropped (None), depending on `remove_netural`.
+     if rating >= 4:
+         return "positive"
+     elif rating <= 2:
+         return "negative"
+     else:
+         if remove_netural:
+             return None
+         else:
+             return "neutral"
+
+
+ def is_filtered_by_ascii_rate(text: str, threshold: float = 0.9) -> bool:
+     # Filter out reviews whose characters are mostly printable ASCII,
+     # i.e. reviews that are unlikely to contain Korean text.
+     ascii_letters = set(string.printable)
+     rate = sum(c in ascii_letters for c in text) / len(text)
+     return rate >= threshold
+
+
+ def shuffle_dataframe(df: pd.DataFrame) -> pd.DataFrame:
+     # Shuffle rows with a fixed seed so the split is reproducible.
+     instances = df.to_dict(orient="records")
+     random.seed(1)
+     random.shuffle(instances)
+     return pd.DataFrame(instances)
+
+
+ def get_filter_review_id_list(
+     filter_review_id_list_paths: Dict[str, str],
+ ) -> Dict[str, List[str]]:
+     filter_review_id_list_valid = filter_review_id_list_paths.get("valid")
+     filter_review_id_list_test = filter_review_id_list_paths.get("test")
+
+     filter_review_id_list = {}
+
+     if filter_review_id_list_valid is not None:
+         with open(filter_review_id_list_valid, "r") as rf:
+             filter_review_id_list["valid"] = [line.rstrip() for line in rf]
+
+     if filter_review_id_list_test is not None:
+         with open(filter_review_id_list_test, "r") as rf:
+             filter_review_id_list["test"] = [line.rstrip() for line in rf]
+
+     return filter_review_id_list
+
+
+ def get_label_conv_review_id_list(
+     label_conv_review_id_list_paths: Dict[str, str],
+ ) -> Dict[str, Dict[str, str]]:
+     import csv
+
+     label_conv_review_id_list_valid = label_conv_review_id_list_paths.get("valid")
+     label_conv_review_id_list_test = label_conv_review_id_list_paths.get("test")
+
+     label_conv_review_id_list: Dict[str, Dict[str, str]] = {}
+
+     if label_conv_review_id_list_valid is not None:
+         with open(label_conv_review_id_list_valid, "r", encoding="utf-8") as rf:
+             label_conv_review_id_list["valid"] = {row[0]: row[1] for row in csv.reader(rf)}
+
+     if label_conv_review_id_list_test is not None:
+         with open(label_conv_review_id_list_test, "r", encoding="utf-8") as rf:
+             label_conv_review_id_list["test"] = {row[0]: row[1] for row in csv.reader(rf)}
+
+     return label_conv_review_id_list
+
+
+ def output_data(
+     df: pd.DataFrame,
+     train_ratio: float,
+     val_ratio: float,
+     test_ratio: float,
+     output_testset: bool,
+     filter_review_id_list_paths: Dict[str, str],
+     label_conv_review_id_list_paths: Dict[str, str],
+ ) -> Dict[str, pd.DataFrame]:
+     instance_num = len(df)
+     split_dfs: Dict[str, pd.DataFrame] = {}
+     length1 = int(instance_num * train_ratio)
+     split_dfs["train"] = df.iloc[:length1]
+
+     length2 = int(instance_num * (train_ratio + val_ratio))
+     split_dfs["valid"] = df.iloc[length1:length2]
+     split_dfs["test"] = df.iloc[length2:]
+
+     filter_review_id_list = get_filter_review_id_list(
+         filter_review_id_list_paths=filter_review_id_list_paths,
+     )
+     label_conv_review_id_list = get_label_conv_review_id_list(
+         label_conv_review_id_list_paths=label_conv_review_id_list_paths,
+     )
+
+     # Drop reviews whose ids appear in the per-split filter lists.
+     for eval_type in ("valid", "test"):
+         if filter_review_id_list.get(eval_type):
+             df = split_dfs[eval_type]
+             df = df[~df["review_id"].isin(filter_review_id_list[eval_type])]
+             split_dfs[eval_type] = df
+
+     # Overwrite labels for review ids listed in this split's conversion list.
+     for eval_type in ("valid", "test"):
+         if label_conv_review_id_list.get(eval_type):
+             df = split_dfs[eval_type]
+             df = df.assign(converted_label=df["review_id"].map(label_conv_review_id_list[eval_type]))
+             df = df.assign(
+                 label=df[["label", "converted_label"]].apply(
+                     lambda xs: xs["label"] if pd.isnull(xs["converted_label"]) else xs["converted_label"],
+                     axis=1,
+                 )
+             )
+             df = df.drop(columns=["converted_label"])
+             split_dfs[eval_type] = df
+
+     output_dfs = {
+         "train": split_dfs["train"],
+         "valid": split_dfs["valid"],
+     }
+     # Only expose the test split when explicitly requested.
+     if output_testset:
+         output_dfs["test"] = split_dfs["test"]
+     return output_dfs
+
+
+ def preprocess_marc_ko(
+     config: MarcKoConfig,
+     data_file_path: str,
+     filter_review_id_list_paths: Dict[str, str],
+     label_conv_review_id_list_paths: Dict[str, str],
+ ) -> Dict[str, pd.DataFrame]:
+     # Convert half-width characters to full-width with mojimoji when it is
+     # available; otherwise fall back to a no-op.
+     try:
+         import mojimoji
+
+         def han_to_zen(text: str) -> str:
+             return mojimoji.han_to_zen(text)
+
+     except ImportError:
+         warnings.warn(
+             "can't import `mojimoji`, falling back to a method that does nothing. "
+             "We recommend running `pip install mojimoji` to reproduce the original preprocessing.",
+             UserWarning,
+         )
+
+         def han_to_zen(text: str) -> str:
+             return text
+
+     # Strip HTML tags with BeautifulSoup when available; otherwise fall back to a no-op.
+     try:
+         from bs4 import BeautifulSoup
+
+         def cleanup_text(text: str) -> str:
+             return BeautifulSoup(text, "html.parser").get_text()
+
+     except ImportError:
+         warnings.warn(
+             "can't import `beautifulsoup4`, falling back to a method that does nothing. "
+             "We recommend running `pip install beautifulsoup4` to reproduce the original preprocessing.",
+             UserWarning,
+         )
+
+         def cleanup_text(text: str) -> str:
+             return text
+
+     from tqdm import tqdm
+
+     df = pd.read_csv(data_file_path, delimiter="\t")
+     df = df[["review_body", "star_rating", "review_id"]]
+
+     # rename columns
+     df = df.rename(columns={"review_body": "text", "star_rating": "rating"})
+
+     # convert the rating to label
+     tqdm.pandas(dynamic_ncols=True, desc="Convert the rating to the label")
+     df = df.assign(label=df["rating"].progress_apply(lambda rating: get_label(rating, config.remove_netural)))
+
+     # remove rows where the label is None
+     df = df[~df["label"].isnull()]
+
+     # remove html tags from the text
+     tqdm.pandas(dynamic_ncols=True, desc="Remove html tags from the text")
+     df = df.assign(text=df["text"].progress_apply(cleanup_text))
+
+     # filter by ascii rate
+     tqdm.pandas(dynamic_ncols=True, desc="Filter by ascii rate")
+     df = df[~df["text"].progress_apply(is_filtered_by_ascii_rate)]
+
+     if config.max_char_length is not None:
+         df = df[df["text"].str.len() <= config.max_char_length]
+
+     if config.is_han_to_zen:
+         df = df.assign(text=df["text"].apply(han_to_zen))
+
+     df = df[["text", "label", "review_id"]]
+     df = df.rename(columns={"text": "sentence"})
+
+     # shuffle dataset
+     df = shuffle_dataframe(df)
+
+     split_dfs = output_data(
+         df=df,
+         train_ratio=config.train_ratio,
+         val_ratio=config.val_ratio,
+         test_ratio=config.test_ratio,
+         output_testset=config.output_testset,
+         filter_review_id_list_paths=filter_review_id_list_paths,
+         label_conv_review_id_list_paths=label_conv_review_id_list_paths,
+     )
+     return split_dfs
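
For context, a minimal, hypothetical sketch of how the script above might be driven once the raw MARC-ko TSV is available locally; the input file name and the choice to write each split to CSV are illustrative assumptions, not part of this commit:

    from preprocess_marc_ko import MarcKoConfig, preprocess_marc_ko

    config = MarcKoConfig()  # default 0.94/0.03/0.03 split, neutral reviews removed
    split_dfs = preprocess_marc_ko(
        config=config,
        data_file_path="amazon_reviews_multi_ko.tsv",  # placeholder path to the raw TSV
        filter_review_id_list_paths={},                # optional per-split review-id filter lists
        label_conv_review_id_list_paths={},            # optional per-split label-conversion CSVs
    )
    for split_name, split_df in split_dfs.items():
        split_df.to_csv(f"{split_name}.csv", index=False)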