Create preprocess_marc_ja.py
Browse files- preprocess_marc_ja.py +261 -0
preprocess_marc_ja.py
ADDED
@@ -0,0 +1,261 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This code is licensed under CC-BY-4.0 from the original work by shunk031.
|
3 |
+
The code is adapted from https://huggingface.co/datasets/shunk031/JGLUE/blob/main/JGLUE.py
|
4 |
+
with minor modifications to the code structure.
|
5 |
+
This codebase provides pre-processing functionality for the MARC-ja dataset in the Japanese GLUE benchmark.
|
6 |
+
The original code can be found at https://github.com/yahoojapan/JGLUE/blob/main/preprocess/marc-ja/scripts/marc-ja.py.
|
7 |
+
"""
|
8 |
+
|
9 |
+
import math
import random
import string
import warnings
from typing import Dict, List, Optional, Union

import datasets as ds
import pandas as pd
|
16 |
+
|
17 |
+
|
18 |
+
class MarcJaConfig(ds.BuilderConfig):
    """Builder configuration for preprocessing the MARC-ja dataset.

    Holds the split ratios and the preprocessing switches used by
    ``preprocess_marc_ja`` (HTML stripping, ASCII-rate filtering, etc.).

    Args:
        name: configuration name passed to ``ds.BuilderConfig``.
        is_han_to_zen: convert half-width characters to full-width.
        max_instance_num: optional cap on the number of instances.
        max_char_length: drop reviews longer than this many characters.
        remove_netural: drop 3-star (neutral) reviews. (Name kept as-is,
            sic, for backward compatibility with existing callers.)
        train_ratio / val_ratio / test_ratio: contiguous split proportions;
            must sum to 1.0.
        output_testset: also emit the test split from ``output_data``.
        filter_review_id_list_valid: apply the valid-split id filter list.
        label_conv_review_id_list_valid: apply the valid-split label fixes.
        version / data_dir / data_files / description: forwarded verbatim
            to ``ds.BuilderConfig``.

    Raises:
        ValueError: when the three ratios do not sum to 1.0.
    """

    def __init__(
        self,
        name: str = "MARC-ja",
        is_han_to_zen: bool = False,
        max_instance_num: Optional[int] = None,
        max_char_length: int = 500,
        remove_netural: bool = True,
        train_ratio: float = 0.94,
        val_ratio: float = 0.03,
        test_ratio: float = 0.03,
        output_testset: bool = False,
        filter_review_id_list_valid: bool = True,
        label_conv_review_id_list_valid: bool = True,
        version: Optional[Union[ds.utils.Version, str]] = ds.utils.Version("0.0.0"),
        data_dir: Optional[str] = None,
        data_files: Optional[ds.data_files.DataFilesDict] = None,
        description: Optional[str] = None,
    ) -> None:
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        # Bug fix: the original used exact float equality (`... != 1.0`),
        # which wrongly rejects valid ratios such as 0.8 + 0.1 + 0.1
        # (their binary-float sum is 1.0000000000000002, not 1.0).
        if not math.isclose(train_ratio + val_ratio + test_ratio, 1.0):
            raise ValueError(
                "train_ratio + val_ratio + test_ratio should be 1.0, "
                f"but got {train_ratio} + {val_ratio} + {test_ratio} = {train_ratio + val_ratio + test_ratio}"
            )

        self.train_ratio = train_ratio
        self.val_ratio = val_ratio
        self.test_ratio = test_ratio

        self.is_han_to_zen = is_han_to_zen
        self.max_instance_num = max_instance_num
        self.max_char_length = max_char_length
        self.remove_netural = remove_netural
        self.output_testset = output_testset

        self.filter_review_id_list_valid = filter_review_id_list_valid
        self.label_conv_review_id_list_valid = label_conv_review_id_list_valid
|
62 |
+
|
63 |
+
|
64 |
+
def get_label(rating: int, remove_netural: bool = False) -> Optional[str]:
    """Map a 1-5 star rating to a sentiment label.

    Ratings of 4-5 map to ``"positive"`` and 1-2 to ``"negative"``.
    The middle rating (3) yields ``"neutral"``, or ``None`` when
    ``remove_netural`` is set so the caller can drop the row.
    (Parameter name kept as-is, sic, for compatibility.)
    """
    if rating <= 2:
        return "negative"
    if rating >= 4:
        return "positive"
    # rating == 3: the neutral middle of the scale
    return None if remove_netural else "neutral"
|
74 |
+
|
75 |
+
|
76 |
+
def is_filtered_by_ascii_rate(text: str, threshold: float = 0.9) -> bool:
    """Return True when *text* should be filtered out as (mostly) ASCII.

    MARC-ja is a Japanese corpus; a review whose proportion of
    ``string.printable`` characters is at or above ``threshold`` is
    considered non-Japanese and filtered.

    Bug fix: the original divided by ``len(text)`` unconditionally and
    raised ``ZeroDivisionError`` on empty text; an empty string is now
    treated as ASCII rate 0.0 (i.e. not filtered here).
    """
    if not text:
        return False
    ascii_letters = set(string.printable)
    rate = sum(c in ascii_letters for c in text) / len(text)
    return rate >= threshold
|
80 |
+
|
81 |
+
|
82 |
+
def shuffle_dataframe(df: pd.DataFrame) -> pd.DataFrame:
    """Return a new dataframe with the rows of *df* deterministically shuffled.

    The global RNG is re-seeded with a fixed value so that the shuffle —
    and therefore the downstream train/valid/test split — is reproducible
    across runs.
    """
    rows = df.to_dict(orient="records")
    # Fixed seed keeps the split identical on every invocation.
    random.seed(1)
    random.shuffle(rows)
    return pd.DataFrame(rows)
|
87 |
+
|
88 |
+
|
89 |
+
def get_filter_review_id_list(
    filter_review_id_list_paths: Dict[str, str],
) -> Dict[str, List[str]]:
    """Load per-split lists of review ids that should be filtered out.

    ``filter_review_id_list_paths`` may provide a file path under the
    keys ``"valid"`` and/or ``"test"``; each file holds one review id per
    line.  Splits with no path are simply absent from the result.
    """
    id_lists: Dict[str, List[str]] = {}
    for split in ("valid", "test"):
        path = filter_review_id_list_paths.get(split)
        if path is None:
            continue
        with open(path, "r") as rf:
            id_lists[split] = [line.rstrip() for line in rf]
    return id_lists
|
106 |
+
|
107 |
+
|
108 |
+
def get_label_conv_review_id_list(
    label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, Dict[str, str]]:
    """Load per-split review-id -> corrected-label mappings from CSV files.

    ``label_conv_review_id_list_paths`` may provide a CSV path under the
    keys ``"valid"`` and/or ``"test"``; each row is ``review_id,label``.
    Splits with no path are absent from the result.
    """
    import csv

    conv_tables: Dict[str, Dict[str, str]] = {}
    for split in ("valid", "test"):
        path = label_conv_review_id_list_paths.get(split)
        if path is None:
            continue
        with open(path, "r", encoding="utf-8") as rf:
            conv_tables[split] = {row[0]: row[1] for row in csv.reader(rf)}
    return conv_tables
|
127 |
+
|
128 |
+
|
129 |
+
def output_data(
    df: pd.DataFrame,
    train_ratio: float,
    val_ratio: float,
    test_ratio: float,
    output_testset: bool,
    filter_review_id_list_paths: Dict[str, str],
    label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, pd.DataFrame]:
    """Split *df* contiguously into train/valid/test and apply id-based fix-ups.

    The (pre-shuffled) frame is cut by ratio: the first ``train_ratio``
    fraction becomes train, the next ``val_ratio`` fraction valid, and the
    remainder test (``test_ratio`` is implied by the other two).  For the
    "valid" and "test" splits, review ids listed in the filter files are
    dropped, and labels are overwritten for review ids present in the
    label-conversion CSVs.

    Args:
        df: dataframe with at least ``review_id`` and ``label`` columns.
        train_ratio: fraction of rows assigned to the train split.
        val_ratio: fraction of rows assigned to the valid split.
        test_ratio: remaining fraction (kept for interface symmetry).
        output_testset: when True, the returned mapping also contains the
            ``"test"`` split.  Bug fix: the original ignored this flag and
            always discarded the test split.
        filter_review_id_list_paths: optional paths keyed "valid"/"test"
            with one review id per line to remove from that split.
        label_conv_review_id_list_paths: optional CSV paths keyed
            "valid"/"test" mapping review_id -> corrected label.

    Returns:
        ``{"train": ..., "valid": ...}`` plus ``"test"`` when
        ``output_testset`` is True.
    """
    instance_num = len(df)
    split_dfs: Dict[str, pd.DataFrame] = {}
    length1 = int(instance_num * train_ratio)
    split_dfs["train"] = df.iloc[:length1]

    length2 = int(instance_num * (train_ratio + val_ratio))
    split_dfs["valid"] = df.iloc[length1:length2]
    split_dfs["test"] = df.iloc[length2:]

    filter_review_id_list = get_filter_review_id_list(
        filter_review_id_list_paths=filter_review_id_list_paths,
    )
    label_conv_review_id_list = get_label_conv_review_id_list(
        label_conv_review_id_list_paths=label_conv_review_id_list_paths,
    )

    # Drop reviews whose ids appear in the per-split filter lists.
    # (Uses split-local names instead of shadowing the `df` parameter.)
    for eval_type in ("valid", "test"):
        if filter_review_id_list.get(eval_type):
            split_df = split_dfs[eval_type]
            split_dfs[eval_type] = split_df[
                ~split_df["review_id"].isin(filter_review_id_list[eval_type])
            ]

    # Overwrite labels for review ids present in the conversion tables.
    for eval_type in ("valid", "test"):
        if label_conv_review_id_list.get(eval_type):
            split_df = split_dfs[eval_type]
            # Bug fix: look up the conversion table for the CURRENT split;
            # the original always indexed the "valid" table here, applying
            # the wrong table to "test" (and raising KeyError when only a
            # "test" table was supplied).
            split_df = split_df.assign(
                converted_label=split_df["review_id"].map(label_conv_review_id_list[eval_type])
            )
            split_df = split_df.assign(
                label=split_df[["label", "converted_label"]].apply(
                    lambda xs: xs["label"] if pd.isnull(xs["converted_label"]) else xs["converted_label"],
                    axis=1,
                )
            )
            split_dfs[eval_type] = split_df.drop(columns=["converted_label"])

    result = {
        "train": split_dfs["train"],
        "valid": split_dfs["valid"],
    }
    if output_testset:
        result["test"] = split_dfs["test"]
    return result
|
177 |
+
|
178 |
+
|
179 |
+
def preprocess_marc_ja(
    config: MarcJaConfig,
    data_file_path: str,
    filter_review_id_list_paths: Dict[str, str],
    label_conv_review_id_list_paths: Dict[str, str],
) -> Dict[str, pd.DataFrame]:
    """Run the full MARC-ja preprocessing pipeline on a raw TSV dump.

    Reads the raw review file, converts star ratings to sentiment labels,
    strips HTML from review bodies, filters out (mostly) ASCII and
    over-long reviews, optionally converts half-width to full-width
    characters, shuffles deterministically, and splits the result into
    per-split dataframes via ``output_data``.

    Args:
        config: preprocessing options (split ratios, filters, flags).
        data_file_path: path to a tab-separated file containing at least
            the columns ``review_body``, ``star_rating`` and ``review_id``.
        filter_review_id_list_paths: per-split files of review ids to drop.
        label_conv_review_id_list_paths: per-split CSVs of label overrides.

    Returns:
        Mapping of split name to dataframe with columns
        ``sentence`` / ``label`` / ``review_id``.
    """
    # `mojimoji` and `beautifulsoup4` are optional dependencies: when one
    # is missing we fall back to an identity function so the pipeline still
    # runs (with a warning), at the cost of not reproducing the original
    # preprocessing exactly.
    try:
        import mojimoji

        def han_to_zen(text: str) -> str:
            return mojimoji.han_to_zen(text)

    except ImportError:
        warnings.warn(
            "can't import `mojimoji`, failing back to method that do nothing. "
            "We recommend running `pip install mojimoji` to reproduce the original preprocessing.",
            UserWarning,
        )

        def han_to_zen(text: str) -> str:
            return text

    try:
        from bs4 import BeautifulSoup

        def cleanup_text(text: str) -> str:
            return BeautifulSoup(text, "html.parser").get_text()

    except ImportError:
        warnings.warn(
            "can't import `beautifulsoup4`, failing back to method that do nothing."
            "We recommend running `pip install beautifulsoup4` to reproduce the original preprocessing.",
            UserWarning,
        )

        def cleanup_text(text: str) -> str:
            return text

    from tqdm import tqdm

    df = pd.read_csv(data_file_path, delimiter="\t")
    df = df[["review_body", "star_rating", "review_id"]]

    # rename columns
    df = df.rename(columns={"review_body": "text", "star_rating": "rating"})

    # convert the rating to label
    tqdm.pandas(dynamic_ncols=True, desc="Convert the rating to the label")
    df = df.assign(label=df["rating"].progress_apply(lambda rating: get_label(rating, config.remove_netural)))

    # remove rows where the label is None (neutral reviews, when
    # `config.remove_netural` is set)
    df = df[~df["label"].isnull()]

    # remove html tags from the text
    tqdm.pandas(dynamic_ncols=True, desc="Remove html tags from the text")
    df = df.assign(text=df["text"].progress_apply(cleanup_text))

    # filter by ascii rate (drops reviews that are mostly ASCII, i.e.
    # likely not Japanese)
    tqdm.pandas(dynamic_ncols=True, desc="Filter by ascii rate")
    df = df[~df["text"].progress_apply(is_filtered_by_ascii_rate)]

    if config.max_char_length is not None:
        df = df[df["text"].str.len() <= config.max_char_length]

    if config.is_han_to_zen:
        df = df.assign(text=df["text"].apply(han_to_zen))

    df = df[["text", "label", "review_id"]]
    df = df.rename(columns={"text": "sentence"})

    # NOTE(review): `config.max_instance_num` is never applied anywhere in
    # this pipeline — confirm whether truncation to that size was intended.

    # shuffle dataset (deterministic: `shuffle_dataframe` re-seeds the RNG)
    df = shuffle_dataframe(df)

    split_dfs = output_data(
        df=df,
        train_ratio=config.train_ratio,
        val_ratio=config.val_ratio,
        test_ratio=config.test_ratio,
        output_testset=config.output_testset,
        filter_review_id_list_paths=filter_review_id_list_paths,
        label_conv_review_id_list_paths=label_conv_review_id_list_paths,
    )
    return split_dfs
|