Macropodus committed on
Commit
822ee34
·
verified ·
1 Parent(s): e5f52af

Upload 4 files

Browse files
.gitattributes CHANGED
@@ -57,3 +57,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ dev.json.handle_clean filter=lfs diff=lfs merge=lfs -text
61
+ train.json.handle_clean filter=lfs diff=lfs merge=lfs -text
a00_csc_clean_public_wang271k.py ADDED
@@ -0,0 +1,388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # !/usr/bin/python
2
+ # -*- coding: utf-8 -*-
3
+ # @time : 2024/4/12 10:55
4
+ # @author : Mo
5
+ # @function: 清洗SIGHAN和Wang271k数据集中地脏数据
6
+
7
+
8
+ import logging as logger
9
+ import traceback
10
+ import random
11
+ import copy
12
+ import json
13
+ import re
14
+ import os
15
+ import gc
16
+
17
+ from difflib import SequenceMatcher
18
+ from tqdm import tqdm
19
+ import pandas as pd
20
+
21
+
22
def txt_write(lines, path, mode="w", encode_type="utf-8"):
    """
    Write a list of strings to a text file.
    Args:
        lines[List]: input data to write, eg. ["桂林山水甲天下\\n"]
        path[String]: path of file to write, eg. "corpus/xuexiqiangguo.txt"
        mode[String]: write mode of file, eg. "w", "a+", "wb"
        encode_type[String]: data encode type of file, eg. "utf-8", "gbk"
    Returns:
        None (errors are logged, not raised)
    """
    try:
        # "with" guarantees the handle is closed even if writelines() raises;
        # the original open()/close() pair leaked the handle on error.
        with open(path, mode, encoding=encode_type) as file:
            file.writelines(lines)
    except Exception as e:
        logger.info(str(e))
39
def txt_read(path, encode_type="utf-8", errors=None):
    """
    Read a text file (default utf-8) and return its lines.
    Args:
        path[String]: path of file to read, eg. "corpus/xuexiqiangguo.txt"
        encode_type[String]: data encode type of file, eg. "utf-8", "gbk"
        errors[String]: specifies how encoding errors are handled, eg. "ignore", "strict"
    Returns:
        lines[List]: file lines, or [] when the file cannot be read
    """
    lines = []
    try:
        # "with" closes the handle even if readlines() raises; the original
        # "return" inside "finally" also silently swallowed KeyboardInterrupt
        # and SystemExit, which this version lets propagate.
        with open(path, "r", encoding=encode_type, errors=errors) as file:
            lines = file.readlines()
    except Exception as e:
        logger.info(str(e))
    return lines
58
def save_xlsx_from_json(res_xlsx, path_xlsx="save.xlsx"):
    """Dump a JSON-like mapping of columns into an .xlsx file via pandas."""
    frame = pd.DataFrame(res_xlsx)
    # options={"strings_to_urls": False}
    with pd.ExcelWriter(path_xlsx, engine="xlsxwriter") as writer:
        frame.to_excel(writer)
65
def save_xlsx(path_json, path_xlsx="nkpmath.xlsx"):
    """
    Convert a JSON file (list of rows) into an .xlsx spreadsheet.
    Args:
        path_json[String]: path of the input json file
        path_xlsx[String]: path of the output xlsx file
    Returns:
        None
    """
    kg_list = load_json(path_json)
    # transpose rows into columns keyed by stringified column index
    res_xlsx = {}
    for kg_i in kg_list:
        for jdx, kg_i_j in enumerate(kg_i):
            res_xlsx.setdefault(str(jdx), []).append(kg_i_j)
    pdr = pd.DataFrame(res_xlsx)
    # pandas >= 1.2 removed ExcelWriter(options=...); engine_kwargs is the
    # supported way to pass {"strings_to_urls": False} to xlsxwriter.
    with pd.ExcelWriter(path_xlsx, engine="xlsxwriter",
                        engine_kwargs={"options": {"strings_to_urls": False}}) as writer:
        pdr.to_excel(writer)
80
def save_json(jsons, json_path, indent=4):
    """
    Save a JSON-serializable object to a UTF-8 file.
    Args:
        jsons[Json]: json of input data, eg. [{"桂林": 132}]
        json_path[String]: path of file to save, eg. "corpus/xuexiqiangguo.json"
        indent[int]: pretty-printed with that indent level, eg. 4
    Returns:
        None
    """
    # "with" already closes the handle; the original explicit fj.close()
    # inside the with-block was redundant and has been removed.
    with open(json_path, "w", encoding="utf-8") as fj:
        fj.write(json.dumps(jsons, ensure_ascii=False, indent=indent))
93
def load_json(path, parse_int=None):
    """
    Load JSON content from a file.
    Args:
        path[String]: path of file to load, eg. "corpus/xuexiqiangguo.json"
        parse_int: optional callable applied to every JSON int string, eg. float
    Returns:
        data[Any]: the deserialized object
    """
    with open(path, mode="r", encoding="utf-8") as fj:
        return json.load(fj, parse_int=parse_int)
105
def get_all_dirs_files(path_dir):
    """
    Recursively collect every file path under a directory (all levels).
    Args:
        path_dir[String]: path of dir, eg. "/home/data"
    Returns:
        data[List]: sorted, de-duplicated absolute file paths
    """
    collected = []
    for root, _dirs, names in os.walk(path_dir):
        # join each file name with its containing directory
        collected.extend(os.path.join(root, name) for name in names)
    files = sorted(set(collected))
    return files
121
def find_diff_pos(sent1, sent2):
    """
    Locate the differences between two sentences via difflib.SequenceMatcher.
    args:
        sent1: str, sentence of org, eg. "春天来了,越来越来暖和了。"
        sent2: str, sentence of fix, eg. "春天来了,天气越来越暖和了。"
    return:
        diff_pos_s: List, entries of [tag, src_span, dst_span, [i1, i2]]
                    for every non-equal opcode (insert/delete/replace)
    """
    diff_pos_s = []
    for tag, i1, i2, j1, j2 in SequenceMatcher(None, sent1, sent2).get_opcodes():
        if tag == "equal":
            continue
        diff_pos_s.append([tag, sent1[i1:i2], sent2[j1:j2], [i1, i2]])
    return diff_pos_s
138
def cut_sentence(text):
    """ Split text into sentences on terminal punctuation (for summarization). """
    # '.' is deliberately excluded: it is ambiguous between a decimal point
    # and an English full stop (and Chinese ellipsis "......").
    splitter = re.compile('[!?。?!\n\r…]')
    return splitter.split(text)
146
+
147
+
148
# Fullwidth/CJK punctuation characters.
# NOTE(review): as rendered here the apostrophe inside this single-quoted
# literal looks unbalanced — confirm against the original file's encoding
# (the characters were likely fullwidth forms before extraction).
pun_1 = '"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、\u3000、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟 〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·!?。。'
# ASCII punctuation.
pun_2 = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
# Combined punctuation alphabet used by delete_last_punctuation().
puns = pun_1 + pun_2
def delete_last_punctuation(text):
    """ Strip punctuation characters from the end of the sentence. """
    while len(text) > 0 and text[-1] in puns:
        text = text[:-1]
    return text
156
+
157
def tet_csc_clean_train_dataset_wang271k():
    """ Clean noisy labels in the SIGHAN / Wang271k CSC datasets.

    Walks every annotated error position (``wrong_ids``) of train/dev/test
    json files and normalizes a hand-curated list of error/correction
    character pairs: variant-form corrections are swapped back (e.g. 馀->余,
    覆->复), person-name and pronoun "corrections" are dropped, and
    ``wrong_ids`` is rebuilt. Writes ``<basename>.handle_clean`` files into
    the current working directory.
    """
    import json
    import sys
    import os
    path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
    sys.path.append(path_root)
    print(path_root)

    path_dir = os.path.join(path_root, "macro_correct/corpus/text_correction/wang271k")
    path_train = path_dir + "/train.json"
    path_dev = path_dir + "/dev.json"
    path_tet = path_dir + "/test.json"
    paths = [path_train, path_dev, path_tet]
    # Sample schema of the input json files:
    """
    {
    "id":"--",
    "original_text":"国中数学课辅班",
    "wrong_ids":[],
    "correct_text":"国中数学课辅班"
    }
    """
    # Statistics counters printed at the end (count_ta_*/count_de_1 are
    # only accumulated, never printed — kept for debugging).
    count_yu_1 = 0
    count_yu_2 = 0
    count_fu_1 = 0
    count_fu_2 = 0
    count_ta_1 = 0
    count_ta_2 = 0
    count_de_1 = 0
    for path in paths:
        if not os.path.exists(path):
            # NOTE(review): "ecist" is a typo for "exist" in this runtime
            # message (left unchanged here).
            print("path is not ecist: " + path)
            continue
        data_json_list = load_json(path)
        data_json_list_new = []
        for jdx, d_json_org in enumerate(data_json_list):
            d_json = copy.deepcopy(d_json_org)
            original_text = d_json.get("original_text", "")
            correct_text = d_json.get("correct_text", "")
            wrong_ids = d_json.get("wrong_ids", [])
            wrong_ids_new = []
            for wid in wrong_ids:
                char_error = original_text[wid]
                char_true = correct_text[wid]
                # 余-馀 (variant form): swap the direction of the pair.
                # flag_add tracks whether this position stays in wrong_ids.
                flag_add = True
                if char_error=="余" and char_true=="馀":
                    original_text = original_text[:wid] + char_true + original_text[wid+1:]
                    correct_text = correct_text[:wid] + char_error + correct_text[wid+1:]
                    count_yu_1 += 1
                elif char_true=="馀":
                    # any other error char "corrected" to 馀: normalize target to 余
                    correct_text = correct_text[:wid] + "余" + correct_text[wid+1:]
                    count_yu_2 += 1

                # 复-覆 / 覆-复 (variant forms): swap the direction
                if char_error == "覆" and char_true == "复":
                    original_text = original_text[:wid] + char_true + original_text[wid + 1:]
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                    count_fu_1 += 1
                elif char_true == "覆":
                    # words 答覆/回覆/反覆 and 覆审: normalize 覆 back to 复
                    if correct_text[wid-1:wid+1] in ["答覆", "回覆", "反覆"]:
                        correct_text = correct_text[:wid] + "复" + correct_text[wid + 1:]
                    elif correct_text[wid:wid+2] in ["覆审"]:
                        correct_text = correct_text[:wid] + "复" + correct_text[wid + 1:]
                    count_fu_2 += 1

                # 功-工: only swap for the words 工夫/工效
                if char_error == "功" and char_true == "工":
                    "同工/工作/竣工"
                    "工夫/工效"
                    if correct_text[wid:wid + 2] in ["工夫", "工效"]:
                        original_text = original_text[:wid] + "工" + original_text[wid + 1:]
                        correct_text = correct_text[:wid] + "功" + correct_text[wid + 1:]

                # 借-藉 (variant form): swap the direction
                if char_error == "借" and char_true == "藉":
                    original_text = original_text[:wid] + char_true + original_text[wid + 1:]
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                # 琅-瑯 (variant form): swap the direction
                if char_error == "琅" and char_true == "瑯":
                    original_text = original_text[:wid] + char_true + original_text[wid + 1:]
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]

                # 叶门-也门 (Yemen transliteration): keep 也门
                if char_error == "也" and char_true == "叶":
                    if correct_text[wid:wid + 2] in ["叶门"]:
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                        correct_text = correct_text.replace("叶门", "也门")  # leftover 叶门 elsewhere
                # e.g. "跨越数白百万公里后": annotation error around 数白百
                if char_error == "百" and char_true == "白":
                    if correct_text[wid-1:wid + 2] in ["数白百"]:
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]

                # 震振有词: only half the word was corrected — also fix the
                # previous position and record it in wrong_ids_new.
                if char_error == "震" and char_true == "振":
                    if correct_text[wid - 1:wid + 1] in ["震振"]:
                        correct_text = correct_text[:wid-1] + char_true + correct_text[wid-1+1:]
                        wrong_ids_new.append(wid-1)
                # 经不起-禁不起: undo, except for two known-correct sentences
                if char_error == "经" and char_true == "禁":
                    if correct_text[wid:wid + 3] in ["禁不住", "禁不起"] and \
                            "她曾经禁不住落泪" not in correct_text and \
                            "大家都禁不住拍手鼓掌" not in correct_text:
                        original_text = original_text[:wid] + char_true + original_text[wid + 1:]
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]

                # 他-她: gender pronouns are not corrected; drop the position
                if char_error == "他" and char_true == "她":
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                    flag_add = False
                    count_ta_1 += 1
                # 她-他: same, drop
                if char_error == "她" and char_true == "他":
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                    flag_add = False
                    count_ta_2 += 1
                # 小-晓: person names are not corrected
                if char_error == "小" and char_true == "晓":
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                    flag_add = False
                # 一-逸: person names, all in train (吕逸涛/范逸臣/周逸雄), not corrected
                if char_error == "一" and char_true == "逸":
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                    flag_add = False
                # e.g. 吕一套被任命为二零一六央视春晚总导演。
                # 佳-家: person names (张家玮/家慧), not corrected
                if char_error == "佳" and char_true == "家":
                    if correct_text[wid:wid + 2] in ["家玮", "家慧"]:
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                        flag_add = False
                # 得-地: person name (马哈地), not corrected
                if char_error == "得" and char_true == "地":
                    if correct_text[wid-2:wid+1] in ["马哈地"]:
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                        flag_add = False
                # 红-虹: person names, not corrected
                if char_error == "红" and char_true == "虹":  # 刘虹 虹姐 张虹 秀虹
                    if correct_text[wid-1:wid+1] in ["刘虹", "张虹", "秀虹"] or \
                            correct_text[wid:wid+2] in ["虹姐"]:
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                        flag_add = False
                # 民-明: person names (鲍明/冯大明/许传明/杨爱明), not corrected
                if char_error == "民" and char_true == "明":
                    if correct_text[wid - 1:wid + 1] in ["鲍明"] or \
                            correct_text[wid-2:wid + 1] in ["冯大明", "许传明", "杨爱明"]:
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                        flag_add = False

                # # 的 - 地
                # # 的 - 得
                # # 它 - 他
                # # 哪 - 那

                # # pairs swapped: 余-馀 覆-复 借-藉 功-工 琅-瑯 震-振 百-白 也-叶 经-禁(经不起-禁不起)
                # # partly kept (person names): 小-晓 一-逸 佳-家 得-地(马哈得) 红-虹 民-明
                # # matched but left unchanged: 惟-唯 象-像 查-察 立-利 止-只 建-健 他-它 地-的 定-订 带-戴 力-利 成-城 点-店
                # # matched but left unchanged: 作-做 得-的 场-厂 身-生 有-由 种-重 理-里


                # # unmatched gaps: 今-在 年-今 前-目 当-在 目-在 者-是
                # # foreign names etc.: 其-齐 课-科 博-波
                #
                # if char_error == "民" and char_true == "明":
                #     # 张家玮 家慧 刘虹 虹姐 张虹 秀虹 鲍明 冯大明 许传明 杨爱明
                #     # print(original_text)
                #     # print(correct_text)
                #     # print(correct_text[wid - 1:wid + 1], correct_text[wid:wid+2])
                #     correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                #     count_de_1 += 1
                # # "是那一段时间"
                # # 发言人并未说明他住在那家疗养院,也未说明入院原因。
                # # 但俄国官员却说,如果两千万美元费用未付,蓝斯那儿也去不成,目前双方还僵持在付费日期上。
                # # 跨越数白百万公里后
                # # 我们的眼睛也开始禁不起烟尘的刺激。 禁不住琢磨 禁不住 禁不起
                # # 不禁令人担心
                # # 她曾经禁不住落泪 大家都禁不住拍手鼓掌
                if flag_add:
                    wrong_ids_new.append(wid)

            d_json["original_text"] = original_text
            d_json["correct_text"] = correct_text
            d_json["wrong_ids"] = wrong_ids_new
            data_json_list_new.append(d_json)
            if wrong_ids_new != wrong_ids:
                # log every sample whose annotation was changed by the cleaning
                print("#"*128)
                print("original_text_or: " + data_json_list[jdx].get("original_text", ""))
                print("correct_text_org: " + data_json_list[jdx].get("correct_text", ""))
                print("correct_text_new: " + correct_text)
                print("wrong_ids_new: ", wrong_ids_new)
                print("wrong_ids: ", wrong_ids)

        # write the cleaned dataset to the CWD, e.g. "train.json.handle_clean"
        save_json(data_json_list_new, os.path.split(path)[-1]+".handle_clean")
    print(count_yu_1)
    print(count_yu_2)
    print(count_fu_1)
    print(count_fu_2)
356
+
357
+
358
+ # test.json 和 dev.json 为 SIGHAN数据集, 包括SIGHAN13 14 15,来自 官方csc.html ,文件大小:339kb,4千条。
359
+ # train.json 为 Wang271k数据集,包括 Wang271k ,来自 Automatic-Corpus-Generation dimmywang提供 ,文件大小:93MB,27万条。
360
+
361
+
362
if __name__ == '__main__':
    yz = 0

    ### clean the SIGHAN/Wang271k data
    tet_csc_clean_train_dataset_wang271k()


    # Author's cleaning-rule notes, kept verbatim below as a no-op string
    # literal. Roughly: 余-馀 and 覆-复 swaps, 他/她 pronouns left uncorrected,
    # person names dropped, plus a list of frequently mislabeled pairs to
    # supplement from the People's Daily corpus.
    """
    余-馀: 替换为馀-余
    other - 馀: 替换为余
    覆-复: 替换为复-覆
    other-覆: # 答疆/回覆/反覆
    # 覆审
    他-她:不纠
    她-他:不纠
    人名不纠: 识别人名并丢弃


    抽取人民日报语料中分错的(错得多的都需要补充预料)
    者-是
    或-还
    立-利
    震-振
    即-既

    """
388
+
dev.json.handle_clean ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:01301abd89cb317152f8eda39f1f9732d3d532111c77283d5fdbba13f92042b4
3
+ size 11399969
test.json.handle_clean ADDED
The diff for this file is too large to render. See raw diff
 
train.json.handle_clean ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a39a720215516e639a8206576a0971073f93d4f0fed8cdb03e73c75ea21b2a84
3
+ size 102123745