import logging
import traceback
import random
import copy
import json
import re
import os
import gc

from difflib import SequenceMatcher
from tqdm import tqdm
import pandas as pd

logger = logging.getLogger(__name__)

def txt_write(lines, path, mode="w", encode_type="utf-8"):
    """
    Write a list of text lines to a file.
    Args:
        lines[List]: input data to write, e.g. ["桂林山水甲天下\\n"]
        path[String]: path of the file to write, e.g. "corpus/xuexiqiangguo.txt"
        mode[String]: write mode of the file, e.g. "w", "a+", "wb"
        encode_type[String]: encoding of the file, e.g. "utf-8", "gbk"
    Returns:
        None
    """
    try:
        with open(path, mode, encoding=encode_type) as file:
            file.writelines(lines)
    except Exception as e:
        logger.info(str(e))

def txt_read(path, encode_type="utf-8", errors=None):
    """
    Read a txt file, utf-8 by default; the file should contain no empty lines.
    Args:
        path[String]: path of the file to read, e.g. "corpus/xuexiqiangguo.txt"
        encode_type[String]: encoding of the file, e.g. "utf-8", "gbk"
        errors[String]: how encoding errors are handled, e.g. "ignore", "strict"
    Returns:
        lines[List]: output lines
    """
    lines = []
    try:
        with open(path, "r", encoding=encode_type, errors=errors) as file:
            lines = file.readlines()
    except Exception as e:
        logger.info(str(e))
    return lines

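# Usage sketch for txt_write/txt_read (illustrative file name; uncomment to run):
#   txt_write(["桂林山水甲天下\n"], "demo.txt")
#   lines = txt_read("demo.txt")   # -> ["桂林山水甲天下\n"]
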
def save_xlsx_from_json(res_xlsx, path_xlsx="save.xlsx"):
    """ Convert json data to an xlsx excel file. """
    pdr = pd.DataFrame(res_xlsx)
    with pd.ExcelWriter(path_xlsx, engine="xlsxwriter") as writer:
        pdr.to_excel(writer)


def save_xlsx(path_json, path_xlsx="nkpmath.xlsx"):
    """ Convert a json file to an xlsx excel file. """
    kg_list = load_json(path_json)
    res_xlsx = {}
    for kg_i in kg_list:
        for jdx, kg_i_j in enumerate(kg_i):
            jdx_str = str(jdx)
            if jdx_str in res_xlsx:
                res_xlsx[jdx_str].append(kg_i_j)
            else:
                res_xlsx[jdx_str] = [kg_i_j]
    pdr = pd.DataFrame(res_xlsx)
    # pandas >= 1.3 passes engine options via engine_kwargs; the bare
    # `options=` keyword of older pandas was removed in pandas 2.0.
    with pd.ExcelWriter(path_xlsx, engine="xlsxwriter",
                        engine_kwargs={"options": {"strings_to_urls": False}}) as writer:
        pdr.to_excel(writer)

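# Usage sketch for save_xlsx_from_json (illustrative data; needs the xlsxwriter package):
#   save_xlsx_from_json({"city": ["桂林", "北京"], "score": [132, 100]}, "demo.xlsx")
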
def save_json(jsons, json_path, indent=4):
    """
    Save json data to a file.
    Args:
        jsons[Json]: input json data, e.g. [{"桂林": 132}]
        json_path[String]: path of the file to save, e.g. "corpus/xuexiqiangguo.lib"
        indent[int]: pretty-print with that indent level, e.g. 4
    Returns:
        None
    """
    with open(json_path, "w", encoding="utf-8") as fj:
        fj.write(json.dumps(jsons, ensure_ascii=False, indent=indent))


def load_json(path, parse_int=None):
    """
    Load json data from a file.
    Args:
        path[String]: path of the file to load, e.g. "corpus/xuexiqiangguo.lib"
        parse_int[Callable]: called with the string of every json int to be
            decoded, e.g. float; None keeps the default int parsing
    Returns:
        model_json[Any]: loaded data
    """
    with open(path, mode="r", encoding="utf-8") as fj:
        model_json = json.load(fj, parse_int=parse_int)
    return model_json

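# Usage sketch: save_json/load_json round trip (illustrative path):
#   save_json([{"桂林": 132}], "demo.json")
#   data = load_json("demo.json")   # -> [{"桂林": 132}]
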
def get_all_dirs_files(path_dir):
    """
    Recursively collect all files under a directory (every level, subdirectories included).
    Args:
        path_dir[String]: path of the directory, e.g. "/home/data"
    Returns:
        files[List]: sorted file paths, e.g. ["2020_01_08.txt"]
    """
    path_files = []
    for root, dirs, files in os.walk(path_dir):
        for file in files:
            file_path = os.path.join(root, file)
            path_files.append(file_path)
    files = list(set(path_files))
    files.sort()
    return files

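# Usage sketch for get_all_dirs_files (illustrative directory):
#   for path_file in get_all_dirs_files("corpus"):
#       print(path_file)
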
def find_diff_pos(sent1, sent2):
    """
    Find the differences between two sentences and return the insert/delete/replace
    spans, via difflib.SequenceMatcher.
    Args:
        sent1: str, original sentence, e.g. "春天来了,越来越来暖和了。"
        sent2: str, fixed sentence, e.g. "春天来了,天气越来越暖和了。"
    Returns:
        diff_pos_s: List[List], tag, source span, target span and position,
            e.g. [["insert", "", "天气", [5, 5]]]
    """
    matcher = SequenceMatcher(None, sent1, sent2)
    diff_pos_s = []
    for tag, idx_1, idx_2, jdx_1, jdx_2 in matcher.get_opcodes():
        if tag != "equal":
            line_tuple = [tag, sent1[idx_1:idx_2],
                          sent2[jdx_1:jdx_2], [idx_1, idx_2]]
            diff_pos_s.append(line_tuple)
    return diff_pos_s

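# Usage sketch with the docstring sentences; the exact opcodes come from difflib
# and may differ, e.g. roughly:
#   find_diff_pos("春天来了,越来越来暖和了。", "春天来了,天气越来越暖和了。")
#   # -> [["insert", "", "天气", [5, 5]], ["delete", "来", "", [8, 9]]]
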
def cut_sentence(text):
    """ Split text into sentences (for text summarization). """
    re_sen = re.compile('[!?。?!\n\r…]')
    sentences = re_sen.split(text)
    return sentences


pun_1 = "\"#$%&'()*+,-/:;<=>@[\\]^_`{|}~⦅⦆「」、\u3000、〃〈〉《》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟 〰〾〿–—‘’‛“”„‟…‧﹏﹑﹔·!?。。"
pun_2 = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
puns = pun_1 + pun_2

def delete_last_punctuation(text):
    """ Strip trailing punctuation from a sentence. """
    while len(text) > 0 and text[-1] in puns:
        text = text[:-1]
    return text

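# Usage sketch for cut_sentence/delete_last_punctuation (illustrative strings):
#   cut_sentence("春天来了!天气暖和了。")    # -> ["春天来了", "天气暖和了", ""]
#   delete_last_punctuation("春天来了!!")   # -> "春天来了"
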
def tet_csc_clean_train_dataset_wang271k():
    """ Clean the wang271k dataset. """
    import json
    import sys
    import os
    path_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
    sys.path.append(path_root)
    print(path_root)

    path_dir = os.path.join(path_root, "macro_correct/corpus/text_correction/wang271k")
    path_train = path_dir + "/train.json"
    path_dev = path_dir + "/dev.json"
    path_tet = path_dir + "/test.json"
    paths = [path_train, path_dev, path_tet]
    """
    Sample record:
    {
        "id": "--",
        "original_text": "国中数学课辅班",
        "wrong_ids": [],
        "correct_text": "国中数学课辅班"
    }
    """
    count_yu_1 = 0
    count_yu_2 = 0
    count_fu_1 = 0
    count_fu_2 = 0
    count_ta_1 = 0
    count_ta_2 = 0
    count_de_1 = 0
    for path in paths:
        if not os.path.exists(path):
            print("path does not exist: " + path)
            continue
        data_json_list = load_json(path)
        data_json_list_new = []
        for jdx, d_json_org in enumerate(data_json_list):
            d_json = copy.deepcopy(d_json_org)
            original_text = d_json.get("original_text", "")
            correct_text = d_json.get("correct_text", "")
            wrong_ids = d_json.get("wrong_ids", [])
            wrong_ids_new = []
            for wid in wrong_ids:
                char_error = original_text[wid]
                char_true = correct_text[wid]

                flag_add = True
                # 余/馀: swap the pair so "馀" is the error and "余" the correction
                if char_error == "余" and char_true == "馀":
                    original_text = original_text[:wid] + char_true + original_text[wid+1:]
                    correct_text = correct_text[:wid] + char_error + correct_text[wid+1:]
                    count_yu_1 += 1
                # any other error corrected to "馀": correct to "余" instead
                elif char_true == "馀":
                    correct_text = correct_text[:wid] + "余" + correct_text[wid+1:]
                    count_yu_2 += 1
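                # Worked example of the swap pattern above (values illustrative):
                # with original_text="五千余人", correct_text="五千馀人" and wid=2,
                # the pair becomes original_text="五千馀人", correct_text="五千余人",
                # i.e. "馀" is kept as the error and "余" as the correction.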
                # 覆/复: swap the pair so "覆" is the error and "复" the correction
                if char_error == "覆" and char_true == "复":
                    original_text = original_text[:wid] + char_true + original_text[wid + 1:]
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                    count_fu_1 += 1
                # any other error corrected to "覆": prefer "复" in 答复/回复/反复/复审
                elif char_true == "覆":
                    if correct_text[wid-1:wid+1] in ["答覆", "回覆", "反覆"]:
                        correct_text = correct_text[:wid] + "复" + correct_text[wid + 1:]
                    elif correct_text[wid:wid+2] in ["覆审"]:
                        correct_text = correct_text[:wid] + "复" + correct_text[wid + 1:]
                    count_fu_2 += 1

                # 功/工: keep 同工/工作/竣工, but 工夫/工效 should be 功夫/功效
                if char_error == "功" and char_true == "工":
                    if correct_text[wid:wid + 2] in ["工夫", "工效"]:
                        original_text = original_text[:wid] + "工" + original_text[wid + 1:]
                        correct_text = correct_text[:wid] + "功" + correct_text[wid + 1:]

                # 借/藉: swap the pair so "藉" is the error and "借" the correction
                if char_error == "借" and char_true == "藉":
                    original_text = original_text[:wid] + char_true + original_text[wid + 1:]
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]

                # 琅/瑯: swap the pair so "瑯" is the error and "琅" the correction
                if char_error == "琅" and char_true == "瑯":
                    original_text = original_text[:wid] + char_true + original_text[wid + 1:]
                    correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]

                # 也/叶: the country name should be 也门, not 叶门
                if char_error == "也" and char_true == "叶":
                    if correct_text[wid:wid + 2] in ["叶门"]:
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
                        correct_text = correct_text.replace("叶门", "也门")

                # 百/白: revert the correction in the 数白百 context
                if char_error == "百" and char_true == "白":
                    if correct_text[wid-1:wid + 2] in ["数白百"]:
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]

                # 震/振: fix the character before wid when the pair 震振 appears
                if char_error == "震" and char_true == "振":
                    if correct_text[wid - 1:wid + 1] in ["震振"]:
                        correct_text = correct_text[:wid-1] + char_true + correct_text[wid:]
                        wrong_ids_new.append(wid-1)

                # 经/禁: 禁不住/禁不起 are correct except in the two listed sentences
                if char_error == "经" and char_true == "禁":
                    if correct_text[wid:wid + 3] in ["禁不住", "禁不起"] and \
                            "她曾经禁不住落泪" not in correct_text and \
                            "大家都禁不住拍手鼓掌" not in correct_text:
                        original_text = original_text[:wid] + char_true + original_text[wid + 1:]
                        correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]

if char_error == "他" and char_true == "她":
|
|
correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
|
|
flag_add = False
|
|
count_ta_1 += 1
|
|
|
|
if char_error == "她" and char_true == "他":
|
|
correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
|
|
flag_add = False
|
|
count_ta_2 += 1
|
|
|
|
if char_error == "小" and char_true == "晓":
|
|
correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
|
|
flag_add = False
|
|
|
|
if char_error == "一" and char_true == "逸":
|
|
correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
|
|
flag_add = False
|
|
|
|
|
|
if char_error == "佳" and char_true == "家":
|
|
if correct_text[wid:wid + 2] in ["家玮", "家慧"]:
|
|
correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
|
|
flag_add = False
|
|
|
|
if char_error == "得" and char_true == "地":
|
|
if correct_text[wid-2:wid+1] in ["马哈地"]:
|
|
correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
|
|
flag_add = False
|
|
|
|
if char_error == "红" and char_true == "虹":
|
|
if correct_text[wid-1:wid+1] in ["刘虹", "张虹", "秀虹"] or \
|
|
correct_text[wid:wid+2] in ["虹姐"]:
|
|
correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
|
|
flag_add = False
|
|
|
|
if char_error == "民" and char_true == "明":
|
|
if correct_text[wid - 1:wid + 1] in ["鲍明"] or \
|
|
correct_text[wid-2:wid + 1] in ["冯大明", "许传明", "杨爱明"]:
|
|
correct_text = correct_text[:wid] + char_error + correct_text[wid + 1:]
|
|
flag_add = False
|
|
                if flag_add:
                    wrong_ids_new.append(wid)

            d_json["original_text"] = original_text
            d_json["correct_text"] = correct_text
            d_json["wrong_ids"] = wrong_ids_new
            data_json_list_new.append(d_json)
            if wrong_ids_new != wrong_ids:
                print("#" * 128)
                print("original_text_org: " + data_json_list[jdx].get("original_text", ""))
                print("correct_text_org: " + data_json_list[jdx].get("correct_text", ""))
                print("correct_text_new: " + correct_text)
                print("wrong_ids_new: ", wrong_ids_new)
                print("wrong_ids: ", wrong_ids)

        save_json(data_json_list_new, os.path.split(path)[-1] + ".handle_clean")
    print(count_yu_1)
    print(count_yu_2)
    print(count_fu_1)
    print(count_fu_2)

if __name__ == '__main__':
    tet_csc_clean_train_dataset_wang271k()

"""
|
|
余-馀: 替换为馀-余
|
|
other - 馀: 替换为余
|
|
覆-复: 替换为复-覆
|
|
other-覆: # 答疆/回覆/反覆
|
|
# 覆审
|
|
他-她:不纠
|
|
她-他:不纠
|
|
人名不纠: 识别人名并丢弃
|
|
|
|
|
|
抽取人民日报语料中分错的(错得多的都需要补充预料)
|
|
者-是
|
|
或-还
|
|
立-利
|
|
震-振
|
|
即-既
|
|
|
|
"""
|
|
|
|
|