Commit ab879ca by Siyuan Feng
Parent(s): a88a427

feat: clean pdf fitz text

Files changed:
- crazy_functions/批量总结PDF文档.py  +2 -1
- toolbox.py  +57 -1
crazy_functions/批量总结PDF文档.py CHANGED

@@ -1,5 +1,5 @@
 from predict import predict_no_ui
-from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down
+from toolbox import CatchException, report_execption, write_results_to_file, predict_no_ui_but_counting_down, clean_text
 fast_debug = False
 
 
@@ -11,6 +11,7 @@ def 解析PDF(file_manifest, project_folder, top_p, temperature, chatbot, histor
         file_content = ""
         for page in doc:
             file_content += page.get_text()
+        file_content = clean_text(file_content)
         print(file_content)
 
         prefix = "接下来请你逐文件分析下面的论文文件,概括其内容" if index==0 else ""
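For context, a minimal sketch of how this changed path is exercised end to end, assuming PyMuPDF (imported as fitz) is installed and toolbox.py is on the import path; the PDF path is hypothetical.

# Sketch only: assumes PyMuPDF (pip install pymupdf) and an importable toolbox.clean_text;
# "paper.pdf" is a hypothetical input file.
import fitz
from toolbox import clean_text

with fitz.open("paper.pdf") as doc:
    file_content = ""
    for page in doc:
        file_content += page.get_text()  # raw per-page text from fitz

file_content = clean_text(file_content)  # fold ligatures, rejoin hyphenated words, restore paragraphs
print(file_content)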
toolbox.py CHANGED

@@ -235,4 +235,60 @@ def clear_line_break(txt):
     txt = txt.replace('\n', ' ')
     txt = txt.replace('  ', ' ')
     txt = txt.replace('  ', ' ')
-    return txt
+    return txt
+
+import re
+import unicodedata
+
+def is_paragraph_break(match):
+    """
+    Decide from the given regex match whether a newline represents a paragraph break.
+    If the character before the newline is sentence-ending punctuation (period, exclamation mark, question mark) and the next character is uppercase, the newline is more likely a paragraph break.
+    The length of the preceding content is also checked, so the paragraph must already be long enough.
+    """
+    prev_char, next_char = match.groups()
+
+    # Sentence-ending punctuation
+    sentence_endings = ".!?"
+
+    # Minimum paragraph length threshold
+    min_paragraph_length = 140
+
+    if prev_char in sentence_endings and next_char.isupper() and len(match.string[:match.start(1)]) > min_paragraph_length:
+        return "\n\n"
+    else:
+        return " "
+
+def normalize_text(text):
+    """
+    Normalize the text by converting ligatures and other special glyphs to their basic forms.
+    For example, the ligature "fi" is converted to "f" and "i".
+    """
+    # Normalize the text, decomposing ligatures
+    normalized_text = unicodedata.normalize("NFKD", text)
+
+    # Strip other special (non-ASCII) characters
+    cleaned_text = re.sub(r'[^\x00-\x7F]+', '', normalized_text)
+
+    return cleaned_text
+
+def clean_text(raw_text):
+    """
+    Clean and reformat raw text extracted from a PDF.
+    1. Normalize the raw text.
+    2. Rejoin words hyphenated across lines, e.g. "Espe-\ncially" becomes "Especially".
+    3. Decide heuristically whether each newline marks a paragraph break and replace it accordingly.
+    """
+    # Normalize the text
+    normalized_text = normalize_text(raw_text)
+
+    # Rejoin words hyphenated across line breaks
+    text = re.sub(r'(\w+-\n\w+)', lambda m: m.group(1).replace('-\n', ''), normalized_text)
+
+    # Find newlines that sit between two non-whitespace characters
+    newlines = re.compile(r'(\S)\n(\S)')
+
+    # Replace each such newline with a space or a paragraph separator according to the heuristic
+    final_text = re.sub(newlines, lambda m: m.group(1) + is_paragraph_break(m) + m.group(2), text)
+
+    return final_text.strip()
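A small illustration of what clean_text does, again assuming toolbox.py is importable; the input string is invented to exercise all three cleanup steps.

# Illustration only: the input string is made up to trigger each cleanup step.
from toolbox import clean_text

raw = (
    "This is the ﬁrst sentence of a fairly long opening paragraph that comfortably "
    "exceeds the 140-character minimum used by is_paragraph_break, and it ends here.\n"
    "Next paragraph starts with a capital letter and contains an Espe-\n"
    "cially awkward line-broken word,\n"
    "which then continues in lowercase on another wrapped line."
)

print(clean_text(raw))
# The ligature "ﬁ" is folded to "fi", "Espe-\ncially" is rejoined to "Especially",
# the newline after "ends here." becomes a blank line (paragraph break), and the
# final wrapped newline is replaced by a single space.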