# Imports used for the PyMuPDF extraction path and logging
import fitz  # PyMuPDF
import logging

# Copyright (c) Opendatalab. All rights reserved.

import base64
import json
import os
import time
import zipfile
from pathlib import Path
import re
import uuid
import pymupdf
from io import BytesIO
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse
import uvicorn
import traceback
from datetime import datetime

# Initialize FastAPI app
app = FastAPI()

# Setup and installation commands
os.system('pip uninstall -y magic-pdf')
os.system('pip install git+https://github.com/opendatalab/MinerU.git@dev')
os.system('wget https://github.com/opendatalab/MinerU/raw/dev/scripts/download_models_hf.py -O download_models_hf.py')
os.system('python download_models_hf.py')

# Configure magic-pdf settings
with open('/home/user/magic-pdf.json', 'r') as file:
    data = json.load(file)

data['device-mode'] = "cuda"
if os.getenv('apikey'):
    data['llm-aided-config']['title_aided']['api_key'] = os.getenv('apikey')
    data['llm-aided-config']['title_aided']['enable'] = True

with open('/home/user/magic-pdf.json', 'w') as file:
    json.dump(data, file, indent=4)
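
# Sketch of the relevant part of /home/user/magic-pdf.json after the edit above
# (other keys from the downloaded template are left untouched; values are examples):
#   {
#       "device-mode": "cuda",
#       "llm-aided-config": {
#           "title_aided": {
#               "api_key": "<value of the apikey env var>",
#               "enable": true
#           }
#       }
#   }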

os.system('cp -r paddleocr /home/user/.paddleocr')

# Import required modules
from magic_pdf.data.data_reader_writer import FileBasedDataReader
from magic_pdf.libs.hash_utils import compute_sha256
from magic_pdf.tools.common import do_parse, prepare_env
from loguru import logger

# Configure logging (rebinds `logger` from the loguru import above to the uvicorn logger)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("uvicorn")

def read_fn(path):
    disk_rw = FileBasedDataReader(os.path.dirname(path))
    return disk_rw.read(os.path.basename(path))


def parse_pdf(doc_path, output_dir, end_page_id, is_ocr, layout_mode, formula_enable, table_enable, language):
    os.makedirs(output_dir, exist_ok=True)

    try:
        file_name = f"{str(Path(doc_path).stem)}_{time.time()}"
        pdf_data = read_fn(doc_path)
        if is_ocr:
            parse_method = "ocr"
        else:
            parse_method = "auto"
        local_image_dir, local_md_dir = prepare_env(output_dir, file_name, parse_method)
        do_parse(
            output_dir,
            file_name,
            pdf_data,
            [],
            parse_method,
            False,
            end_page_id=end_page_id,
            layout_model=layout_mode,
            formula_enable=formula_enable,
            table_enable=table_enable,
            lang=language,
            f_dump_orig_pdf=False,
        )
        return local_md_dir, file_name
    except Exception as e:
        logger.exception(e)
        # Re-raise so the caller does not try to unpack a None return value
        raise


def compress_directory_to_zip(directory_path, output_zip_path):
    """
    压缩指定目录到一个 ZIP 文件。

    :param directory_path: 要压缩的目录路径
    :param output_zip_path: 输出的 ZIP 文件路径
    """
    try:
        with zipfile.ZipFile(output_zip_path, 'w', zipfile.ZIP_DEFLATED) as zipf:

            # 遍历目录中的所有文件和子目录
            for root, dirs, files in os.walk(directory_path):
                for file in files:
                    # 构建完整的文件路径
                    file_path = os.path.join(root, file)
                    # 计算相对路径
                    arcname = os.path.relpath(file_path, directory_path)
                    # 添加文件到 ZIP 文件
                    zipf.write(file_path, arcname)
        return 0
    except Exception as e:
        logger.exception(e)
        return -1


def image_to_base64(image_path):
    with open(image_path, "rb") as image_file:
        return base64.b64encode(image_file.read()).decode('utf-8')


def replace_image_with_base64(markdown_text, image_dir_path):
    # Match Markdown image tags
    pattern = r'\!\[(?:[^\]]*)\]\(([^)]+)\)'

    # Replace image links with inline base64 data URIs
    def replace(match):
        relative_path = match.group(1)
        full_path = os.path.join(image_dir_path, relative_path)
        base64_image = image_to_base64(full_path)
        return f"![{relative_path}](data:image/jpeg;base64,{base64_image})"

    # Apply the replacement
    return re.sub(pattern, replace, markdown_text)
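
# Illustrative example of the substitution above (hypothetical file names):
#   ![figure](images/page1_fig1.jpg)
# is rewritten by replace_image_with_base64() to
#   ![images/page1_fig1.jpg](data:image/jpeg;base64,<encoded bytes>)
# so the returned Markdown no longer depends on image files on disk.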


def to_markdown(file_path, end_pages, is_ocr, layout_mode, formula_enable, table_enable, language):
    file_path = to_pdf(file_path)
    if end_pages > 20:
        end_pages = 20
    # Get the directory with the recognized Markdown output and build the archive path
    local_md_dir, file_name = parse_pdf(file_path, './output', end_pages - 1, is_ocr,
                                        layout_mode, formula_enable, table_enable, language)
    archive_zip_path = os.path.join("./output", compute_sha256(local_md_dir) + ".zip")
    zip_archive_success = compress_directory_to_zip(local_md_dir, archive_zip_path)
    if zip_archive_success == 0:
        logger.info("ZIP archive created successfully")
    else:
        logger.error("Failed to create ZIP archive")
    md_path = os.path.join(local_md_dir, file_name + ".md")
    with open(md_path, 'r', encoding='utf-8') as f:
        txt_content = f.read()
    md_content = replace_image_with_base64(txt_content, local_md_dir)
    # Path of the converted (layout-annotated) PDF
    new_pdf_path = os.path.join(local_md_dir, file_name + "_layout.pdf")

    return md_content, txt_content, archive_zip_path, new_pdf_path


latex_delimiters = [{"left": "$$", "right": "$$", "display": True},
                    {"left": '$', "right": '$', "display": False}]


def init_model():
    from magic_pdf.model.doc_analyze_by_custom_model import ModelSingleton
    try:
        model_manager = ModelSingleton()
        txt_model = model_manager.get_model(False, False)
        logger.info("txt_model initialized")
        ocr_model = model_manager.get_model(True, False)
        logger.info("ocr_model initialized")
        return 0
    except Exception as e:
        logger.exception(e)
        return -1


model_init = init_model()
logger.info(f"model_init: {model_init}")


with open("header.html", "r") as file:
    header = file.read()


latin_lang = [
        'af', 'az', 'bs', 'cs', 'cy', 'da', 'de', 'es', 'et', 'fr', 'ga', 'hr',
        'hu', 'id', 'is', 'it', 'ku', 'la', 'lt', 'lv', 'mi', 'ms', 'mt', 'nl',
        'no', 'oc', 'pi', 'pl', 'pt', 'ro', 'rs_latin', 'sk', 'sl', 'sq', 'sv',
        'sw', 'tl', 'tr', 'uz', 'vi', 'french', 'german'
]
arabic_lang = ['ar', 'fa', 'ug', 'ur']
cyrillic_lang = [
        'ru', 'rs_cyrillic', 'be', 'bg', 'uk', 'mn', 'abq', 'ady', 'kbd', 'ava',
        'dar', 'inh', 'che', 'lbe', 'lez', 'tab'
]
devanagari_lang = [
        'hi', 'mr', 'ne', 'bh', 'mai', 'ang', 'bho', 'mah', 'sck', 'new', 'gom',
        'sa', 'bgc'
]
other_lang = ['ch', 'en', 'korean', 'japan', 'chinese_cht', 'ta', 'te', 'ka']

all_lang = ['', 'auto']
all_lang.extend([*other_lang, *latin_lang, *arabic_lang, *cyrillic_lang, *devanagari_lang])


def to_pdf(file_path):
    with pymupdf.open(file_path) as f:
        if f.is_pdf:
            return file_path
        else:
            pdf_bytes = f.convert_to_pdf()
            # Write the PDF bytes into a file named with a UUID
            # Generate a unique file name
            unique_filename = f"{uuid.uuid4()}.pdf"

            # Build the full file path
            tmp_file_path = os.path.join(os.path.dirname(file_path), unique_filename)

            # Write the byte data to the file
            with open(tmp_file_path, 'wb') as tmp_pdf_file:
                tmp_pdf_file.write(pdf_bytes)

            return tmp_file_path
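
# Note: PyMuPDF can open several non-PDF formats (e.g. XPS, EPUB, plain images),
# so to_pdf() converts such inputs into a temporary PDF that the parser can handle.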


@app.post("/process_document")
async def process_document(
    file: UploadFile = File(...),
    end_pages: int = 10,
    is_ocr: bool = False,
    layout_mode: str = "doclayout_yolo",
    formula_enable: bool = True,
    table_enable: bool = True,
    language: str = "auto"
):
    try:
        logger.info("\n=== НАЧАЛО ОБРАБОТКИ ДОКУМЕНТА ===")
        logger.info(f"Имя файла: {file.filename}")
        logger.info(f"Параметры: end_pages={end_pages}, is_ocr={is_ocr}, language={language}")

        # Сохраняем временный файл
        temp_path = f"/tmp/{file.filename}"
        try:
            with open(temp_path, "wb") as buffer:
                content = await file.read()
                buffer.write(content)
            logger.info(f"Файл сохранен: {temp_path}")
        except Exception as e:
            logger.error(f"Ошибка при сохранении файла: {str(e)}")
            raise

        # Text extraction via PyMuPDF
        def extract_text_pymupdf(pdf_path):
            try:
                doc = fitz.open(pdf_path)
                logger.info(f"Opened PDF, total pages: {doc.page_count}")

                text = ""
                for page_num in range(min(end_pages, doc.page_count)):
                    try:
                        page = doc[page_num]
                        blocks = page.get_text("blocks")
                        blocks.sort(key=lambda b: (b[1], b[0]))
                        for b in blocks:
                            text += b[4] + "\n"
                        logger.info(f"Processed page {page_num + 1}")
                    except Exception as page_error:
                        logger.error(f"Error processing page {page_num + 1}: {str(page_error)}")

                doc.close()
                logger.info(f"Extracted {len(text)} characters of text via PyMuPDF")
                return text
            except Exception as e:
                logger.error(f"Error extracting text via PyMuPDF: {str(e)}")
                # Return an empty string so the error message is not mistaken for document text
                return ""

        # Text extraction via magic-pdf
        def extract_text_magicpdf(pdf_path):
            try:
                # Get the Markdown (with inline images) and plain-text content
                md_content, txt_content, archive_zip_path, new_pdf_path = to_markdown(
                    pdf_path,
                    end_pages=end_pages,
                    is_ocr=is_ocr,
                    layout_mode=layout_mode,
                    formula_enable=formula_enable,
                    table_enable=table_enable,
                    language=language
                )
                logger.info(f"Extracted {len(txt_content)} characters of text via magic-pdf")
                return {
                    "text": txt_content,
                    "html": md_content
                }
            except Exception as e:
                logger.error(f"Error extracting text via magic-pdf: {str(e)}")
                # Return empty fields so the error message is not mistaken for document text
                return {"text": "", "html": ""}

        # Collect data from both extractors
        pymupdf_text = extract_text_pymupdf(temp_path) or ""
        magic_pdf_data = extract_text_magicpdf(temp_path)

        # Make sure at least one extractor produced text
        if not pymupdf_text.strip() and not magic_pdf_data["text"].strip():
            error_msg = "Failed to extract text from the document with either method"
            logger.error(error_msg)
            return JSONResponse(
                status_code=422,
                content={
                    "error": error_msg,
                    "details": "Extracted text is empty"
                }
            )

        # Assemble the combined data structure for downstream processing
        # (open the document briefly to read the page count without leaking a handle)
        with fitz.open(temp_path) as meta_doc:
            total_page_count = meta_doc.page_count
        combined_data = {
            "sources": {
                "pymupdf": {
                    "text": pymupdf_text
                },
                "magic_pdf": magic_pdf_data
            },
            "metadata": {
                "filename": file.filename,
                "page_count": min(end_pages, total_page_count),
                "extraction_date": datetime.now().isoformat()
            }
        }

        # Clean up temporary files
        try:
            os.remove(temp_path)
            logger.info("Temporary file removed")
        except Exception as e:
            logger.warning(f"Could not remove temporary file: {str(e)}")

        logger.info("\n=== RESPONSE JSON ===")
        response_json = {"text": json.dumps(combined_data, ensure_ascii=False)}
        logger.info(json.dumps(response_json, indent=2, ensure_ascii=False)[:500] + "...")
        logger.info("\n=== PROCESSING COMPLETED SUCCESSFULLY ===")

        return JSONResponse(response_json)

    except Exception as e:
        error_msg = f"Critical error while processing the document: {str(e)}\nTraceback: {traceback.format_exc()}"
        logger.error(error_msg)
        return JSONResponse(
            status_code=500,
            content={
                "error": error_msg,
                "details": {
                    "error_type": type(e).__name__,
                    "error_message": str(e),
                    "file_name": file.filename if file else None
                }
            }
        )
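
# Hypothetical client call for the endpoint above (file name and parameter values
# are examples; host/port follow the uvicorn settings at the bottom of this file):
#
#   import requests
#   with open("sample.pdf", "rb") as f:
#       resp = requests.post(
#           "http://localhost:7860/process_document",
#           files={"file": ("sample.pdf", f, "application/pdf")},
#           params={"end_pages": 5, "language": "auto"},
#       )
#   resp.raise_for_status()
#   combined = json.loads(resp.json()["text"])  # the combined_data structure built above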


if __name__ == "__main__":
    # Run with log output enabled
    uvicorn.run(
        app,
        host="0.0.0.0",
        port=7860,
        log_level="info",
        access_log=True
    )