Alignment-Lab-AI committed on
Commit dc61d35 (verified) · 1 Parent(s): 687f691

Upload scrpt27.py with huggingface_hub

Files changed (1):
  scrpt27.py +435 -0
scrpt27.py ADDED
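"""Batch-process a folder of PDFs with magic-pdf.

Scans and classifies each PDF, runs the parsing pipeline, writes per-PDF
markdown/JSON artifacts plus extracted images, and appends one summary
record per file to a JSONL log. Work is spread across available GPUs
(or CPU), with a simple adaptive batch size that grows until the first
CUDA out-of-memory error and shrinks afterwards.
"""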
import os
import json
import shutil
import argparse
import logging
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor, as_completed
import torch
import psutil
import numpy as np
from tqdm import tqdm
from magic_pdf.pipe.UNIPipe import UNIPipe
from magic_pdf.libs.commons import read_file
from magic_pdf.libs.config_reader import get_device
from magic_pdf.tools.common import do_parse
from magic_pdf.libs.pdf_image_tools import cut_image
from magic_pdf.rw.DiskReaderWriter import DiskReaderWriter
from magic_pdf.filter.pdf_meta_scan import pdf_meta_scan
from magic_pdf.filter.pdf_classify_by_type import classify
import fitz  # PyMuPDF
import time
import signal

# Set up logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Minimum batch size
MIN_BATCH_SIZE = 1

def parse_arguments():
    parser = argparse.ArgumentParser(description="Process multiple PDFs using Magic PDF")
    parser.add_argument("--input", default="input", help="Input folder containing PDF files")
    parser.add_argument("--output", default="output", help="Output folder for processed files")
    parser.add_argument("--config", default="magic-pdf.template.json", help="Path to configuration file")
    parser.add_argument("--timeout", type=int, default=240, help="Timeout for processing each PDF (in seconds)")
    parser.add_argument("--max-workers", type=int, default=None, help="Maximum number of worker processes")
    parser.add_argument("--use-bf16", action="store_true", help="Use bfloat16 precision for model inference")
    parser.add_argument("--initial-batch-size", type=int, default=1, help="Initial batch size for processing")
    return parser.parse_args()

def load_config(config_path):
    with open(config_path, 'r') as f:
        return json.load(f)

def get_available_memory(gpu_id):
    return torch.cuda.get_device_properties(gpu_id).total_memory - torch.cuda.memory_allocated(gpu_id)
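# Note: get_available_memory subtracts only memory_allocated(), which counts
# live tensors; blocks held by the caching allocator (torch.cuda.memory_reserved)
# are not subtracted, so this free-VRAM estimate is optimistic.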

def extract_images(pdf_path, output_folder):
    doc = fitz.open(pdf_path)
    pdf_name = os.path.splitext(os.path.basename(pdf_path))[0]
    images_folder = os.path.join(output_folder, 'images')
    os.makedirs(images_folder, exist_ok=True)

    for page_num, page in enumerate(doc):
        for img_index, img in enumerate(page.get_images(full=True)):
            xref = img[0]
            base_image = doc.extract_image(xref)
            image_bytes = base_image["image"]
            image_ext = base_image["ext"]
            image_filename = f'{pdf_name}_{page_num+1:03d}_{img_index+1:03d}.{image_ext}'
            image_path = os.path.join(images_folder, image_filename)
            with open(image_path, "wb") as image_file:
                image_file.write(image_bytes)
    doc.close()

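# Example (hypothetical paths): extract_images("input/report.pdf", out_dir)
# writes out_dir/images/report_001_001.png, report_001_002.jpeg, and so on,
# numbering images by page and position within the page.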
class MagicModel:
    def __init__(self, config):
        self.config = config

    def process_pdf(self, pdf_data, parse_type, layout_info, log_file_path):
        processed_pages = []
        with open(log_file_path, 'a') as log_file:
            log_file.write("Entering process_pdf\n")
            log_file.write(f"  parse_type: {parse_type} (expected: str)\n")
            log_file.write(f"  layout_info (length: {len(layout_info)}) (expected: list of dicts): {layout_info}\n")
        for page_index, page_info in enumerate(layout_info):
            try:
                with open(log_file_path, 'a') as log_file:
                    log_file.write(f"Processing page {page_index}\n")
                    log_file.write(f"  Page info (expected: dict): {page_info}\n")
                processed_page = self.process_page(page_info, parse_type, log_file_path)
                processed_pages.append(processed_page)
            except Exception as e:
                with open(log_file_path, 'a') as log_file:
                    log_file.write(f"Error processing page {page_index} in process_pdf: {str(e)}\n")
                    log_file.write(f"Page info (expected: dict): {page_info}\n")
        with open(log_file_path, 'a') as log_file:
            log_file.write("Exiting process_pdf\n")
        return {
            "processed_pages": processed_pages,
            "parse_type": parse_type,
        }

    def process_page(self, page_info, parse_type, log_file_path):
        with open(log_file_path, 'a') as log_file:
            log_file.write("Entering process_page\n")
            log_file.write(f"  page_info (expected: dict): {page_info}\n")
            log_file.write(f"  parse_type (expected: str): {parse_type}\n")
        result = {
            "page_no": page_info.get("page_info", {}).get("page_no", "unknown"),
            "content": "Processed page content",
            "parse_type": parse_type
        }
        with open(log_file_path, 'a') as log_file:
            log_file.write("Exiting process_page\n")
        return result

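# NOTE: MagicModel is a lightweight stand-in: process_page only echoes the page
# number and parse type rather than running real model inference, so the
# parse_result consumed downstream contains placeholder content.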
def process_single_pdf(input_file, output_folder, gpu_id, config, timeout, use_bf16, model, log_file_path):
    start_time = time.time()
    pdf_name = os.path.splitext(os.path.basename(input_file))[0]
    output_subfolder = os.path.join(output_folder, pdf_name, 'auto')
    os.makedirs(output_subfolder, exist_ok=True)

    def timeout_handler(signum, frame):
        raise TimeoutError("PDF processing timed out")

    try:
        # SIGALRM-based timeouts are Unix-only.
        signal.signal(signal.SIGALRM, timeout_handler)
        signal.alarm(timeout)

        if gpu_id >= 0:
            torch.cuda.set_device(gpu_id)
            if use_bf16 and torch.cuda.is_bf16_supported():
                torch.set_default_dtype(torch.bfloat16)
            else:
                torch.set_default_dtype(torch.float32)
            torch.set_default_device(f'cuda:{gpu_id}')
        else:
            if use_bf16:
                torch.set_default_dtype(torch.bfloat16)
            else:
                torch.set_default_dtype(torch.float32)
            torch.set_default_device('cpu')

        pdf_data = read_file(input_file, 'rb')

        # Perform PDF metadata scan
        metadata = pdf_meta_scan(pdf_data)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Processing PDF: {input_file}\n")
            log_file.write(f"Metadata (expected: dict): {json.dumps(metadata, indent=2)}\n")

        # Check if metadata indicates the PDF should be dropped
        if metadata.get("_need_drop", False):
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Dropping PDF {input_file}: {metadata.get('_drop_reason', 'Unknown reason')}\n")
            return input_file, "Dropped", None

        # Check that all required fields are present in the metadata
        required_fields = ['total_page', 'page_width_pts', 'page_height_pts', 'image_info_per_page',
                           'text_len_per_page', 'imgs_per_page', 'text_layout_per_page', 'invalid_chars']
        for field in required_fields:
            if field not in metadata:
                raise ValueError(f"Required field '{field}' not found in metadata for {input_file}")

        # Extract the fields the classify function needs
        total_page = metadata['total_page']
        page_width = metadata['page_width_pts']
        page_height = metadata['page_height_pts']
        img_sz_list = metadata['image_info_per_page']
        text_len_list = metadata['text_len_per_page']
        img_num_list = metadata['imgs_per_page']
        text_layout_list = metadata['text_layout_per_page']
        invalid_chars = metadata['invalid_chars']

        with open(log_file_path, 'a') as log_file:
            log_file.write("Classify parameters:\n")
            log_file.write(f"  total_page (expected: int): {total_page}\n")
            log_file.write(f"  page_width (expected: int): {page_width}\n")
            log_file.write(f"  page_height (expected: int): {page_height}\n")
            log_file.write(f"  img_sz_list (expected: list of lists): {img_sz_list[:5]}...\n")
            log_file.write(f"  text_len_list (expected: list of ints): {text_len_list[:5]}...\n")
            log_file.write(f"  img_num_list (expected: list of ints): {img_num_list[:5]}...\n")
            log_file.write(f"  text_layout_list (expected: list of strs): {text_layout_list[:5]}...\n")
            log_file.write(f"  invalid_chars (expected: bool): {invalid_chars}\n")

        # Classify the PDF
        try:
            is_text_pdf, classification_results = classify(
                total_page, page_width, page_height, img_sz_list[:total_page],
                text_len_list[:total_page], img_num_list[:total_page],
                text_layout_list, invalid_chars
            )
            with open(log_file_path, 'a') as log_file:
                log_file.write("Classification results:\n")
                log_file.write(f"  is_text_pdf (expected: bool): {is_text_pdf}\n")
                log_file.write(f"  classification_results (expected: dict): {classification_results}\n")
        except Exception as e:
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Error in classify function for {input_file}: {str(e)}\n")
            return input_file, f"Classification Error: {str(e)}", None

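        # classify() returns (bool, dict): is_text_pdf is True when the PDF has
        # a usable embedded text layer; non-text PDFs are routed to OCR further below.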
        image_writer = DiskReaderWriter(output_subfolder)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Image writer initialized: {image_writer}\n")

        # Create jso_useful_key as a dictionary
        model_json = []  # Or load your model data here
        jso_useful_key = {"_pdf_type": "", "model_list": model_json}

        unipipe = UNIPipe(pdf_data, jso_useful_key, image_writer)
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"UNIPipe initialized: {unipipe}\n")

        parse_type = unipipe.pipe_classify()
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"pipe_classify result (expected: str): {parse_type}\n")

        # Log pipe_analyze inputs and output in detail
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Detailed pipe_analyze inputs for {input_file}:\n")
            log_file.write(f"  parse_type (expected: str): {parse_type}\n")
        try:
            layout_info = unipipe.pipe_analyze()
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"pipe_analyze results (expected: list of dicts, length: {len(layout_info)}): {layout_info}\n")
        except Exception as e:
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Error in pipe_analyze for {input_file}: {str(e)}\n")
            return input_file, f"pipe_analyze Error: {str(e)}", None

        # Use OCR if the PDF is not classified as a text PDF
        if not is_text_pdf:
            parse_type = 'ocr'
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"parse_type after OCR check (expected: str): {parse_type}\n")

        # Process the PDF using the model
        try:
            parse_result = model.process_pdf(pdf_data, parse_type, layout_info, log_file_path)
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Model process_pdf result (expected: dict): {parse_result}\n")
        except Exception as e:
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Error in model processing for {input_file}: {str(e)}\n")
            return input_file, f"Model Processing Error: {str(e)}", None

        try:
            markdown_content = unipipe.pipe_mk_markdown(parse_result)
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"pipe_mk_markdown result (expected: str, length: {len(markdown_content)}): {markdown_content}\n")
        except Exception as e:
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Error in pipe_mk_markdown for {input_file}: {str(e)}\n")
                log_file.write(f"  parse_result (expected: dict): {parse_result}\n")
            return input_file, f"pipe_mk_markdown Error: {str(e)}", None

        try:
            uni_format = unipipe.pipe_mk_uni_format(parse_result)
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"pipe_mk_uni_format result (expected: dict): {uni_format}\n")
        except Exception as e:
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Error in pipe_mk_uni_format for {input_file}: {str(e)}\n")
                log_file.write(f"  parse_result (expected: dict): {parse_result}\n")
            return input_file, f"pipe_mk_uni_format Error: {str(e)}", None

        # Write markdown content
        with open(os.path.join(output_subfolder, f'{pdf_name}.md'), 'w', encoding='utf-8') as f:
            f.write(markdown_content)

        # Write middle.json
        with open(os.path.join(output_subfolder, 'middle.json'), 'w', encoding='utf-8') as f:
            json.dump(parse_result, f, ensure_ascii=False, indent=2)

        # Write model.json
        with open(os.path.join(output_subfolder, 'model.json'), 'w', encoding='utf-8') as f:
            json.dump(uni_format, f, ensure_ascii=False, indent=2)

        # Copy the original PDF
        shutil.copy(input_file, os.path.join(output_subfolder, f'{pdf_name}.pdf'))

        # Generate layout.pdf and spans.pdf
        try:
            do_parse(input_file, parse_type, output_subfolder, draw_bbox=True)
        except Exception as e:
            with open(log_file_path, 'a') as log_file:
                log_file.write(f"Error in do_parse for {input_file}: {str(e)}\n")
            return input_file, f"do_parse Error: {str(e)}", None

        # Extract images
        extract_images(input_file, output_subfolder)

        processing_time = time.time() - start_time
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Successfully processed {input_file} on GPU {gpu_id} in {processing_time:.2f} seconds\n")

        # Prepare the result record for JSONL output
        result = {
            "file_name": pdf_name,
            "processing_time": processing_time,
            "parse_type": parse_type,
            "metadata": metadata,
            "classification": classification_results,
            "is_text_pdf": is_text_pdf
        }

        return input_file, "Success", result

    except ValueError as ve:
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Metadata error: {str(ve)}\n")
        return input_file, f"Metadata Error: {str(ve)}", None

    except TimeoutError:
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Processing timed out after {timeout} seconds\n")
        return input_file, "Timeout", None

    except Exception as e:
        with open(log_file_path, 'a') as log_file:
            log_file.write(f"Error occurred: {str(e)}\n")
        return input_file, f"Error: {str(e)}", None

    finally:
        signal.alarm(0)  # Cancel the alarm
        if gpu_id >= 0:
            torch.cuda.empty_cache()

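# Each result tuple from process_single_pdf is (input_file, status, record),
# where record is a dict on success and None otherwise.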
def process_pdf_batch(batch, output_folder, gpu_id, config, timeout, use_bf16, model, log_file_path):
    results = []
    for pdf_file in batch:
        result = process_single_pdf(pdf_file, output_folder, gpu_id, config, timeout, use_bf16, model, log_file_path)
        results.append(result)
    return results

def write_to_jsonl(results, output_file):
    with open(output_file, 'a') as f:
        for result in results:
            if result[2]:  # Skip entries that carry no result record
                json.dump(result[2], f)
                f.write('\n')

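# write_to_jsonl emits one record per line, shaped like:
# {"file_name": ..., "processing_time": ..., "parse_type": ...,
#  "metadata": {...}, "classification": {...}, "is_text_pdf": true}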
def get_gpu_memory_usage(gpu_id):
    if gpu_id < 0:
        return 0, 0  # CPU mode
    total_memory = torch.cuda.get_device_properties(gpu_id).total_memory
    allocated_memory = torch.cuda.memory_allocated(gpu_id)
    return allocated_memory, total_memory

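# Scheduling: main() shards the input list round-robin across GPUs; each shard
# is processed in batches whose size grows by one after every successful batch
# until the first CUDA OOM, after which it shrinks and stops growing.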
def main():
    mp.set_start_method('spawn', force=True)

    args = parse_arguments()
    config = load_config(args.config)

    input_folder = args.input
    output_folder = args.output
    os.makedirs(output_folder, exist_ok=True)

    pdf_files = [os.path.join(input_folder, f) for f in os.listdir(input_folder) if f.lower().endswith('.pdf')]

    num_gpus = torch.cuda.device_count()
    if num_gpus == 0:
        print("No GPUs available. Using CPU.")
        num_gpus = 1
        gpu_ids = [-1]
    else:
        gpu_ids = list(range(num_gpus))

    num_workers = args.max_workers or min(num_gpus, os.cpu_count())

    main_jsonl = os.path.join(output_folder, 'processing_results.jsonl')
    temp_jsonl = os.path.join(output_folder, 'temp_results.jsonl')
    log_file_path = os.path.join(output_folder, 'processing_log.txt')

    # Enable deterministic mode
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # Load the model
    model = MagicModel(config)

    results = []
    with ProcessPoolExecutor(max_workers=num_workers) as executor:
        for gpu_index, gpu_id in enumerate(gpu_ids):
            # Shard the input list so each GPU processes its own subset
            gpu_files = pdf_files[gpu_index::len(gpu_ids)]
            batch_size = args.initial_batch_size
            pdf_index = 0
            oom_occurred = False
            while pdf_index < len(gpu_files):
                batch = gpu_files[pdf_index:pdf_index + batch_size]
                try:
                    # NOTE: each batch is submitted and awaited immediately, so
                    # this loop runs batches sequentially rather than in parallel.
                    future = executor.submit(process_pdf_batch, batch, output_folder, gpu_id, config, args.timeout, args.use_bf16, model, log_file_path)
                    batch_results = future.result()
                    results.extend(batch_results)
                    for result in batch_results:
                        write_to_jsonl([result], temp_jsonl)

                    # Log VRAM usage
                    allocated, total = get_gpu_memory_usage(gpu_id)
                    with open(log_file_path, 'a') as log_file:
                        log_file.write(f"GPU {gpu_id} - Batch size: {batch_size}, VRAM usage: {allocated/1024**3:.2f}GB / {total/1024**3:.2f}GB\n")
                    # If successful and OOM hasn't occurred yet, increase batch size
                    if not oom_occurred:
                        batch_size += 1
                    pdf_index += len(batch)
                except torch.cuda.OutOfMemoryError:
                    # If OOM occurs, reduce batch size and retry the same batch
                    oom_occurred = True
                    batch_size = max(MIN_BATCH_SIZE, batch_size - 1)
                    with open(log_file_path, 'a') as log_file:
                        log_file.write(f"OOM error occurred. Reducing batch size to {batch_size}\n")
                    torch.cuda.empty_cache()
                    continue

                # After processing each batch, move temp JSONL to main JSONL
                if os.path.exists(temp_jsonl):
                    with open(temp_jsonl, 'r') as temp, open(main_jsonl, 'a') as main:
                        shutil.copyfileobj(temp, main)
                    os.remove(temp_jsonl)

                # Clear GPU cache after each batch
                if gpu_id >= 0:
                    torch.cuda.empty_cache()

    success_count = sum(1 for _, status, _ in results if status == "Success")
    timeout_count = sum(1 for _, status, _ in results if status == "Timeout")
    error_count = len(results) - success_count - timeout_count

    with open(log_file_path, 'a') as log_file:
        log_file.write(f"Processed {len(results)} PDFs. {success_count} succeeded, {timeout_count} timed out, {error_count} failed.\n")

    with open(os.path.join(output_folder, 'processing_summary.txt'), 'w') as summary:
        summary.write(f"Total PDFs processed: {len(results)}\n")
        summary.write(f"Successful: {success_count}\n")
        summary.write(f"Timed out: {timeout_count}\n")
        summary.write(f"Failed: {error_count}\n\n")
        summary.write("Failed PDFs:\n")
        for pdf, status, _ in [result for result in results if result[1] != "Success"]:
            summary.write(f"  - {pdf}: {status}\n")

if __name__ == '__main__':
    main()
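
# Example invocation (hypothetical paths; flags as defined in parse_arguments):
#   python scrpt27.py --input ./pdfs --output ./out \
#       --config magic-pdf.template.json --timeout 240 --use-bf16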