import os
import json
import time
import logging
from pathlib import Path
from typing import List, Dict, Optional
from dataclasses import dataclass, field
import fitz  # PyMuPDF
from llama_cpp import Llama
from fastapi.encoders import jsonable_encoder

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


@dataclass
class ProductSpec:
    name: str
    description: Optional[str] = None
    price: Optional[float] = None
    attributes: Dict[str, str] = field(default_factory=dict)
    tables: List[Dict] = field(default_factory=list)

    def to_dict(self):
        return jsonable_encoder(self)


class PDFProcessor:
    def __init__(self):
        self.emb_model = self._initialize_emb_model("all-MiniLM-L6-v2")
        # self.llm = self._initialize_llm("llama-2-7b.Q2_K.gguf")
        self.llm = self._initialize_llm("deepseek-llm-7b-base.Q2_K.gguf")
        self.output_dir = Path("./output")
        self.output_dir.mkdir(exist_ok=True)

    def _initialize_emb_model(self, model_name):
        try:
            # Imported lazily so the transformers fallback below can still run
            # when sentence_transformers is not installed.
            from sentence_transformers import SentenceTransformer
            return SentenceTransformer("sentence-transformers/" + model_name)
        except ImportError:
            # Fallback: load the underlying transformers model directly.
            # Note: AutoModel does not expose SentenceTransformer's .encode();
            # tokenization and pooling would have to be handled by the caller.
            from transformers import AutoTokenizer, AutoModel

            tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/" + model_name)
            model = AutoModel.from_pretrained("sentence-transformers/" + model_name)
            return model

    def _initialize_llm(self, model_name):
        """Initialize LLM with automatic download if needed"""
        # model_path = os.path.join("models/", model_name)
        # if os.path.exists(model_path):
        #     return Llama(
        #         model_path=model_path,
        #         n_ctx=1024,
        #         n_gpu_layers=-1,
        #         n_threads=os.cpu_count() - 1,
        #         verbose=False
        #     )
        return Llama.from_pretrained(
            repo_id="TheBloke/deepseek-llm-7B-base-GGUF",
            filename=model_name,
        )

    def process_pdf(self, pdf_path: str) -> Dict:
        """Process PDF using PyMuPDF"""
        start_time = time.time()

        # Open PDF
        doc = fitz.open(pdf_path)
        text_blocks = []
        tables = []

        # Extract text and tables
        for page_num, page in enumerate(doc):
            # Extract text blocks
            text_blocks.extend(self._extract_text_blocks(page))

            # Extract tables
            tables.extend(self._extract_tables(page, page_num))

        # Process text blocks with LLM
        products = []
        for block in text_blocks:
            product = self._process_text_block(block)
            if product:
                product.tables = tables
                products.append(product.to_dict())

        logger.info(f"Processed {len(products)} products in {time.time() - start_time:.2f}s")
        return {"products": products, "tables": tables}

    def _extract_text_blocks(self, page) -> List[str]:
        """Extract non-empty text blocks from a PDF page"""
        blocks = []
        for block in page.get_text("blocks"):
            text = block[4]  # The text content is at index 4
            if text.strip():  # Skip whitespace-only blocks before LLM processing
                blocks.append(text)
        return blocks

    def _extract_tables(self, page, page_num: int) -> List[Dict]:
        """Extract tables from a PDF page"""
        tables = []
        try:
            tab = page.find_tables()
            if tab.tables:
                for table_idx, table in enumerate(tab.tables):
                    table_data = table.extract()
                    if table_data:
                        tables.append({
                            "page": page_num + 1,
                            "cells": table_data,
                            "header": table.header.names if table.header else [],
                            "content": table_data
                        })
        except Exception as e:
            logger.warning(f"Error extracting tables from page {page_num}: {e}")
        return tables

    def _process_text_block(self, text: str) -> Optional[ProductSpec]:
        """Process text block with LLM"""
        prompt = self._generate_query_prompt(text)

        try:
            response = self.llm.create_chat_completion(
                messages=[{"role": "user", "content": prompt}],
                temperature=0.1,
                max_tokens=512
            )
            return self._parse_response(response['choices'][0]['message']['content'])
        except Exception as e:
            logger.warning(f"Error processing text block: {e}")
            return None

    def _generate_query_prompt(self, text: str) -> str:
        """Generate extraction prompt"""
        return f"""Extract product specifications from this text:
{text}

Return JSON format:
{{
    "name": "product name",
    "description": "product description",
    "price": numeric_price,
    "attributes": {{ "key": "value" }}
}}"""

    def _parse_response(self, response: str) -> Optional[ProductSpec]:
        """Parse the JSON object embedded in the LLM response"""
        try:
            json_start = response.find('{')
            json_end = response.rfind('}') + 1
            if json_start == -1 or json_end <= json_start:
                logger.warning("No JSON object found in LLM response")
                return None
            data = json.loads(response[json_start:json_end])
            return ProductSpec(
                name=data.get('name', ''),
                description=data.get('description'),
                price=data.get('price'),
                attributes=data.get('attributes', {})
            )
        except (json.JSONDecodeError, KeyError) as e:
            logger.warning(f"Parse error: {e}")
            return None


def process_pdf_catalog(pdf_path: str):
    processor = PDFProcessor()
    try:
        result = processor.process_pdf(pdf_path)
        return result, "Processing completed successfully!"
    except Exception as e:
        logger.error(f"Processing failed: {e}")
        return {}, "Error processing PDF"