from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
from PyPDF2 import PdfReader

class PDFAssistant:
    def __init__(self):
        self.model_name = "meta-llama/Llama-3.3-70B-Instruct"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto"
        )
    
    def answer_query(self, query, pdf_file):
        # Extract text from PDF
        pdf_text = self._extract_pdf_text(pdf_file)
        
        # Create prompt with context
        prompt = f"Context from PDF: {pdf_text}\nQuestion: {query}"
        
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=1024,  # cap generated tokens; max_length would count the prompt as well
            do_sample=True,       # sampling must be enabled for temperature/top_p to take effect
            temperature=0.3,
            top_p=0.95,
            pad_token_id=self.tokenizer.eos_token_id
        )
        # Decode only the newly generated tokens so the prompt is not echoed back
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)
    
    def _extract_pdf_text(self, pdf_file):
        reader = PdfReader(pdf_file)
        text = ""
        for page in reader.pages:
            # extract_text() can return None for pages with no extractable text
            text += page.extract_text() or ""
        return text
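

# Minimal usage sketch (illustrative only; "example.pdf" and the question are
# placeholders, and loading the 70B model assumes sufficient GPU memory and
# the `accelerate` package for device_map="auto"):
if __name__ == "__main__":
    assistant = PDFAssistant()
    answer = assistant.answer_query("What is the main topic of this document?", "example.pdf")
    print(answer)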