import torch
from PyPDF2 import PdfReader
from transformers import AutoTokenizer, AutoModelForCausalLM


class PDFAssistant:
    def __init__(self):
        self.model_name = "meta-llama/Llama-3.3-70B-Instruct"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )

    def answer_query(self, query, pdf_file):
        # Extract text from the PDF and use it as context for the model
        pdf_text = self._extract_pdf_text(pdf_file)
        prompt = f"Context from PDF: {pdf_text}\nQuestion: {query}"
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        outputs = self.model.generate(
            **inputs,
            max_new_tokens=1024,  # cap generated tokens; max_length would count the prompt too
            do_sample=True,       # sampling must be enabled for temperature/top_p to take effect
            temperature=0.3,
            top_p=0.95,
        )
        # Decode only the newly generated tokens, not the echoed prompt
        new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
        return self.tokenizer.decode(new_tokens, skip_special_tokens=True)

    def _extract_pdf_text(self, pdf_file):
        reader = PdfReader(pdf_file)
        # extract_text() can return None for image-only pages, so guard against it
        return "\n".join(page.extract_text() or "" for page in reader.pages)
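

# Minimal usage sketch. Assumptions not in the original: the 70B checkpoint
# fits on the available GPUs (or is sharded across them by device_map="auto"),
# and "manual.pdf" is a hypothetical local file standing in for a real PDF.
# Note that very long PDFs may exceed the model's context window and would
# need chunking or truncation before being placed in the prompt.
if __name__ == "__main__":
    assistant = PDFAssistant()
    answer = assistant.answer_query("What does the introduction cover?", "manual.pdf")
    print(answer)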