File size: 1,603 Bytes
7e75a72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
from langchain_groq import ChatGroq
import os
import json
from typing import List, Dict

class LLMProcessor:
    """Wrap a FastEmbed embedding model and a Groq chat LLM for answering
    questions over retrieved document chunks (RAG-style)."""

    def __init__(self):
        """Initialize embedding model and Groq LLM.

        Reads the Groq API key from the GROQ_API_KEY environment variable.
        If it is unset, ``api_key`` is None and the request will fail at
        call time rather than here.
        """
        self.api_key = os.getenv("GROQ_API_KEY")

        # Use FastEmbed instead of SentenceTransformer
        self.embed_model = FastEmbedEmbeddings()

        self.llm = ChatGroq(
            model_name="mixtral-8x7b-32768",
            api_key=self.api_key
        )

    def format_context(self, chunks: List[Dict]) -> str:
        """Format retrieved chunks into a structured context for the LLM.

        Each chunk is expected to carry a 'text' entry and may carry
        'headings' (a JSON-encoded list of section titles) and 'page'
        (a page number). Missing or malformed metadata is skipped instead
        of aborting the whole formatting pass.

        Args:
            chunks: Retrieved chunk dicts.

        Returns:
            All chunks joined into one newline-separated string, each chunk
            followed by a 40-dash separator. Empty input yields "".
        """
        context_parts: List[str] = []
        for chunk in chunks:
            # 'headings' is stored as a JSON list. Narrowed from a bare
            # `except:` (which also swallowed KeyboardInterrupt/SystemExit)
            # to exactly the failures json.loads/chunk lookup can produce:
            # missing key, non-string value, or invalid JSON.
            try:
                headings = json.loads(chunk['headings'])
                if headings:
                    context_parts.append(f"Section: {' > '.join(headings)}")
            except (KeyError, TypeError, json.JSONDecodeError):
                pass

            # .get() tolerates chunks without a 'page' key; the original
            # chunk['page'] raised KeyError while the headings path above
            # tolerated absence — made the two consistent.
            if chunk.get('page'):
                context_parts.append(f"Page {chunk['page']}:")

            context_parts.append(chunk['text'])
            context_parts.append("-" * 40)  # visual separator between chunks

        return "\n".join(context_parts)

    def generate_answer(self, context: str, question: str) -> str:
        """Generate an answer to `question` grounded in `context`.

        NOTE(review): ChatGroq.invoke returns a message object (AIMessage),
        not a plain str, despite the annotation — callers presumably read
        `.content`; confirm before tightening the return type.
        """
        prompt = f"""Based on the following excerpts from a document:



{context}



Please answer this question: {question}



Make use of the section information and page numbers in your answer when relevant.

"""
        return self.llm.invoke(prompt)