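"""Streamlit RAG demo.

Upload a PDF, split its text into chunks, embed the chunks with
sentence-transformers, index them in FAISS, and answer user questions with a
Groq-hosted LLM using the best-matching chunk as context.
"""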
import os
import io
import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from sentence_transformers import SentenceTransformer
import faiss
from groq import Groq

# Load environment variables from a .env file, if present
load_dotenv()

# Initialize the Groq client; the API key is read from the MY_API_KEY variable
client = Groq(api_key=os.getenv("MY_API_KEY"))

# Load the embedding model once and cache it across Streamlit reruns
@st.cache_resource(show_spinner=False)
def load_embedding_model():
    return SentenceTransformer("all-MiniLM-L6-v2")

embedding_model = load_embedding_model()

# Streamlit UI
st.set_page_config(page_title="RAG-Based Application", layout="wide")
st.title("RAG-Based Application")
st.sidebar.header("Upload Your PDF")

uploaded_file = st.sidebar.file_uploader("Upload a PDF file", type=["pdf"])

if uploaded_file is not None:
    try:
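        # Streamlit reruns this entire script on every interaction, so the PDF
        # is re-extracted and re-embedded each time a question is submitted;
        # st.session_state or st.cache_data could cache the processed chunks.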
        # Extract text from PDF
        st.write("Extracting text from the PDF...")
        reader = PdfReader(io.BytesIO(uploaded_file.read()))
        text = "".join([page.extract_text() for page in reader.pages])

        if not text.strip():
            st.error("The uploaded PDF contains no text. Please upload a valid document.")
            st.stop()

        # Split the text into chunks
        st.write("Processing the PDF into chunks...")
        chunk_size = 500
        chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]
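        # Fixed-size character windows are a simple baseline; they can split
        # sentences mid-word. Overlapping or sentence-aware chunking usually
        # retrieves more coherent context at the cost of a larger index.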

        # Create embeddings for the chunks
        st.write("Creating embeddings for text chunks...")
        embeddings = embedding_model.encode(chunks)
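        # encode() on a list of strings returns a 2-D float32 array, which is
        # what FAISS expects; the reshape below guards the 1-D edge case.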
        if len(embeddings.shape) == 1:
            embeddings = embeddings.reshape(1, -1)

        # Store embeddings in FAISS
        st.write("Storing embeddings in FAISS...")
        dimension = embeddings.shape[1]
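        # IndexFlatL2 performs exact (brute-force) L2 search; for the small
        # number of chunks from a single PDF it is fast and needs no training.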
        index = faiss.IndexFlatL2(dimension)
        index.add(embeddings)
        st.write(f"Stored {len(chunks)} chunks in FAISS.")

        # Ask a question
        st.subheader("Ask a Question")
        user_query = st.text_input("Enter your question:")
        if user_query:
            query_embedding = embedding_model.encode([user_query])
            distances, indices = index.search(query_embedding, k=1)
            best_chunk = chunks[indices[0][0]]
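            # k=1 keeps only the single closest chunk; raising k (e.g. to 3)
            # and concatenating the hits would give the LLM more context.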

            # Use Groq API to interact with the LLM
            st.write("Interacting with the LLM...")
            chat_completion = client.chat.completions.create(
                messages=[
                    {
                        "role": "user",
                        "content": f"Using this context: {best_chunk}, answer the following question: {user_query}",
                    }
                ],
                model="llama3-8b-8192",
            )

            # Display the response
            st.subheader("LLM Response")
            st.write(chat_completion.choices[0].message.content)
    except Exception as e:
        st.error(f"An error occurred: {e}")