adilriaz264 committed
Commit 19ceabc · verified · 1 Parent(s): d70eb8a

Create app.py

Files changed (1): app.py +97 -0
app.py ADDED
@@ -0,0 +1,97 @@
+ import os
+ import streamlit as st
+ from PyPDF2 import PdfReader
+ from sentence_transformers import SentenceTransformer
+ import faiss
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from groq import Groq
+
+ # Read the Groq API key from the environment instead of hardcoding a secret in the source
+ GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
+ client = Groq(api_key=GROQ_API_KEY)
+
+ # Initialize the embedding model
+ embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
+
+ # Initialize the FAISS index
+ embedding_dim = 384  # Dimensionality of 'all-MiniLM-L6-v2' embeddings
+ faiss_index = faiss.IndexFlatL2(embedding_dim)
+
+ # Metadata (chunk text and source file) stored in the same order as the FAISS vectors
+ metadata_store = []
+
+ def extract_text_from_pdf(pdf_file):
+     # Concatenate the text of every page in the uploaded PDF
+     pdf_reader = PdfReader(pdf_file)
+     text = ""
+     for page in pdf_reader.pages:
+         text += page.extract_text() or ""  # extract_text() can return None for empty pages
+     return text
+
+ def chunk_text(text, chunk_size=500):
+     # Split the text into chunks of roughly `chunk_size` words
+     words = text.split()
+     return [' '.join(words[i:i+chunk_size]) for i in range(0, len(words), chunk_size)]
+
+ def generate_embeddings(chunks):
+     return embedding_model.encode(chunks)
+
+ def store_embeddings(embeddings, metadata):
+     # FAISS expects a float32 matrix; metadata stays aligned with the added vectors
+     faiss_index.add(np.array(embeddings, dtype="float32"))
+     metadata_store.extend(metadata)
+
+ def retrieve_relevant_chunks(query, k=5):
+     query_embedding = embedding_model.encode([query])
+     distances, indices = faiss_index.search(np.array(query_embedding, dtype="float32"), k)
+     # distances and indices have shape (1, k); skip the -1 entries returned
+     # when the index holds fewer than k vectors
+     return [(metadata_store[i], distances[0][rank])
+             for rank, i in enumerate(indices[0]) if i != -1]
+
+ def ask_groq_api(question, context):
+     # Send the retrieved context plus the user's question to the Groq chat API
+     chat_completion = client.chat.completions.create(
+         messages=[
+             {"role": "user", "content": f"{context}\n\n{question}"}
+         ],
+         model="llama3-8b-8192"
+     )
+     return chat_completion.choices[0].message.content
+
+ # Streamlit App
+ st.title("RAG-Based Research Paper Analyzer")
+
+ uploaded_files = st.file_uploader("Upload PDF Files", accept_multiple_files=True, type="pdf")
+
+ if uploaded_files:
+     all_chunks = []
+     all_metadata = []
+
+     for uploaded_file in uploaded_files:
+         # Extract, chunk, embed, and index each uploaded paper
+         text = extract_text_from_pdf(uploaded_file)
+         chunks = chunk_text(text)
+         embeddings = generate_embeddings(chunks)
+         metadata = [{"chunk": chunk, "file_name": uploaded_file.name} for chunk in chunks]
+         store_embeddings(embeddings, metadata)
+         all_chunks.extend(chunks)
+         all_metadata.extend(metadata)
+
+     st.success("Files uploaded and processed successfully!")
+
+     if st.button("View Topic Summaries"):
+         # Show the first few chunks as a quick preview of the indexed content
+         for chunk in all_chunks[:3]:
+             st.write(chunk)
+
+     user_question = st.text_input("Ask a question about the uploaded papers:")
+     if user_question:
+         relevant_chunks = retrieve_relevant_chunks(user_question)
+         context = "\n\n".join([chunk['chunk'] for chunk, _ in relevant_chunks])
+         answer = ask_groq_api(user_question, context)
+         st.write("**Answer:**", answer)
+
+     if st.button("Generate Scatter Plot"):
+         st.write("Generating scatter plot for methods vs. results...")
+         # Example scatter plot (replace with real data)
+         fig, ax = plt.subplots()
+         x = np.random.rand(10)
+         y = np.random.rand(10)
+         ax.scatter(x, y)
+         ax.set_xlabel("Methods")
+         ax.set_ylabel("Results")
+         st.pyplot(fig)
+
+     st.text_area("Annotate Your Insights:", height=100, key="annotations")