Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os

import faiss
import numpy as np
import openai
import PyPDF2
import streamlit as st
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
|
8 |
+
|
9 |
+
# Function to extract text from a PDF file
def extract_text_from_pdf(pdf_file):
    """Extract and concatenate the text of every page in *pdf_file*.

    Parameters
    ----------
    pdf_file : file-like object
        An open PDF stream accepted by ``PyPDF2.PdfReader``.

    Returns
    -------
    str
        Concatenated text of all pages; pages with no extractable text
        contribute nothing.
    """
    reader = PyPDF2.PdfReader(pdf_file)
    text = ""
    for page in reader.pages:
        # extract_text() returns None for pages without extractable text
        # (e.g. scanned images); guard so the concatenation never raises
        # TypeError on such pages.
        text += page.extract_text() or ""
    return text
|
16 |
+
|
17 |
+
# Function to generate embeddings for a piece of text
def get_embeddings(text, model="text-embedding-ada-002"):
    """Return the OpenAI embedding vector for *text* using *model*.

    Parameters
    ----------
    text : str
        The text to embed.
    model : str, optional
        Name of the OpenAI embedding model (default ``text-embedding-ada-002``).

    Returns
    -------
    list[float]
        The embedding vector for the single input text.
    """
    api_result = openai.Embedding.create(model=model, input=[text])
    first_item = api_result["data"][0]
    return first_item["embedding"]
|
21 |
+
|
22 |
+
# Function to search for similar content
def search_similar(query_embedding, index, stored_texts, top_k=3):
    """Return up to *top_k* (text, distance) pairs most similar to the query.

    Parameters
    ----------
    query_embedding : sequence of float
        Embedding vector of the user query.
    index : faiss.Index
        Index whose vectors correspond positionally to *stored_texts*.
    stored_texts : sequence of str
        The texts whose embeddings were added to *index*.
    top_k : int, optional
        Maximum number of results to return (default 3).

    Returns
    -------
    list[tuple[str, float]]
        Matched texts with their distances, closest first.
    """
    # FAISS requires a 2-D float32 numpy array; a plain Python list
    # (as the original code passed) raises a TypeError.
    query = np.asarray([query_embedding], dtype="float32")
    distances, indices = index.search(query, top_k)
    # FAISS pads results with index -1 when the index holds fewer than
    # top_k vectors; skip those padding entries.
    return [
        (stored_texts[i], distances[0][rank])
        for rank, i in enumerate(indices[0])
        if i >= 0
    ]
|
27 |
+
|
28 |
+
# Streamlit app starts here
st.title("Course Query Assistant")

# Input OpenAI API key
openai_api_key = st.text_input("Enter your OpenAI API key:", type="password")

if openai_api_key:
    openai.api_key = openai_api_key

    # Upload course materials
    uploaded_files = st.file_uploader("Upload Course Materials (PDFs)", type=["pdf"], accept_multiple_files=True)

    if uploaded_files:
        st.write("Processing uploaded course materials...")

        # Extract text and generate embeddings for all uploaded PDFs
        course_texts = []
        for uploaded_file in uploaded_files:
            text = extract_text_from_pdf(uploaded_file)
            course_texts.append(text)

        # Combine all course materials into one large text
        combined_text = " ".join(course_texts)

        # Split combined text into smaller chunks for embedding.
        # NOTE(review): this splits on characters, not tokens — 1000 chars
        # is roughly a few hundred tokens, well within the embedding limit.
        chunks = [combined_text[i:i+1000] for i in range(0, len(combined_text), 1000)]

        if not chunks:
            # All uploads were empty / had no extractable text; without this
            # guard, embeddings[0] below would raise IndexError.
            st.warning("No extractable text was found in the uploaded PDFs.")
        else:
            # Generate embeddings for all chunks
            embeddings = [get_embeddings(chunk) for chunk in chunks]

            # Create a FAISS index for similarity search.
            # IndexFlatL2.add() requires a 2-D float32 numpy array; passing
            # the plain Python list of lists (as the original code did)
            # raises a TypeError.
            index = faiss.IndexFlatL2(len(embeddings[0]))
            index.add(np.asarray(embeddings, dtype="float32"))

            st.write("Course materials have been processed and indexed.")

            # User query
            query = st.text_input("Enter your question about the course materials:")

            if query:
                # Generate embedding for the query
                query_embedding = get_embeddings(query)

                # Search for similar chunks in the FAISS index
                results = search_similar(query_embedding, index, chunks)

                # Create the context for the GPT prompt
                context = "\n".join([result[0] for result in results])
                modified_prompt = f"Context: {context}\n\nQuestion: {query}\n\nProvide a detailed answer based on the context."

                # Get the GPT-3.5-turbo response
                response = openai.ChatCompletion.create(
                    model="gpt-3.5-turbo",
                    messages=[{"role": "user", "content": modified_prompt}]
                )

                # Display the response
                st.write("### Intelligent Reply:")
                st.write(response['choices'][0]['message']['content'])
|