import streamlit as st
import PyPDF2
import openai
import faiss
import os
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Function to extract text from a PDF file
def extract_text_from_pdf(pdf_file):
    reader = PyPDF2.PdfReader(pdf_file)
    text = ""
    for page in reader.pages:
        text += page.extract_text() or ""  # extract_text() can return None for image-only pages
    return text

# Function to generate an embedding for a piece of text
def get_embeddings(text, model="text-embedding-ada-002"):
    response = openai.Embedding.create(input=[text], model=model)
    return response['data'][0]['embedding']

# Function to search the FAISS index for the chunks most similar to a query embedding
def search_similar(query_embedding, index, stored_texts, top_k=3):
    distances, indices = index.search(np.array([query_embedding], dtype="float32"), top_k)
    results = [(stored_texts[i], distances[0][idx]) for idx, i in enumerate(indices[0])]
    return results

# Function to generate code based on a prompt
def generate_code_from_prompt(prompt, model="gpt-4o-mini"):
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}]
    )
    return response['choices'][0]['message']['content']

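# Illustrative sketch (not part of the original app): how the retrieval helpers above
# fit together outside Streamlit. Assumes openai.api_key is already set and that
# `texts` is any list of strings you want to index.
#
#     texts = ["chunk one ...", "chunk two ..."]
#     vecs = np.array([get_embeddings(t) for t in texts]).astype("float32")
#     idx = faiss.IndexFlatL2(vecs.shape[1])
#     idx.add(vecs)
#     for chunk, dist in search_similar(get_embeddings("some question"), idx, texts):
#         print(dist, chunk[:80])
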
# Streamlit app starts here
st.title("AI Assistance")

# Input OpenAI API key
openai_api_key = st.text_input("Enter your OpenAI API key:", type="password")

if openai_api_key:
    openai.api_key = openai_api_key

    # Sidebar to toggle between Course Query Assistant and Code Generator
    st.sidebar.title("Select Mode")
    mode = st.sidebar.radio("Choose an option", ("Course Query Assistant", "Code Generator"))

    if mode == "Course Query Assistant":
        st.header("Course Query Assistant")

        # Upload course materials
        uploaded_files = st.file_uploader("Upload Course Materials (PDFs)", type=["pdf"], accept_multiple_files=True)

        if uploaded_files:
            st.write("Processing uploaded course materials...")

            # Extract text from all uploaded PDFs
            course_texts = []
            for uploaded_file in uploaded_files:
                text = extract_text_from_pdf(uploaded_file)
                course_texts.append(text)

            # Combine all course materials into one large text
            combined_text = " ".join(course_texts)

            # Split the combined text into smaller chunks for embedding (roughly 1,000 characters each)
            chunks = [combined_text[i:i+1000] for i in range(0, len(combined_text), 1000)]

            # Generate embeddings for all chunks
            embeddings = [get_embeddings(chunk) for chunk in chunks]

            # Convert the list of embeddings into a NumPy array (shape: [num_chunks, embedding_size])
            embeddings_np = np.array(embeddings).astype("float32")

            # Create a FAISS index for similarity search
            index = faiss.IndexFlatL2(len(embeddings_np[0]))  # Dimension = length of the embedding vectors
            index.add(embeddings_np)

            st.write("Course materials have been processed and indexed.")

            # User query
            query = st.text_input("Enter your question about the course materials:")

            if query:
                # Generate an embedding for the query
                query_embedding = get_embeddings(query)

                # Search for the most similar chunks in the FAISS index
                results = search_similar(query_embedding, index, chunks)

                # Build the context for the chat prompt from the retrieved chunks
                context = "\n".join([result[0] for result in results])
                modified_prompt = f"Context: {context}\n\nQuestion: {query}\n\nProvide a detailed answer based on the context."

                # Get the chat model's response
                response = openai.ChatCompletion.create(
                    model="gpt-4o-mini",  # Swap in another chat model if desired
                    messages=[{"role": "user", "content": modified_prompt}]
                )

                # Extract the response content
                response_content = response['choices'][0]['message']['content']

                # Display the response in Streamlit
                st.write("### Intelligent Reply:")
                st.write(response_content)

    elif mode == "Code Generator":
        st.header("Code Generator")

        # Code generation prompt input
        code_prompt = st.text_area("Describe the code you want to generate:",
                                   "e.g., Write a Python program that generates Fibonacci numbers.")

        if st.button("Generate Code"):
            if code_prompt:
                with st.spinner("Generating code..."):
                    # Generate code from the prompt
                    generated_code = generate_code_from_prompt(code_prompt)

                # Display the generated code
                st.write("### Generated Code:")
                st.code(generated_code, language="python")
            else:
                st.error("Please provide a prompt to generate the code.")
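
# Usage note (assumptions, not from the original Space): the calls above use the
# legacy pre-1.0 OpenAI Python SDK (openai.Embedding.create / openai.ChatCompletion.create),
# so the dependencies would need to pin openai below 1.0. A minimal requirements.txt
# sketch for running this app might look like:
#
#   streamlit
#   PyPDF2
#   openai<1.0
#   faiss-cpu
#   numpy
#   scikit-learn
#
# Then run locally with:
#   streamlit run app.py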