import streamlit as st
import PyPDF2
import openai
import faiss
import os
import numpy as np
from io import StringIO


# --------------------------------------------------------------------------
# Helper functions
# --------------------------------------------------------------------------

def extract_text_from_pdf(pdf_file):
    """Return the concatenated text of every page in *pdf_file*.

    *pdf_file* is any binary file-like object accepted by PyPDF2
    (e.g. a Streamlit ``UploadedFile``).
    """
    reader = PyPDF2.PdfReader(pdf_file)
    text = ""
    for page in reader.pages:
        # extract_text() may return None for image-only pages; don't crash.
        text += page.extract_text() or ""
    return text


def get_embeddings(text, model="text-embedding-ada-002"):
    """Return the embedding vector (list of floats) for *text* via OpenAI."""
    response = openai.Embedding.create(input=[text], model=model)
    return response['data'][0]['embedding']


def search_similar(query_embedding, index, stored_texts, top_k=3):
    """Return up to *top_k* ``(text, distance)`` pairs nearest to the query.

    *index* is a FAISS index built over the embeddings of *stored_texts*
    (row i of the index corresponds to ``stored_texts[i]``).
    """
    # FAISS requires a float32 matrix of shape (n_queries, dim).
    query_np = np.array([query_embedding], dtype="float32")
    distances, indices = index.search(query_np, top_k)
    return [
        (stored_texts[i], distances[0][rank])
        for rank, i in enumerate(indices[0])
        if i != -1  # FAISS pads with -1 when the index holds < top_k vectors
    ]


def generate_code_from_prompt(prompt, model="gpt-4o-mini"):
    """Ask the chat model to generate code for *prompt*; return the reply text."""
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
    )
    return response['choices'][0]['message']['content']


def save_code_to_file(code, filename="generated_code.txt"):
    """Write *code* to *filename* as UTF-8 text."""
    with open(filename, "w", encoding="utf-8") as f:
        f.write(code)


def generate_summary(text):
    """Return an AI-generated key-point summary of *text*."""
    prompt = f"Summarize the following text into key points:\n\n{text}"
    response = openai.ChatCompletion.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
    )
    return response['choices'][0]['message']['content']


# --------------------------------------------------------------------------
# Streamlit app
# --------------------------------------------------------------------------

st.title("AI Assistance")

# The API key gates the whole app: nothing below runs until it is provided.
openai_api_key = st.text_input("Enter your OpenAI API key:", type="password")

if openai_api_key:
    openai.api_key = openai_api_key

    # Sidebar to toggle between the assistant modes.
    st.sidebar.title("Select Mode")
    mode = st.sidebar.radio(
        "Choose an option",
        (
            "Course Query Assistant",
            "Code Generator",
            "AI Chatbot Tutor",
            "AI Study Notes & Summaries",
        ),
    )

    if mode == "Course Query Assistant":
        st.header("Course Query Assistant")

        uploaded_files = st.file_uploader(
            "Upload Course Materials (PDFs)",
            type=["pdf"],
            accept_multiple_files=True,
        )

        if uploaded_files:
            st.write("Processing uploaded course materials...")

            # Extract text from every PDF and merge into one corpus.
            course_texts = [extract_text_from_pdf(f) for f in uploaded_files]
            combined_text = " ".join(course_texts)

            # Split into ~1000-character chunks so each embedding request
            # stays well under the model's token limit.
            chunks = [
                combined_text[i:i + 1000]
                for i in range(0, len(combined_text), 1000)
            ]

            # Embed every chunk and index the vectors for similarity search.
            embeddings = [get_embeddings(chunk) for chunk in chunks]
            embeddings_np = np.array(embeddings).astype("float32")
            index = faiss.IndexFlatL2(embeddings_np.shape[1])
            index.add(embeddings_np)

            st.write("Course materials have been processed and indexed.")

            query = st.text_input(
                "Enter your question about the course materials:"
            )
            if query:
                # Retrieve the chunks most relevant to the question and use
                # them as grounding context for the chat model.
                query_embedding = get_embeddings(query)
                results = search_similar(query_embedding, index, chunks)
                context = "\n".join(text for text, _distance in results)
                modified_prompt = (
                    f"Context: {context}\n\nQuestion: {query}\n\n"
                    "Provide a detailed answer based on the context."
                )

                response = openai.ChatCompletion.create(
                    model="gpt-4o-mini",
                    messages=[{"role": "user", "content": modified_prompt}],
                )
                response_content = response['choices'][0]['message']['content']

                # The text area is selectable, so users can copy from it
                # directly; the old on_click "copy" button could not work
                # (widgets created inside callbacks are not rendered).
                st.write("### Intelligent Reply:")
                st.text_area("Response:", response_content, height=300)

    elif mode == "Code Generator":
        # This mode was offered in the sidebar but previously had no branch,
        # leaving generate_code_from_prompt/save_code_to_file unused.
        st.header("Code Generator")

        code_prompt = st.text_area("Describe the code you want generated:")
        if code_prompt and st.button("Generate Code"):
            with st.spinner("Generating code..."):
                generated_code = generate_code_from_prompt(code_prompt)
            st.code(generated_code)
            save_code_to_file(generated_code)
            st.success("Code saved to generated_code.txt")

    elif mode == "AI Chatbot Tutor":
        st.header("AI Chatbot Tutor")

        # Persist the conversation across Streamlit reruns. A plain local
        # list would be re-created (and the history lost) on every
        # interaction, because Streamlit re-executes the whole script.
        if "chat_history" not in st.session_state:
            st.session_state.chat_history = []

        def chat_with_bot(query):
            """Append *query* to the conversation, ask the model, return its reply."""
            st.session_state.chat_history.append(
                {"role": "user", "content": query}
            )
            response = openai.ChatCompletion.create(
                model="gpt-4o-mini",
                messages=st.session_state.chat_history,
            )
            reply = response['choices'][0]['message']['content']
            st.session_state.chat_history.append(
                {"role": "assistant", "content": reply}
            )
            return reply

        user_query = st.text_input("Ask a question:")
        if user_query:
            with st.spinner("Getting answer..."):
                bot_response = chat_with_bot(user_query)
            st.write(f"### AI Response: {bot_response}")
            # Selectable text area doubles as the copy affordance.
            st.text_area("Copy the response", bot_response, height=300)

    elif mode == "AI Study Notes & Summaries":
        st.header("AI Study Notes & Summaries")

        uploaded_files_for_summary = st.file_uploader(
            "Upload Course Materials (PDFs) for Summarization",
            type=["pdf"],
            accept_multiple_files=True,
        )

        if uploaded_files_for_summary:
            st.write("Generating study notes and summaries...")

            # Concatenate the text of every uploaded PDF, then summarize.
            all_text = "".join(
                extract_text_from_pdf(f) for f in uploaded_files_for_summary
            )
            summary = generate_summary(all_text)

            st.write("### AI-Generated Summary:")
            st.text_area("Summary:", summary, height=300)