|
|
|
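"""AI Assistance: a Streamlit app that bundles several OpenAI-powered study tools.

Modes include a course-materials Q&A assistant (PDF text extraction plus FAISS similarity
search), a code generator, an AI chatbot tutor, study-note summarisation, a code bug fixer,
subject assistants (mathematics, biology, chemistry, physics), and links to external
companion apps (voice chat, image chat, translation, image and diagram generation).

NOTE: this script uses the legacy openai<1.0 Python SDK interface
(openai.Embedding.create / openai.ChatCompletion.create).
"""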
import streamlit as st |
|
import PyPDF2 |
|
import openai |
|
import faiss |
|
import os |
|
import numpy as np |
|
from PIL import Image  # for displaying local image/logo files
|
|
|
# Function to extract text from a PDF file |
|
def extract_text_from_pdf(pdf_file): |
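    """Extract and concatenate the text of every page in an uploaded PDF file."""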
|
reader = PyPDF2.PdfReader(pdf_file) |
|
text = "" |
|
for page in reader.pages: |
|
        text += page.extract_text() or ""  # extract_text() can return None for pages without extractable text
|
return text |
|
|
|
# Function to generate embeddings for a piece of text |
|
def get_embeddings(text, model="text-embedding-ada-002"): |
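    """Return the embedding vector for the given text via the OpenAI embeddings endpoint."""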
|
response = openai.Embedding.create(input=[text], model=model) |
|
return response['data'][0]['embedding'] |
|
|
|
# Function to search for similar content |
|
def search_similar(query_embedding, index, stored_texts, top_k=3): |
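    """Search the FAISS index and return up to top_k (chunk_text, L2_distance) pairs for the query embedding."""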
|
    # FAISS expects float32 query vectors; skip any -1 indices returned when the index holds fewer than top_k items
    distances, indices = index.search(np.array([query_embedding], dtype="float32"), top_k)

    results = [(stored_texts[i], distances[0][idx]) for idx, i in enumerate(indices[0]) if i != -1]
|
return results |
|
|
|
# Function to generate code based on a prompt |
|
def generate_code_from_prompt(prompt, model="gpt-4o-mini"): |
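    """Send a single-turn prompt to the chat model and return the generated response text."""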
|
response = openai.ChatCompletion.create( |
|
model=model, |
|
messages=[{"role": "user", "content": prompt}] |
|
) |
|
return response['choices'][0]['message']['content'] |
|
|
|
# Function to save code to a .txt file |
|
def save_code_to_file(code, filename="generated_code.txt"): |
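    """Write the generated code to a local text file (overwrites any existing file)."""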
|
with open(filename, "w") as f: |
|
f.write(code) |
|
|
|
# Function to generate AI-based study notes and summaries |
|
def generate_summary(text): |
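    """Summarise the given text into key points; very long inputs may exceed the model's context window."""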
|
prompt = f"Summarize the following text into key points:\n\n{text}" |
|
response = openai.ChatCompletion.create( |
|
model="gpt-4o-mini", |
|
messages=[{"role": "user", "content": prompt}] |
|
) |
|
return response['choices'][0]['message']['content'] |
|
|
|
# Function to fix bugs in code |
|
def fix_code_bugs(buggy_code, model="gpt-4o-mini"): |
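    """Ask the chat model to identify and fix bugs in the supplied code and explain the changes."""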
|
prompt = f"The following code has bugs or issues. Please identify and fix the problems. If possible, provide explanations for the changes made.\n\nBuggy Code:\n{buggy_code}\n\nFixed Code:" |
|
response = openai.ChatCompletion.create( |
|
model=model, |
|
messages=[{"role": "user", "content": prompt}] |
|
) |
|
return response['choices'][0]['message']['content'] |
|
|
|
# Function to generate AI-based mathematical solutions |
|
def generate_math_solution(query): |
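    """Ask the chat model for a step-by-step explanation and solution of a mathematical problem."""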
|
prompt = f"Explain and solve the following mathematical problem step by step: {query}" |
|
response = openai.ChatCompletion.create( |
|
model="gpt-3.5-turbo", |
|
messages=[{"role": "user", "content": prompt}] |
|
) |
|
return response['choices'][0]['message']['content'] |
|
|
|
|
|
|
# Streamlit app starts here |
|
st.set_page_config(page_title="AI Assistance", page_icon=":robot:", layout="wide") |
|
|
|
# Display a logo or icon |
|
image = Image.open("14313824.png") # Path to your image file |
|
st.image(image, width=200) # You can adjust the width as needed |
|
|
|
|
st.title("AI Assistance") |
|
|
|
# Input OpenAI API key |
|
openai_api_key = st.text_input("Enter your OpenAI API key:", type="password") |
|
|
|
if openai_api_key: |
|
openai.api_key = openai_api_key |
|
|
|
# Sidebar to toggle between Course Query Assistant, Code Generator, Bug Fixer, etc. |
|
st.sidebar.title("Select Mode") |
|
mode = st.sidebar.radio("Choose an option", ( |
|
"Course Query Assistant", |
|
"Code Generator", |
|
"AI Chatbot Tutor", |
|
"AI Study Notes & Summaries", |
|
"Code Bug Fixer", |
|
"Mathematics Assistant", # Added option for Math |
|
"Biology Assistant", # Added option for Biology |
|
"Chemistry Assistant", # Added option for Chemistry |
|
"Physics Assistant", # Added option for Physics |
|
"Voice Chat", |
|
"Image Chat", |
|
"English To Japanese", |
|
"Text to Image Generator", |
|
"Graph Tutorial", |
|
"Text-To-Diagram-Generator" |
|
)) |
|
|
|
# Add Contact information in the sidebar |
|
st.sidebar.markdown(""" |
|
## Contact |
|
|
|
For any questions or issues, please contact: |
|
|
|
- **Email**: [[email protected]](mailto:[email protected]) |
|
- **GitHub**: [Click here to access the Github Profile](https://github.com/shukdevtroy) |
|
- **WhatsApp**: [Click here to chat](https://wa.me/+8801719296601) |
|
- **HuggingFace Profile**: [Click here to access the HuggingFace Profile](https://huggingface.co/shukdevdatta123) |
|
""") |
|
|
|
if mode == "Course Query Assistant": |
|
st.header("Course Query Assistant") |
|
|
|
# Display image/logo in the "Course Query Assistant" section (optional) |
|
course_query_image = Image.open("Capture.PNG") # Ensure the file is in the correct directory |
|
st.image(course_query_image, width=150) # Adjust the size as per preference |
|
|
|
# Upload course materials |
|
uploaded_files = st.file_uploader("Upload Course Materials (PDFs)", type=["pdf"], accept_multiple_files=True) |
|
|
|
if uploaded_files: |
|
st.write("Processing uploaded course materials...") |
|
|
|
# Extract text and generate embeddings for all uploaded PDFs |
|
course_texts = [] |
|
for uploaded_file in uploaded_files: |
|
text = extract_text_from_pdf(uploaded_file) |
|
course_texts.append(text) |
|
|
|
# Combine all course materials into one large text |
|
combined_text = " ".join(course_texts) |
|
|
|
            # Split the combined text into 1000-character chunks (characters, not tokens) to keep each piece within the embedding model's input limit
|
chunks = [combined_text[i:i+1000] for i in range(0, len(combined_text), 1000)] |
|
|
|
# Generate embeddings for all chunks |
|
embeddings = [get_embeddings(chunk) for chunk in chunks] |
|
|
|
# Convert the list of embeddings into a NumPy array (shape: [num_chunks, embedding_size]) |
|
embeddings_np = np.array(embeddings).astype("float32") |
|
|
|
# Create a FAISS index for similarity search |
|
index = faiss.IndexFlatL2(len(embeddings_np[0])) # Use the length of the embedding vectors for the dimension |
|
index.add(embeddings_np) |
|
|
|
st.write("Course materials have been processed and indexed.") |
|
|
|
# User query |
|
query = st.text_input("Enter your question about the course materials:") |
|
|
|
if query: |
|
# Generate embedding for the query |
|
query_embedding = get_embeddings(query) |
|
|
|
# Search for similar chunks in the FAISS index |
|
results = search_similar(query_embedding, index, chunks) |
|
|
|
# Create the context for the GPT prompt |
|
context = "\n".join([result[0] for result in results]) |
|
modified_prompt = f"Context: {context}\n\nQuestion: {query}\n\nProvide a detailed answer based on the context." |
|
|
|
# Get the GPT-4 response |
|
response = openai.ChatCompletion.create( |
|
model="gpt-4o-mini", # Update to GPT-4 (or your desired model) |
|
messages=[{"role": "user", "content": modified_prompt}] |
|
) |
|
|
|
# Get the response content |
|
response_content = response['choices'][0]['message']['content'] |
|
|
|
# Display the response in Streamlit (Intelligent Reply) |
|
st.write("### Intelligent Reply:") |
|
st.write(response_content) |
|
|
|
elif mode == "Code Generator": |
|
st.header("Code Generator") |
|
|
|
        # Display image/logo in the "Code Generator" section (optional)
|
codegen = Image.open("9802381.png") # Ensure the file is in the correct directory |
|
st.image(codegen, width=150) # Adjust the size as per preference |
|
|
|
# Code generation prompt input |
|
code_prompt = st.text_area("Describe the code you want to generate:", |
|
"e.g., Write a Python program that generates Fibonacci numbers.") |
|
|
|
if st.button("Generate Code"): |
|
if code_prompt: |
|
with st.spinner("Generating code..."): |
|
# Generate code using GPT-4 |
|
generated_code = generate_code_from_prompt(code_prompt) |
|
|
|
                    # Keep only lines that do not start with "#" (drops Python comments and Markdown headings; ``` fences are left in place)
|
clean_code = "\n".join([line for line in generated_code.splitlines() if not line.strip().startswith("#")]) |
|
|
|
# Save the clean code to a file |
|
save_code_to_file(clean_code) |
|
|
|
# Display the generated code |
|
st.write("### Generated Code:") |
|
st.code(clean_code, language="python") |
|
|
|
# Provide a download link for the generated code |
|
with open("generated_code.txt", "w") as f: |
|
f.write(clean_code) |
|
|
|
st.download_button( |
|
label="Download Generated Code", |
|
data=open("generated_code.txt", "rb").read(), |
|
file_name="generated_code.txt", |
|
mime="text/plain" |
|
) |
|
else: |
|
st.error("Please provide a prompt to generate the code.") |
|
|
|
elif mode == "AI Chatbot Tutor": |
|
st.header("AI Chatbot Tutor") |
|
|
|
        # Display image/logo in the "AI Chatbot Tutor" section (optional)
|
aitut = Image.open("910372.png") # Ensure the file is in the correct directory |
|
st.image(aitut, width=150) # Adjust the size as per preference |
|
|
|
# Chat interface for the AI tutor |
|
chat_history = [] |
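        # NOTE: chat_history is re-created on every Streamlit rerun, so earlier turns are not remembered;
        # persisting it in st.session_state would give the tutor a full conversation memory.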
|
|
|
def chat_with_bot(query): |
|
chat_history.append({"role": "user", "content": query}) |
|
response = openai.ChatCompletion.create( |
|
model="gpt-4o-mini", |
|
messages=chat_history |
|
) |
|
chat_history.append({"role": "assistant", "content": response['choices'][0]['message']['content']}) |
|
return response['choices'][0]['message']['content'] |
|
|
|
user_query = st.text_input("Ask a question:") |
|
|
|
if user_query: |
|
with st.spinner("Getting answer..."): |
|
bot_response = chat_with_bot(user_query) |
|
st.write(f"### AI Response: {bot_response}") |
|
|
|
elif mode == "AI Study Notes & Summaries": |
|
st.header("AI Study Notes & Summaries") |
|
|
|
        # Display image/logo in the "AI Study Notes & Summaries" section (optional)
|
aisum = Image.open("sum.png") # Ensure the file is in the correct directory |
|
st.image(aisum, width=150) # Adjust the size as per preference |
|
|
|
# Upload course materials for summarization |
|
uploaded_files_for_summary = st.file_uploader("Upload Course Materials (PDFs) for Summarization", type=["pdf"], accept_multiple_files=True) |
|
|
|
if uploaded_files_for_summary: |
|
st.write("Generating study notes and summaries...") |
|
|
|
# Extract text from PDFs |
|
all_text = "" |
|
for uploaded_file in uploaded_files_for_summary: |
|
text = extract_text_from_pdf(uploaded_file) |
|
all_text += text |
|
|
|
# Generate summary using AI |
|
summary = generate_summary(all_text) |
|
|
|
# Display the summary |
|
st.write("### AI-Generated Summary:") |
|
st.write(summary) |
|
|
|
elif mode == "Code Bug Fixer": |
|
st.header("Code Bug Fixer") |
|
|
|
        # Display image/logo in the "Code Bug Fixer" section (optional)
|
aibug = Image.open("bug.png") # Ensure the file is in the correct directory |
|
st.image(aibug, width=150) # Adjust the size as per preference |
|
|
|
# User input for buggy code |
|
buggy_code = st.text_area("Enter your buggy code here:") |
|
|
|
if st.button("Fix Code"): |
|
if buggy_code: |
|
with st.spinner("Fixing code..."): |
|
# Fix bugs using GPT-4 |
|
fixed_code = fix_code_bugs(buggy_code) |
|
|
|
# Display the fixed code |
|
st.write("### Fixed Code:") |
|
st.code(fixed_code, language="python") |
|
|
|
# Provide a download link for the fixed code |
|
with open("fixed_code.txt", "w") as f: |
|
f.write(fixed_code) |
|
|
|
st.download_button( |
|
label="Download Fixed Code", |
|
data=open("fixed_code.txt", "rb").read(), |
|
file_name="fixed_code.txt", |
|
mime="text/plain" |
|
) |
|
else: |
|
st.error("Please enter some buggy code to fix.") |
|
|
|
elif mode == "Mathematics Assistant": |
|
st.header("Mathematics Assistant") |
|
|
|
# Display image/logo in the "Mathematics Assistant" section (optional) |
|
math_icon = Image.open("math_icon.PNG") # Ensure the file is in the correct directory |
|
st.image(math_icon, width=150) # Adjust the size as per preference |
|
|
|
# User input for math questions |
|
math_query = st.text_input("Ask a mathematics-related question:") |
|
|
|
if st.button("Solve Problem"): |
|
if math_query: |
|
with st.spinner("Generating solution..."): |
|
# Generate the solution using GPT-4 |
|
solution = generate_math_solution(math_query) |
|
|
|
                    # Display the solution as Markdown (st.markdown renders any $...$ LaTeX the model includes)
|
formatted_solution = f""" |
|
### Solution to the Problem |
|
**Problem:** {math_query} |
|
**Solution:** |
|
{solution} |
|
""" |
|
|
|
st.markdown(formatted_solution) |
|
else: |
|
st.error("Please enter a math problem to solve.") |
|
|
|
# **New Section: Biology Assistant** |
|
elif mode == "Biology Assistant": |
|
st.header("Biology Assistant") |
|
|
|
# Display image/logo in the "Biology Assistant" section (optional) |
|
bio_icon = Image.open("bio_icon.PNG") # Ensure the file is in the correct directory |
|
st.image(bio_icon, width=150) # Adjust the size as per preference |
|
|
|
# User input for biology questions |
|
bio_query = st.text_input("Ask a biology-related question:") |
|
|
|
if bio_query: |
|
with st.spinner("Getting answer..."): |
|
prompt = f"Answer the following biology question: {bio_query}" |
|
response = openai.ChatCompletion.create( |
|
model="gpt-4o-mini", |
|
messages=[{"role": "user", "content": prompt}] |
|
) |
|
answer = response['choices'][0]['message']['content'] |
|
st.write(f"### Answer: {answer}") |
|
|
|
# **New Section: Chemistry Assistant** |
|
elif mode == "Chemistry Assistant": |
|
st.header("Chemistry Assistant") |
|
|
|
# Display image/logo in the "Chemistry Assistant" section (optional) |
|
chem_icon = Image.open("chem.PNG") # Ensure the file is in the correct directory |
|
st.image(chem_icon, width=150) # Adjust the size as per preference |
|
|
|
# User input for chemistry questions |
|
chem_query = st.text_input("Ask a chemistry-related question:") |
|
|
|
if chem_query: |
|
with st.spinner("Getting answer..."): |
|
prompt = f"Answer the following chemistry question: {chem_query}" |
|
response = openai.ChatCompletion.create( |
|
model="gpt-4o-mini", |
|
messages=[{"role": "user", "content": prompt}] |
|
) |
|
answer = response['choices'][0]['message']['content'] |
|
st.write(f"### Answer: {answer}") |
|
|
|
# **New Section: Physics Assistant** |
|
elif mode == "Physics Assistant": |
|
st.header("Physics Assistant") |
|
|
|
# Display image/logo in the "Physics Assistant" section (optional) |
|
phys_icon = Image.open("physics_icon.PNG") # Ensure the file is in the correct directory |
|
st.image(phys_icon, width=150) # Adjust the size as per preference |
|
|
|
# User input for physics questions |
|
phys_query = st.text_input("Ask a physics-related question:") |
|
|
|
if phys_query: |
|
with st.spinner("Getting answer..."): |
|
prompt = f"Answer the following physics question: {phys_query}" |
|
response = openai.ChatCompletion.create( |
|
model="gpt-3.5-turbo", |
|
messages=[{"role": "user", "content": prompt}] |
|
) |
|
answer = response['choices'][0]['message']['content'] |
|
st.write(f"### Answer: {answer}") |
|
|
|
# **New Section: Voice Chat** |
|
elif mode == "Voice Chat": |
|
st.header("Voice Chat") |
|
|
|
# Display a description or instructions |
|
st.write("Click the button below to go to the Voice Chat.") |
|
|
|
        # Display an animated GIF in the "Voice Chat" section (optional)
|
gif = "200w.gif" # Ensure the file is in the correct directory |
|
st.image(gif, use_container_width=50) # Adjust the size as per preference |
|
|
|
# Button to navigate to the external voice chat link |
|
if st.button("Go to Voice Chat"): |
|
st.write("Redirecting to the voice chat...") # You can customize this message |
|
st.markdown(f'<a href="https://shukdevdatta123-voicechat.hf.space" target="_blank">Go to Voice Chat</a>', unsafe_allow_html=True) |
|
|
|
# **New Section: Image Chat** |
|
elif mode == "Image Chat": |
|
|
|
        # Display image/logo in the "Image Chat" section (optional)
|
imgc = Image.open("i.jpg") # Ensure the file is in the correct directory |
|
st.image(imgc, width=150) # Adjust the size as per preference |
|
|
|
st.header("Image Chat") |
|
|
|
# Display a description or instructions |
|
st.write("Click the button below to go to the Image Chat.") |
|
|
|
        # Display an animated GIF in the "Image Chat" section (optional)
|
gif = "200w.gif" # Ensure the file is in the correct directory |
|
st.image(gif, use_container_width=50) # Adjust the size as per preference |
|
|
|
        # Button to open the external Image Chat app
|
if st.button("Go to Image Chat"): |
|
st.write("Redirecting to the image chat...") # You can customize this message |
|
st.markdown(f'<a href="https://imagechat2278.streamlit.app/" target="_blank">Go to Image Chat</a>', unsafe_allow_html=True) |
|
|
|
        # Button to open the alternative Image Chat app
|
if st.button("Go to Image Chat (Alternative App)"): |
|
st.write("Redirecting to the alternative image chat...") # You can customize this message |
|
st.markdown(f'<a href="https://imagechat.onrender.com/" target="_blank">Go to Image Chat (Alternative App)</a>', unsafe_allow_html=True) |
|
|
|
# **New Section: English To Japanese** |
|
elif mode == "English To Japanese": |
|
st.header("English To Japanese") |
|
|
|
# Display a description or instructions |
|
st.write("Click the button below to go to the English To Japanese Translator.") |
|
|
|
|
|
gif = "200w.gif" # Ensure the file is in the correct directory |
|
st.image(gif, use_container_width=150) # Adjust the size as per preference |
|
|
|
        # Button to open the external English To Japanese Translator
|
if st.button("Go to English To Japanese Translator"): |
|
st.write("Redirecting to the English To Japanese Translator...") # You can customize this message |
|
st.markdown(f'<a href="https://shukdevdatta123-engtojap-2-0.hf.space" target="_blank">Go to English To Japanese Translator</a>', unsafe_allow_html=True) |
|
|
|
# **New Section: Text to Image Generator** |
|
elif mode == "Text to Image Generator": |
|
st.header("Text to Image Generator") |
|
|
|
# Display a description or instructions |
|
st.write("Click the button below to go to the Text to Image Generator.") |
|
|
|
|
|
gif = "200w.gif" # Ensure the file is in the correct directory |
|
st.image(gif, use_container_width=150) # Adjust the size as per preference |
|
|
|
        # Button to open the external Text to Image Generator
|
if st.button("Go to Text to Image Generator"): |
|
st.write("Redirecting to the Text to Image Generator...") # You can customize this message |
|
st.markdown(f'<a href="https://shukdevdatta123-image-generator-dall-e3.hf.space" target="_blank">Go to Text to Image Generator</a>', unsafe_allow_html=True) |
|
|
|
# **New Section: Graph Tutorial** |
|
elif mode == "Graph Tutorial": |
|
st.header("Graph Tutorial") |
|
|
|
# Display a description or instructions |
|
st.write("Click the button below to go to Graph Tutorial.") |
|
|
|
|
|
gif = "200w.gif" # Ensure the file is in the correct directory |
|
st.image(gif, use_container_width=150) # Adjust the size as per preference |
|
|
|
        # Button to open the external Graph Tutorial
|
if st.button("Go to Graph Tutorial"): |
|
st.write("Redirecting to Graph Tutorial...") # You can customize this message |
|
st.markdown(f'<a href="https://shukdevdatta123-networkx-tutorial.hf.space" target="_blank">Go to Graph Tutorial</a>', unsafe_allow_html=True) |
|
|
|
# **New Section: Text-To-Diagram-Generator** |
|
elif mode == "Text-To-Diagram-Generator": |
|
st.header("Text-To-Diagram-Generator") |
|
|
|
# Display a description or instructions |
|
st.write("Click the button below to go to Text-To-Diagram-Generator.") |
|
|
|
|
|
gif = "200w.gif" # Ensure the file is in the correct directory |
|
st.image(gif, use_container_width=150) # Adjust the size as per preference |
|
|
|
        # Button to open the external Text-To-Diagram-Generator
|
if st.button("Go to Text-To-Diagram-Generator"): |
|
st.write("Redirecting to Text-To-Diagram-Generator...") # You can customize this message |
|
st.markdown(f'<a href="https://shukdevdatta123-text-2-diagram.hf.space" target="_blank">Go to Text-To-Diagram-Generator</a>', unsafe_allow_html=True) |