#
import streamlit as st
import PyPDF2
import openai
import faiss
import os
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from io import StringIO
from PIL import Image
# Function to extract text from a PDF file
def extract_text_from_pdf(pdf_file):
    reader = PyPDF2.PdfReader(pdf_file)
    text = ""
    for page in reader.pages:
        text += page.extract_text()
    return text
# Function to generate embeddings for a piece of text
def get_embeddings(text, model="text-embedding-ada-002"):
    response = openai.Embedding.create(input=[text], model=model)
    return response['data'][0]['embedding']
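# A minimal usage sketch for the helper above (illustrative only, assuming openai.api_key
# is already set and the legacy openai<1.0 SDK is installed):
#     vec = get_embeddings("What is a binary search tree?")
#     len(vec)  # 1536 for text-embedding-ada-002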
# Function to search for similar content
def search_similar(query_embedding, index, stored_texts, top_k=3):
    # FAISS expects float32 query vectors
    distances, indices = index.search(np.array([query_embedding], dtype="float32"), top_k)
    results = [(stored_texts[i], distances[0][idx]) for idx, i in enumerate(indices[0])]
    return results
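# Illustrative shape of the result (values made up), given an IndexFlatL2 index built over
# `stored_texts`; a smaller L2 distance means a more similar chunk:
#     search_similar(query_embedding, index, stored_texts, top_k=2)
#     # -> [("chunk text A", 0.12), ("chunk text B", 0.37)]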
# Function to generate code based on a prompt
def generate_code_from_prompt(prompt, model="gpt-4o-mini"):
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}]
    )
    return response['choices'][0]['message']['content']
# Function to save code to a .txt file
def save_code_to_file(code, filename="generated_code.txt"):
    with open(filename, "w") as f:
        f.write(code)
# Function to generate AI-based study notes and summaries
def generate_summary(text):
    prompt = f"Summarize the following text into key points:\n\n{text}"
    response = openai.ChatCompletion.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}]
    )
    return response['choices'][0]['message']['content']
# Function to fix bugs in code
def fix_code_bugs(buggy_code, model="gpt-4o-mini"):
    prompt = f"The following code has bugs or issues. Please identify and fix the problems. If possible, provide explanations for the changes made.\n\nBuggy Code:\n{buggy_code}\n\nFixed Code:"
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}]
    )
    return response['choices'][0]['message']['content']
# Function to generate AI-based mathematical solutions
def generate_math_solution(query):
    prompt = f"Explain and solve the following mathematical problem step by step: {query}"
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}]
    )
    return response['choices'][0]['message']['content']
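# Note: the helpers above target the legacy openai<1.0 Python SDK (openai.ChatCompletion /
# openai.Embedding). For reference only, a hedged sketch of the equivalent chat call with
# the openai>=1.0 client (not used by this app):
#     from openai import OpenAI
#     client = OpenAI(api_key=openai_api_key)
#     reply = client.chat.completions.create(
#         model="gpt-4o-mini",
#         messages=[{"role": "user", "content": prompt}],
#     ).choices[0].message.content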
# Streamlit app starts here
st.set_page_config(page_title="AI Assistance", page_icon=":robot:", layout="wide")
# Custom CSS for the page styling
st.markdown("""
    <style>
        body {
            background-color: #f0f4f7;
            font-family: 'Arial', sans-serif;
            color: #333;
        }
        .header {
            text-align: center;
            font-size: 2.5em;
            font-weight: bold;
            color: #4CAF50;
            margin-top: 30px;
            animation: fadeIn 2s ease-out;
        }
        .sidebar .sidebar-content {
            background-color: #333;
            color: white;
        }
        .sidebar .sidebar-content a {
            color: white;
            font-size: 1.2em;
        }
        .sidebar .sidebar-content a:hover {
            color: #4CAF50;
        }
        .stButton>button {
            background-color: #4CAF50;
            color: white;
            font-size: 1.2em;
            padding: 10px 20px;
            border-radius: 5px;
            border: none;
            transition: background-color 0.3s;
        }
        .stButton>button:hover {
            background-color: #45a049;
        }
        .stTextInput input {
            padding: 10px;
            font-size: 1.1em;
            border-radius: 5px;
            border: 1px solid #ccc;
        }
        .stFileUploader {
            border-radius: 5px;
            border: 1px solid #ddd;
            padding: 10px;
        }
        .stImage {
            animation: fadeIn 2s ease-out;
        }
        @keyframes fadeIn {
            0% { opacity: 0; }
            100% { opacity: 1; }
        }
        .stTextArea textarea {
            padding: 10px;
            font-size: 1.1em;
            border-radius: 5px;
            border: 1px solid #ccc;
        }
    </style>
""", unsafe_allow_html=True)
# Custom JavaScript for animations
st.markdown("""
    <script type="text/javascript">
        window.onload = function() {
            const elements = document.querySelectorAll('.stButton button, .stTextInput input, .stTextArea textarea');
            elements.forEach(element => {
                element.style.transition = 'all 0.3s ease';
                element.addEventListener('mouseover', function() {
                    element.style.transform = 'scale(1.05)';
                });
                element.addEventListener('mouseout', function() {
                    element.style.transform = 'scale(1)';
                });
            });
        };
    </script>
""", unsafe_allow_html=True)
# Add your app logic here...
# Display the app header
st.markdown("<h1 class='header'>AI Assistance</h1>", unsafe_allow_html=True)
# Input OpenAI API key
openai_api_key = st.text_input("Enter your OpenAI API key:", type="password")
if openai_api_key:
    openai.api_key = openai_api_key
# Sidebar to toggle between Course Query Assistant, Code Generator, Bug Fixer, etc.
st.sidebar.title("Select Mode")
mode = st.sidebar.radio("Choose an option", (
    "Course Query Assistant",
    "Code Generator",
    "AI Chatbot Tutor",
    "AI Study Notes & Summaries",
    "Code Bug Fixer",
    "Mathematics Assistant",   # Added option for Math
    "Biology Assistant",       # Added option for Biology
    "Chemistry Assistant",     # Added option for Chemistry
    "Physics Assistant",       # Added option for Physics
    "Voice Chat",
    "Image Chat",
    "English To Japanese",
    "Text to Image Generator",
    "Graph Tutorial",
    "Text-To-Diagram-Generator"
))
# Main app content here...
if mode == "Course Query Assistant":
st.header("Course Query Assistant")
# Display image/logo in the "Course Query Assistant" section (optional)
course_query_image = Image.open("Capture.PNG") # Ensure the file is in the correct directory
st.image(course_query_image, width=150) # Adjust the size as per preference
# Upload course materials
uploaded_files = st.file_uploader("Upload Course Materials (PDFs)", type=["pdf"], accept_multiple_files=True)
if uploaded_files:
st.write("Processing uploaded course materials...")
# Extract text and generate embeddings for all uploaded PDFs
course_texts = []
for uploaded_file in uploaded_files:
text = extract_text_from_pdf(uploaded_file)
course_texts.append(text)
# Combine all course materials into one large text
combined_text = " ".join(course_texts)
# Split combined text into smaller chunks for embedding (max tokens ~1000)
chunks = [combined_text[i:i+1000] for i in range(0, len(combined_text), 1000)]
# Generate embeddings for all chunks
embeddings = [get_embeddings(chunk) for chunk in chunks]
# Convert the list of embeddings into a NumPy array (shape: [num_chunks, embedding_size])
embeddings_np = np.array(embeddings).astype("float32")
# Create a FAISS index for similarity search
index = faiss.IndexFlatL2(len(embeddings_np[0])) # Use the length of the embedding vectors for the dimension
index.add(embeddings_np)
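        # For reference, retrieval against this index works roughly as sketched below
        # (values illustrative; text-embedding-ada-002 vectors are 1536-dimensional):
        #     q = np.array([get_embeddings("example question")], dtype="float32")  # shape (1, 1536)
        #     distances, ids = index.search(q, 3)           # 3 nearest chunks by L2 distance
        #     nearest_chunks = [chunks[i] for i in ids[0]]  # what search_similar() returns, paired with distances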
st.write("Course materials have been processed and indexed.")
# User query
query = st.text_input("Enter your question about the course materials:")
if query:
# Generate embedding for the query
query_embedding = get_embeddings(query)
# Search for similar chunks in the FAISS index
results = search_similar(query_embedding, index, chunks)
# Create the context for the GPT prompt
context = "\n".join([result[0] for result in results])
modified_prompt = f"Context: {context}\n\nQuestion: {query}\n\nProvide a detailed answer based on the context."
# Get the GPT-4 response
response = openai.ChatCompletion.create(
model="gpt-4o-mini", # Update to GPT-4 (or your desired model)
messages=[{"role": "user", "content": modified_prompt}]
)
# Get the response content
response_content = response['choices'][0]['message']['content']
# Display the response in Streamlit (Intelligent Reply)
st.write("### Intelligent Reply:")
st.write(response_content)
elif mode == "Code Generator":
st.header("Code Generator")
# Display image/logo in the "Course Query Assistant" section (optional)
codegen = Image.open("9802381.png") # Ensure the file is in the correct directory
st.image(codegen, width=150) # Adjust the size as per preference
# Code generation prompt input
code_prompt = st.text_area("Describe the code you want to generate:",
"e.g., Write a Python program that generates Fibonacci numbers.")
if st.button("Generate Code"):
if code_prompt:
with st.spinner("Generating code..."):
# Generate code using GPT-4
generated_code = generate_code_from_prompt(code_prompt)
# Clean the generated code to ensure only code is saved (removing comments or additional text)
clean_code = "\n".join([line for line in generated_code.splitlines() if not line.strip().startswith("#")])
# Save the clean code to a file
save_code_to_file(clean_code)
# Display the generated code
st.write("### Generated Code:")
st.code(clean_code, language="python")
# Provide a download link for the generated code
with open("generated_code.txt", "w") as f:
f.write(clean_code)
st.download_button(
label="Download Generated Code",
data=open("generated_code.txt", "rb").read(),
file_name="generated_code.txt",
mime="text/plain"
)
else:
st.error("Please provide a prompt to generate the code.")
elif mode == "AI Chatbot Tutor":
st.header("AI Chatbot Tutor")
# Display image/logo in the "Course Query Assistant" section (optional)
aitut = Image.open("910372.png") # Ensure the file is in the correct directory
st.image(aitut, width=150) # Adjust the size as per preference
# Chat interface for the AI tutor
chat_history = []
def chat_with_bot(query):
chat_history.append({"role": "user", "content": query})
response = openai.ChatCompletion.create(
model="gpt-4o-mini",
messages=chat_history
)
chat_history.append({"role": "assistant", "content": response['choices'][0]['message']['content']})
return response['choices'][0]['message']['content']
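    # Note: chat_history above is re-created on every Streamlit rerun, so earlier turns are not
    # remembered between questions. A hedged sketch of one way to persist the conversation with
    # st.session_state (kept as a comment so the app's behaviour is unchanged):
    #     if "chat_history" not in st.session_state:
    #         st.session_state.chat_history = []
    #     st.session_state.chat_history.append({"role": "user", "content": user_query})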
    user_query = st.text_input("Ask a question:")
    if user_query:
        with st.spinner("Getting answer..."):
            bot_response = chat_with_bot(user_query)
            st.write(f"### AI Response: {bot_response}")
elif mode == "AI Study Notes & Summaries":
st.header("AI Study Notes & Summaries")
# Display image/logo in the "Course Query Assistant" section (optional)
aisum = Image.open("sum.png") # Ensure the file is in the correct directory
st.image(aisum, width=150) # Adjust the size as per preference
# Upload course materials for summarization
uploaded_files_for_summary = st.file_uploader("Upload Course Materials (PDFs) for Summarization", type=["pdf"], accept_multiple_files=True)
if uploaded_files_for_summary:
st.write("Generating study notes and summaries...")
# Extract text from PDFs
all_text = ""
for uploaded_file in uploaded_files_for_summary:
text = extract_text_from_pdf(uploaded_file)
all_text += text
# Generate summary using AI
summary = generate_summary(all_text)
# Display the summary
st.write("### AI-Generated Summary:")
st.write(summary)
elif mode == "Code Bug Fixer":
st.header("Code Bug Fixer")
# Display image/logo in the "Course Query Assistant" section (optional)
aibug = Image.open("bug.png") # Ensure the file is in the correct directory
st.image(aibug, width=150) # Adjust the size as per preference
# User input for buggy code
buggy_code = st.text_area("Enter your buggy code here:")
if st.button("Fix Code"):
if buggy_code:
with st.spinner("Fixing code..."):
# Fix bugs using GPT-4
fixed_code = fix_code_bugs(buggy_code)
# Display the fixed code
st.write("### Fixed Code:")
st.code(fixed_code, language="python")
# Provide a download link for the fixed code
with open("fixed_code.txt", "w") as f:
f.write(fixed_code)
st.download_button(
label="Download Fixed Code",
data=open("fixed_code.txt", "rb").read(),
file_name="fixed_code.txt",
mime="text/plain"
)
else:
st.error("Please enter some buggy code to fix.")
elif mode == "Mathematics Assistant":
st.header("Mathematics Assistant")
# Display image/logo in the "Mathematics Assistant" section (optional)
math_icon = Image.open("math_icon.PNG") # Ensure the file is in the correct directory
st.image(math_icon, width=150) # Adjust the size as per preference
# User input for math questions
math_query = st.text_input("Ask a mathematics-related question:")
if st.button("Solve Problem"):
if math_query:
with st.spinner("Generating solution..."):
# Generate the solution using GPT-4
solution = generate_math_solution(math_query)
# Render the solution with LaTeX for mathematical notations
formatted_solution = f"""
### Solution to the Problem
**Problem:** {math_query}
**Solution:**
{solution}
"""
st.markdown(formatted_solution)
else:
st.error("Please enter a math problem to solve.")
# **New Section: Biology Assistant**
elif mode == "Biology Assistant":
    st.header("Biology Assistant")
    # Display image/logo in the "Biology Assistant" section (optional)
    bio_icon = Image.open("bio_icon.PNG")  # Ensure the file is in the correct directory
    st.image(bio_icon, width=150)  # Adjust the size as per preference
    # User input for biology questions
    bio_query = st.text_input("Ask a biology-related question:")
    if bio_query:
        with st.spinner("Getting answer..."):
            prompt = f"Answer the following biology question: {bio_query}"
            response = openai.ChatCompletion.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}]
            )
            answer = response['choices'][0]['message']['content']
            st.write(f"### Answer: {answer}")
# **New Section: Chemistry Assistant**
elif mode == "Chemistry Assistant":
    st.header("Chemistry Assistant")
    # Display image/logo in the "Chemistry Assistant" section (optional)
    chem_icon = Image.open("chem.PNG")  # Ensure the file is in the correct directory
    st.image(chem_icon, width=150)  # Adjust the size as per preference
    # User input for chemistry questions
    chem_query = st.text_input("Ask a chemistry-related question:")
    if chem_query:
        with st.spinner("Getting answer..."):
            prompt = f"Answer the following chemistry question: {chem_query}"
            response = openai.ChatCompletion.create(
                model="gpt-4o-mini",
                messages=[{"role": "user", "content": prompt}]
            )
            answer = response['choices'][0]['message']['content']
            st.write(f"### Answer: {answer}")
# **New Section: Physics Assistant**
elif mode == "Physics Assistant":
    st.header("Physics Assistant")
    # Display image/logo in the "Physics Assistant" section (optional)
    phys_icon = Image.open("physics_icon.PNG")  # Ensure the file is in the correct directory
    st.image(phys_icon, width=150)  # Adjust the size as per preference
    # User input for physics questions
    phys_query = st.text_input("Ask a physics-related question:")
    if phys_query:
        with st.spinner("Getting answer..."):
            prompt = f"Answer the following physics question: {phys_query}"
            response = openai.ChatCompletion.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": prompt}]
            )
            answer = response['choices'][0]['message']['content']
            st.write(f"### Answer: {answer}")
# **New Section: Voice Chat**
elif mode == "Voice Chat":
    st.header("Voice Chat")
    # Display a description or instructions
    st.write("Click the button below to go to the Voice Chat.")
    # Display an animated image in the "Voice Chat" section (optional)
    gif = "200w.gif"  # Ensure the file is in the correct directory
    st.image(gif, width=50)  # Adjust the size as per preference
    # Button to open the external voice chat link
    if st.button("Go to Voice Chat"):
        st.write("Redirecting to the voice chat...")  # You can customize this message
        st.markdown('<a href="https://shukdevdatta123-voicechat.hf.space" target="_blank">Go to Voice Chat</a>', unsafe_allow_html=True)
# **New Section: Image Chat**
elif mode == "Image Chat":
    # Display image/logo in the "Image Chat" section (optional)
    imgc = Image.open("i.jpg")  # Ensure the file is in the correct directory
    st.image(imgc, width=150)  # Adjust the size as per preference
    st.header("Image Chat")
    # Display a description or instructions
    st.write("Click the button below to go to the Image Chat.")
    # Display an animated image in the "Image Chat" section (optional)
    gif = "200w.gif"  # Ensure the file is in the correct directory
    st.image(gif, width=50)  # Adjust the size as per preference
    # Button to open the external image chat link
    if st.button("Go to Image Chat"):
        st.write("Redirecting to the image chat...")  # You can customize this message
        st.markdown('<a href="https://imagechat2278.streamlit.app/" target="_blank">Go to Image Chat</a>', unsafe_allow_html=True)
    # Button to open the alternative image chat app
    if st.button("Go to Image Chat (Alternative App)"):
        st.write("Redirecting to the alternative image chat...")  # You can customize this message
        st.markdown('<a href="https://imagechat.onrender.com/" target="_blank">Go to Image Chat (Alternative App)</a>', unsafe_allow_html=True)
# **New Section: English To Japanese**
elif mode == "English To Japanese":
    st.header("English To Japanese")
    # Display a description or instructions
    st.write("Click the button below to go to the English To Japanese Translator.")
    gif = "200w.gif"  # Ensure the file is in the correct directory
    st.image(gif, width=150)  # Adjust the size as per preference
    # Button to open the external translator link
    if st.button("Go to English To Japanese Translator"):
        st.write("Redirecting to the English To Japanese Translator...")  # You can customize this message
        st.markdown('<a href="https://shukdevdatta123-engtojap-2-0.hf.space" target="_blank">Go to English To Japanese Translator</a>', unsafe_allow_html=True)
# **New Section: Text to Image Generator**
elif mode == "Text to Image Generator":
    st.header("Text to Image Generator")
    # Display a description or instructions
    st.write("Click the button below to go to the Text to Image Generator.")
    gif = "200w.gif"  # Ensure the file is in the correct directory
    st.image(gif, width=150)  # Adjust the size as per preference
    # Button to open the external image generator link
    if st.button("Go to Text to Image Generator"):
        st.write("Redirecting to the Text to Image Generator...")  # You can customize this message
        st.markdown('<a href="https://shukdevdatta123-image-generator-dall-e3.hf.space" target="_blank">Go to Text to Image Generator</a>', unsafe_allow_html=True)
# **New Section: Graph Tutorial**
elif mode == "Graph Tutorial":
    st.header("Graph Tutorial")
    # Display a description or instructions
    st.write("Click the button below to go to the Graph Tutorial.")
    gif = "200w.gif"  # Ensure the file is in the correct directory
    st.image(gif, width=150)  # Adjust the size as per preference
    # Button to open the external graph tutorial link
    if st.button("Go to Graph Tutorial"):
        st.write("Redirecting to the Graph Tutorial...")  # You can customize this message
        st.markdown('<a href="https://shukdevdatta123-networkx-tutorial.hf.space" target="_blank">Go to Graph Tutorial</a>', unsafe_allow_html=True)
# **New Section: Text-To-Diagram Generator**
elif mode == "Text-To-Diagram-Generator":
    st.header("Text-To-Diagram Generator")
    # Display a description or instructions
    st.write("Click the button below to go to the Text-To-Diagram Generator.")
    gif = "200w.gif"  # Ensure the file is in the correct directory
    st.image(gif, width=150)  # Adjust the size as per preference
    # Button to open the external diagram generator link
    if st.button("Go to Text-To-Diagram Generator"):
        st.write("Redirecting to the Text-To-Diagram Generator...")  # You can customize this message
        st.markdown('<a href="https://shukdevdatta123-text-2-diagram.hf.space" target="_blank">Go to Text-To-Diagram Generator</a>', unsafe_allow_html=True)