import streamlit as st
import os
import json
import google.generativeai as genai
from PIL import Image

MODEL_ID = "gemini-2.0-flash-exp"

try:
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("GEMINI_API_KEY environment variable is not set.")
    genai.configure(api_key=api_key)
except Exception as e:
    st.error(f"Error configuring the Gemini API: {e}")
    st.stop()

model = genai.GenerativeModel(MODEL_ID)
chat = model.start_chat()

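# Optional sketch (assumption, not in the original code): on Streamlit Community
# Cloud the key is often kept in st.secrets rather than an environment variable,
# so a fallback lookup could look like:
#   api_key = os.getenv("GEMINI_API_KEY") or st.secrets.get("GEMINI_API_KEY")
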
def get_local_pdf_path():
    """Return the path to the local PDF of practice problems."""
    try:
        pdf_path = os.path.join("problems", "problems.pdf")
        if not os.path.exists(pdf_path):
            raise FileNotFoundError(f"{pdf_path} does not exist.")
        return pdf_path
    except Exception as e:
        st.error(f"Failed to find the local PDF: {e}")
        st.stop()

if "conversation_history" not in st.session_state: |
|
st.session_state.conversation_history = [] |
|
if "uploaded_file_part" not in st.session_state: |
|
st.session_state.uploaded_file_part = None |
|
if "uploaded_pdf_path" not in st.session_state: |
|
st.session_state.uploaded_pdf_path = get_local_pdf_path() |
|
|
|
def multimodal_prompt(pdf_path, text_prompt):
    """
    Send a multimodal prompt to Gemini, uploading the PDF only when necessary.

    Args:
        pdf_path: The path to the PDF file.
        text_prompt: The text prompt for the model.

    Returns:
        The model's response as a string, or an error message.
    """
    try:
        if st.session_state.uploaded_file_part is None:
            # First call: upload the PDF once and cache the returned file handle.
            pdf_part = genai.upload_file(pdf_path, mime_type="application/pdf")
            st.session_state.uploaded_file_part = pdf_part
            prompt = [text_prompt, pdf_part]
        else:
            # Later calls: reuse the cached handle instead of re-uploading.
            prompt = [text_prompt, st.session_state.uploaded_file_part]

        response = chat.send_message(prompt)

        st.session_state.conversation_history.append(
            {"role": "user", "content": text_prompt, "has_pdf": True}
        )
        st.session_state.conversation_history.append(
            {"role": "assistant", "content": response.text}
        )
        return response.text

    except Exception as e:
        return f"An error occurred: {e}"

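# Note (not from the original code): files uploaded through the Gemini File API
# are stored only temporarily on Google's side (about 48 hours at the time of
# writing), so the cached handle above is meant for reuse within a session,
# not across long-lived deployments.
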
st.title("📚❓Problem Solving Tutor")

about = """
**How to use this App**

This app loads a local PDF of practice problems (problems/problems.pdf), sends it
to Gemini, and lists the quadratic equations it finds in the section below.
"""

with st.expander("How to use this App"):
    st.markdown(about)

st.header("Quadratic Equations") |
|
|
|
with st.spinner("AI is thinking..."): |
|
if st.session_state.uploaded_pdf_path is None: |
|
st.session_state.uploaded_pdf_path = get_local_pdf_path() |
|
|
|
filepath = st.session_state.uploaded_pdf_path |
|
text_prompt = """Use the provided document. "Read the list of 5 quadratic equations. |
|
Return your response in JSON format, as a list of strings. |
|
Do not include any extra text, explanations, or backslashes. |
|
|
|
Example JSON output: |
|
[ |
|
"x^2 - 5x + 6 = 0", |
|
"2x^2 + 3x - 1 = 0", |
|
"x^2 - 9 = 0", |
|
"3x^2 - 2x + 4 = 0", |
|
"x^2 + 8x + 15 = 0" |
|
] |
|
""" |
|
response = multimodal_prompt(filepath, text_prompt) |
|
st.markdown(response) |
|
|
|
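# Optional robustness sketch (hypothetical helper, not part of the original app):
# despite the instructions, Gemini sometimes wraps JSON output in a Markdown code
# fence. A cleaner like this could be applied to `response` before json.loads.
def strip_code_fence(text):
    """Remove a surrounding ``` or ```json fence from model output, if present."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        cleaned = cleaned.split("\n", 1)[-1]   # drop the opening fence line
        if cleaned.rstrip().endswith("```"):
            cleaned = cleaned.rstrip()[:-3]    # drop the closing fence
    return cleaned.strip()
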
try:
    problems = json.loads(response)
    equations = []
    for problem in problems:
        # Entries may be bare equations ("x^2 - 9 = 0") or prefixed like
        # "1: x^2 - 9 = 0"; keep only the part after the first ": ", if any.
        equation = problem.split(": ", 1)[-1]
        equations.append(equation)
    st.write("Equations only:")
    for equation in equations:
        st.write(equation)
except json.JSONDecodeError:
    st.write("Error: Invalid JSON format in the response.")
except Exception as e:
    st.write(f"An unexpected error occurred: {e}")

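# Follow-up sketch (hypothetical, not in the original app): because `chat` keeps
# the conversation history, a later turn could ask about the same PDF without
# re-attaching it, e.g.:
#   follow_up = chat.send_message("Solve the first equation step by step.")
#   st.markdown(follow_up.text)
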
st.markdown("Visit our Hugging Face Space!") |
|
st.markdown("© 2025 WVSU AI Dev Team 🤖 ✨") |