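"""Streamlit app: Problem Solving Tutor.

Loads a local PDF of quadratic equation problems, sends it to Gemini
(google.generativeai), asks the model to extract the problems as JSON,
and displays them on the page.
"""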
import streamlit as st
import os
import google.generativeai as genai
import json
from PIL import Image


MODEL_ID = "gemini-2.0-flash-exp"  # Keep the model ID as is
try:
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("The GEMINI_API_KEY environment variable is not set.")
    genai.configure(api_key=api_key)
except Exception as e:
    st.error(f"Error configuring the Gemini API: {e}")
    st.stop()

model = genai.GenerativeModel(MODEL_ID)
chat = model.start_chat()
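# Note: this chat session is re-created on every Streamlit rerun; only the
# values stored in st.session_state below persist between reruns.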

def get_local_pdf_path():
    """
    Returns the path to the local PDF file.
    """
    try:
        pdf_path = os.path.join("problems", "problems.pdf")
        if not os.path.exists(pdf_path):
            raise FileNotFoundError(f"{pdf_path} does not exist.")
        return pdf_path
    except Exception as e:
        st.error(f"Failed to find the local PDF: {e}")
        st.stop()  # Stop if the file is not found

# Initialize conversation history in Streamlit session state
if "conversation_history" not in st.session_state:
    st.session_state.conversation_history = []
if "uploaded_file_part" not in st.session_state:  # Store the file *part*
    st.session_state.uploaded_file_part = None
if "uploaded_pdf_path" not in st.session_state:
    st.session_state.uploaded_pdf_path = get_local_pdf_path()

def multimodal_prompt(pdf_path, text_prompt):
    """
    Sends a multimodal prompt to Gemini, handling file uploads efficiently.
    Args:
        pdf_path: The path to the PDF file.
        text_prompt: The text prompt for the model.
    Returns:
        The model's response as a string, or an error message.
    """
    try:
        if st.session_state.uploaded_file_part is None:  # First time, upload
            pdf_part = genai.upload_file(pdf_path, mime_type="application/pdf")
            st.session_state.uploaded_file_part = pdf_part
            prompt = [text_prompt, pdf_part]  # First turn includes the actual file
        else:  # Subsequent turns, reference the file
            prompt = [text_prompt, st.session_state.uploaded_file_part]  # Subsequent turns include the file reference

        response = chat.send_message(prompt)

        # Update conversation history
        st.session_state.conversation_history.append({"role": "user", "content": text_prompt, "has_pdf": True})
        st.session_state.conversation_history.append({"role": "assistant", "content": response.text})
        return response.text

    except Exception as e:
        return f"An error occurred: {e}"


# --- Main Page ---
st.title("📚❓Problem Solving Tutor")
about = """
**How to use this App**
Replace this placeholder with the actual text.
"""

with st.expander("How to use this App"):
    st.markdown(about)

# --- Load the image ---
# image = Image.open("higher_ed.png")
# st.image(image, width=400)

st.header("Quadratic Equations")
# Load the problems from the PDF.
with st.spinner("AI is thinking..."):
    if st.session_state.uploaded_pdf_path is None:
        st.session_state.uploaded_pdf_path = get_local_pdf_path()

    filepath = st.session_state.uploaded_pdf_path
    text_prompt = f"Use the provided document. Create a list of math problems found in the document. Format the output as a JSON string"
    response = multimodal_prompt(filepath, text_prompt)  # Use the local filepath
    st.markdown(response)

try:
    # The model was asked for JSON; strip any Markdown fence before parsing.
    problems = json.loads(extract_json_text(response))
    for problem in problems:
        st.write(problem)
except json.JSONDecodeError:
    st.write("Error: Invalid JSON format in the response.")
except Exception as e:
    st.write(f"An unexpected error occurred: {e}")

st.markdown("Visit our Hugging Face Space!")
st.markdown("© 2025 WVSU AI Dev Team 🤖 ✨")