Update app.py
Browse files
app.py
CHANGED
@@ -67,6 +67,21 @@ def generate_math_solution(query):
|
|
67 |
)
|
68 |
return response['choices'][0]['message']['content']
|
69 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
70 |
from PIL import Image # Required for local image files
|
71 |
|
72 |
# Streamlit app starts here
|
@@ -96,7 +111,8 @@ if openai_api_key:
|
|
96 |
"Mathematics Assistant", # Added option for Math
|
97 |
"Biology Assistant", # Added option for Biology
|
98 |
"Chemistry Assistant", # Added option for Chemistry
|
99 |
-
"Physics Assistant" # Added option for Physics
|
|
|
100 |
))
|
101 |
|
102 |
if mode == "Course Query Assistant":
|
@@ -380,3 +396,35 @@ if openai_api_key:
|
|
380 |
)
|
381 |
answer = response['choices'][0]['message']['content']
|
382 |
st.write(f"### Answer: {answer}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
)
|
68 |
return response['choices'][0]['message']['content']
|
69 |
|
70 |
+
# OCR helper: pull any machine-readable text out of an uploaded image.
def extract_text_from_image(image):
    """Run Tesseract OCR over *image* (a PIL image) and return the recognized text."""
    return pytesseract.image_to_string(image)
|
75 |
+
|
76 |
+
# Q&A helper: answer a user question using only the OCR-extracted image text as context.
def answer_question_based_on_image(query, image_text):
    """Send *query* to the chat model, grounding the answer in *image_text*.

    Returns the model's reply as a plain string.
    """
    prompt = (
        f"The following is text extracted from an image: {image_text}\n\n"
        f"Question: {query}\n\n"
        "Answer the question based on the image text."
    )
    completion = openai.ChatCompletion.create(
        model="gpt-4o-mini",  # You can choose another model like GPT-4 Vision, if available
        messages=[{"role": "user", "content": prompt}],
    )
    return completion['choices'][0]['message']['content']
|
84 |
+
|
85 |
from PIL import Image # Required for local image files
|
86 |
|
87 |
# Streamlit app starts here
|
|
|
111 |
"Mathematics Assistant", # Added option for Math
|
112 |
"Biology Assistant", # Added option for Biology
|
113 |
"Chemistry Assistant", # Added option for Chemistry
|
114 |
+
"Physics Assistant", # Added option for Physics
|
115 |
+
"Image Upload" # New model for image upload
|
116 |
))
|
117 |
|
118 |
if mode == "Course Query Assistant":
|
|
|
396 |
)
|
397 |
answer = response['choices'][0]['message']['content']
|
398 |
st.write(f"### Answer: {answer}")
|
399 |
+
|
400 |
+
elif mode == "Image Upload":
|
401 |
+
st.header("Image Upload and Analysis")
|
402 |
+
|
403 |
+
# Image upload feature
|
404 |
+
uploaded_image = st.file_uploader("Upload an image:", type=["jpg", "jpeg", "png"])
|
405 |
+
|
406 |
+
if uploaded_image:
|
407 |
+
# Open the image with PIL
|
408 |
+
image = Image.open(uploaded_image)
|
409 |
+
|
410 |
+
# Display the uploaded image
|
411 |
+
st.image(image, caption="Uploaded Image", use_column_width=True)
|
412 |
+
|
413 |
+
# Extract text from the image using OCR
|
414 |
+
with st.spinner("Extracting text from the image..."):
|
415 |
+
image_text = extract_text_from_image(image)
|
416 |
+
|
417 |
+
# Show the extracted text
|
418 |
+
if image_text:
|
419 |
+
st.write("### Extracted Text from Image:")
|
420 |
+
st.write(image_text)
|
421 |
+
else:
|
422 |
+
st.write("No text was extracted from the image.")
|
423 |
+
|
424 |
+
# Allow the user to ask questions about the image
|
425 |
+
question = st.text_input("Ask a question about the image:")
|
426 |
+
|
427 |
+
if question:
|
428 |
+
with st.spinner("Getting answer..."):
|
429 |
+
answer = answer_question_based_on_image(question, image_text)
|
430 |
+
st.write(f"### Answer: {answer}")
|