Update app.py
app.py
CHANGED
@@ -2,12 +2,14 @@ import streamlit as st
 import google.generativeai as genai
 import os
 from dotenv import load_dotenv
+from PIL import Image
+import mimetypes
+import io
 
 load_dotenv()
 # Configure the API key
 genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
-
 safety_settings = [
     {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
     {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
@@ -15,7 +17,6 @@ safety_settings = [
     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
 ]
 
-
 genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
 model = genai.GenerativeModel('gemini-1.5-flash',safety_settings=safety_settings,
@@ -26,14 +27,12 @@ model = genai.GenerativeModel('gemini-1.5-flash',safety_settings=safety_settings
 # Function to get response from the model
 # Gemini uses 'model' for assistant; Streamlit uses 'assistant'
 
-
 def role_to_streamlit(role):
     if role == "model":
         return "assistant"
     else:
         return role
 
-
 # Add a Gemini Chat history object to Streamlit session state
 if "chat" not in st.session_state:
     st.session_state.chat = model.start_chat(history=[])
@@ -44,15 +43,62 @@ st.title("Mariam AI!")
 # Display chat messages from history above current input box
 for message in st.session_state.chat.history:
     with st.chat_message(role_to_streamlit(message.role)):
-        st.markdown(message.parts[0].text)
+        # Check if the message part is text or a file part
+        for part in message.parts:
+            if part.HasField("text"):
+                st.markdown(part.text)
+            elif part.HasField("file_data"):
+                # Handle file display (e.g., image)
+                try:
+                    # Infer the MIME type if not provided
+                    if not part.file_data.mime_type:
+                        mime_type = mimetypes.guess_type(part.file_data.file_name)[0]
+                    else:
+                        mime_type = part.file_data.mime_type
+                    if mime_type and mime_type.startswith("image/"):
+                        image_data = part.file_data.data  # Access the image data directly
+                        image = Image.open(io.BytesIO(image_data))  # Open the image using PIL
+                        st.image(image)
+                    else:
+                        st.write(f"File: {part.file_data.file_name} (MIME type: {part.file_data.mime_type})")
+                except Exception as e:
+                    st.error(f"Error displaying file: {e}")
 
 # Accept user's next message, add to context, resubmit context to Gemini
 if prompt := st.chat_input("Hey?"):
     # Display user's last message
     st.chat_message("user").markdown(prompt)
 
-    # Send user entry to Gemini and read the response
-    response = st.session_state.chat.send_message(prompt)
+    # Handle file uploads
+    uploaded_file = st.file_uploader("Choose a file", type=["jpg", "jpeg", "png", "pdf"])  # Add more types if needed
+
+    if uploaded_file is not None:
+        # Read the file as bytes
+        bytes_data = uploaded_file.getvalue()
+
+        # Display the uploaded file (if it's an image)
+        if uploaded_file.type.startswith("image/"):
+            image = Image.open(uploaded_file)
+            st.image(image, caption=f"Uploaded Image: {uploaded_file.name}")
+
+        # Construct the message parts, including the uploaded file
+        parts = [
+            prompt,
+            {
+                "file_data": {
+                    "mime_type": uploaded_file.type,
+                    "file_name": uploaded_file.name,
+                    "data": bytes_data
+                }
+            }
+        ]
+
+        # Send the message with the file to Gemini
+        response = st.session_state.chat.send_message(parts)
+
+    else:
+        # Send user entry to Gemini and read the response
+        response = st.session_state.chat.send_message(prompt)
 
 # Display last
 with st.chat_message("assistant"):
|
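Likewise for sending: send_message accepts a list of parts, where a part can be a string, a PIL image, or a {mime_type, data} dict that the SDK treats as inline data; the {"file_data": {..., "file_name", "data"}} payload built above does not match the Part schema and may be rejected. A hedged sketch of the documented shapes, reusing the chat session and Streamlit upload from the app (send_with_upload is a hypothetical helper):

# Hedged sketch: send a prompt plus an uploaded file as documented part types.
# `chat`, `prompt`, and `uploaded_file` are assumed from the app above.
from PIL import Image

def send_with_upload(chat, prompt, uploaded_file):
    if uploaded_file.type.startswith("image/"):
        # The SDK accepts PIL images directly as message parts.
        part = Image.open(uploaded_file)
    else:
        # A bare {mime_type, data} dict is interpreted as inline data (a Blob).
        part = {"mime_type": uploaded_file.type, "data": uploaded_file.getvalue()}
    return chat.send_message([prompt, part])

For larger files, genai.upload_file() returns a File object that can itself be passed as a part; uploads made that way are what actually surface in history as file_data, with a file_uri rather than raw bytes.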