Update app.py

app.py CHANGED
@@ -1,14 +1,11 @@
 import streamlit as st
 import google.generativeai as genai
 import os
-import tempfile
-import PIL.Image
-import time
-import ssl
 from dotenv import load_dotenv
+from mimetypes import guess_type as guessmime
+from io import BytesIO
 
 load_dotenv()
-
 # Configure the API key
 genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
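The configuration path itself is unchanged: load_dotenv() pulls key=value pairs from a local .env file into the environment, and the result of os.getenv is handed straight to genai.configure, even when it is None. A fail-fast variation (a sketch, not part of this commit) makes a missing key obvious at startup:

import os
from dotenv import load_dotenv
import google.generativeai as genai

load_dotenv()  # reads key=value pairs from ./.env into the environment

api_key = os.getenv("GOOGLE_API_KEY")
if not api_key:
    # Assumption: a hard failure at startup is preferable to a late API error.
    raise RuntimeError("GOOGLE_API_KEY is not set; add it to .env or the environment")
genai.configure(api_key=api_key)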
@@ -19,16 +16,36 @@ safety_settings = [
     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
 ]
 
-
-
+model = genai.GenerativeModel(
+    'gemini-1.5-flash',
+    safety_settings=safety_settings,
+    system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
+
+
 
 # Function to get response from the model
 # Gemini uses 'model' for assistant; Streamlit uses 'assistant'
+
 def role_to_streamlit(role):
     if role == "model":
         return "assistant"
     else:
         return role
+
+def create_media_part(data, mime_type):
+    """Creates an inline media part for the GenerativeModel.
+
+    Args:
+        data: The file contents as raw bytes.
+        mime_type: The MIME type of the file.
+
+    Returns:
+        A Part object wrapping the media as inline data.
+    """
+    # The API expects raw bytes, so wrap them in a Blob rather than
+    # base64-encoding the input by hand.
+    blob = genai.protos.Blob(mime_type=mime_type, data=data)
+    return genai.protos.Part(inline_data=blob)
 
 # Add a Gemini Chat history object to Streamlit session state
 if "chat" not in st.session_state:
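For reference, here is the inline-data pattern that create_media_part builds on, as a minimal standalone sketch. It assumes a recent google-generativeai release (one that exposes genai.protos), a GOOGLE_API_KEY in the environment, and a hypothetical local test image cat.png:

import os
import google.generativeai as genai

genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
model = genai.GenerativeModel('gemini-1.5-flash')
chat = model.start_chat(history=[])

# Inline data travels inside the request itself, so there is no
# upload-then-poll step like the Files API flow removed below.
with open("cat.png", "rb") as f:  # hypothetical test image
    part = genai.protos.Part(
        inline_data=genai.protos.Blob(mime_type="image/png", data=f.read()))

response = chat.send_message([part, "Describe this image."])
print(response.text)

Inline parts are bounded by the overall request size limit (on the order of 20 MB), which is what the removed Files API helper in the next hunk was for: uploading larger media and polling until it is processed.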
@@ -37,113 +54,41 @@ if "chat" not in st.session_state:
 # Display Form Title
 st.title("Mariam AI!")
 
-# File uploader
-uploaded_files = st.file_uploader("Choose a file", accept_multiple_files=True)
-
 # Display chat messages from history above current input box
 for message in st.session_state.chat.history:
     with st.chat_message(role_to_streamlit(message.role)):
         st.markdown(message.parts[0].text)
 
-def upload_and_process_file(file_path):
-    """Uploads and processes a file with the Gemini API, with improved error handling."""
-    max_retries = 3
-    retry_delay = 2  # seconds
-
-    for attempt in range(max_retries):
-        try:
-            print(f"Upload attempt {attempt + 1}/{max_retries} for {file_path}")
-
-            # Check the file
-            if not os.path.exists(file_path):
-                raise FileNotFoundError(f"The file {file_path} does not exist")
-
-            file_size = os.path.getsize(file_path)
-            if file_size == 0:
-                raise ValueError(f"The file {file_path} is empty")
-
-            # Upload the file
-            uploaded_file = genai.upload_file(path=file_path)
-            print(f"Upload succeeded: {uploaded_file.uri}")
-
-            # Wait for processing
-            timeout = 300  # 5 minutes
-            start_time = time.time()
-
-            while uploaded_file.state.name == "PROCESSING":
-                if time.time() - start_time > timeout:
-                    raise TimeoutError("Timed out while processing the file")
-
-                print(f"Still processing... elapsed: {int(time.time() - start_time)}s")
-                time.sleep(10)
-                uploaded_file = genai.get_file(uploaded_file.name)
-
-            if uploaded_file.state.name == "FAILED":
-                raise ValueError(f"Processing failed: {uploaded_file.state.name}")
-
-            print(f"Processing finished successfully: {uploaded_file.uri}")
-            return uploaded_file
-
-        except ssl.SSLError as e:
-            print(f"SSL error during upload (attempt {attempt + 1}): {e}")
-            if attempt < max_retries - 1:
-                time.sleep(retry_delay * (attempt + 1))
-            else:
-                raise
-
-        except Exception as e:
-            print(f"Error during upload (attempt {attempt + 1}): {e}")
-            if attempt < max_retries - 1:
-                time.sleep(retry_delay * (attempt + 1))
-            else:
-                raise
-
 # Accept user's next message, add to context, resubmit context to Gemini
 if prompt := st.chat_input("Hey?"):
     # Display user's last message
     st.chat_message("user").markdown(prompt)
 
-
-
-
-
-    #
-
-
-
-
-
-
-
-
-
-
+    # Handle file uploads
+    uploaded_file = st.file_uploader("Choose a file", type=["png", "jpg", "jpeg", "mp3", "wav", "mp4", "avi"])
+
+    if uploaded_file is not None:
+        # Display the uploaded file
+        if uploaded_file.type.startswith('image'):
+            st.image(uploaded_file, caption="Uploaded Image.", use_column_width=True)
+        elif uploaded_file.type.startswith('audio'):
+            st.audio(uploaded_file, format=uploaded_file.type)
+        elif uploaded_file.type.startswith('video'):
+            st.video(uploaded_file, format=uploaded_file.type)
+
+        file_bytes = uploaded_file.getvalue()
+        mime_type = guessmime(uploaded_file.name)[0]
+        media_part = create_media_part(file_bytes, mime_type)
 
+        # Combine prompt and uploaded file for Gemini
+        parts = [media_part, "\n\n", prompt]
+        response = st.session_state.chat.send_message(parts)
+    else:
     # Send user entry to Gemini and read the response
-    response =
-
-
-
-
-    st.markdown(response.text)
-
-    # Update the chat history
-    st.session_state.chat.history.extend([
-        genai.types.Content(parts=[genai.types.Part(text=prompt)], role="user"),
-        genai.types.Content(parts=[genai.types.Part(text=response.text)], role="model")
-    ])
-
-    except Exception as e:
-        st.error(f"An error occurred: {e}")
-
-    finally:
-        # Cleanup temporary files
-        for temp_file in temp_files:
-            try:
-                os.unlink(temp_file)
-            except Exception as e:
-                print(f"Error deleting temporary file {temp_file}: {e}")
+        response = st.session_state.chat.send_message(prompt)
+
+    # Display the assistant's reply
+    with st.chat_message("assistant"):
+        st.markdown(response.text)