Spaces: Build error

Upload 2 files
- app.py +187 -0
- requirements.txt +11 -0
app.py
ADDED
@@ -0,0 +1,187 @@
import streamlit as st
import tensorflow as tf
import numpy as np
from streamlit_option_menu import option_menu
import streamlit.components.v1 as components
import os
from groq import Groq
from gtts import gTTS
from langdetect import detect
from keras.utils import load_img, img_to_array
from keras.models import load_model
from dotenv import load_dotenv
from keras.preprocessing import image


# Set the page title and icon of the application
st.set_page_config(page_title="Amelioration de la santé dentaire", page_icon=":tooth:", layout="centered", menu_items=None)


# Create three columns of equal width
col1, col2, col3 = st.columns(3)

# Leave the first and third columns empty
with col1:
    st.write("")

# Display the logo in the second column
with col2:
    st.image("img/logo2.png", use_column_width=None)

with col3:
    st.write("")

selected = option_menu(
    menu_title=None,  # required
    options=["Accueil", "Chatbot : Groq", "Prediction"],  # required
    icons=["house", "chat-dots"],  # optional
    menu_icon="cast",  # optional
    default_index=0,  # optional
    orientation="horizontal",
)

if selected == "Accueil":
    st.title(f"{selected}")

    # Display the home page with the app description
    st.header('Intelligence artificielle et Amelioration de la santé dentaire')
    st.markdown("<h5 style='text-align: justify;'>Notre application combine trois fonctionnalités principales pour vous offrir une expérience interactive et fluide.</h5>", unsafe_allow_html=True)

    #st.image('img/image1.jpeg', caption='Large Language Model')
    #st.title('Bienvenue sur l\'application de classification d\'images de radiographies pulmonaires')
    #st.markdown("<h1 style='text-align: center;'>Bienvenue sur l'application de classification d'images de radiographies pulmonaires</h1>", unsafe_allow_html=True)
    st.markdown("<h5 style='text-align: justify;'><U><b> Chatbot avec Groq : </b></U> Cette fonctionnalité permet aux utilisateurs d'interagir avec un chatbot alimenté par Groq. Les utilisateurs peuvent saisir des prompts dans une zone de texte et obtenir des réponses générées par le modèle Groq. L'interaction se fait de manière fluide et naturelle, simulant une conversation en langage naturel.</h5>", unsafe_allow_html=True)
    st.markdown("<h5 style='text-align: justify;'>Avec ces trois volets, les utilisateurs peuvent interagir avec l'application de différentes manières, que ce soit en saisissant du texte, en parlant dans leur microphone, ou en écoutant les réponses générées par l'application. Cela offre une expérience utilisateur riche et flexible pour répondre à différents besoins et préférences.</h5>", unsafe_allow_html=True)


    components.html(
        """
        <div style="position: fixed; bottom: 0; left: 0; right: 0; text-align: center; font-size: 15px; color: gray;">
        Tous droits réservés © septembre 2024 Fosso Tchatat Sidoine
        </div>
        """,
        height=50
    )

if selected == "Chatbot : Groq":
    # Sidebar settings
    with st.sidebar:
        st.sidebar.subheader("PARAMETRES")
        temperature = st.slider("Temperature", 0.0, 5.0, 1.0)
        tokens = st.slider("Max Tokens", 0, 8192, 900)
        stream = st.toggle("Stream", value=True)
        etat = stream

    class GroqAPI:
        # Handles API operations with Groq to generate chat responses
        def __init__(self, model_name: str):
            # NOTE: the key is hardcoded in the original upload; an environment-based
            # alternative is sketched after this file.
            self.client = Groq(api_key="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")
            self.model_name = model_name

        # Internal method to get responses from the Groq API
        def _response(self, message):
            return self.client.chat.completions.create(
                model=self.model_name,
                messages=message,
                temperature=temperature,
                max_tokens=tokens,
                stream=etat,
                stop=None,
            )

        # Generator to stream responses from the API (assumes the Stream toggle is on)
        def response_stream(self, message):
            for chunk in self._response(message):
                if chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content

    class Message:
        # Manages chat messages in the Streamlit UI
        system_prompt = "You are a professional AI. Please generate responses in English to all user inputs."

        # Initializes the chat history if it doesn't exist in session state
        def __init__(self):
            if "messages" not in st.session_state:
                st.session_state.messages = [{"role": "system", "content": self.system_prompt}]

        # Adds a new message to the session state
        def add(self, role: str, content: str):
            st.session_state.messages.append({"role": role, "content": content})

        # Displays all past messages in the UI, ignoring system messages
        def display_chat_history(self):
            for message in st.session_state.messages:
                if message["role"] == "system":
                    continue
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])

        # Streams API responses to the Streamlit chat messages UI
        def display_stream(self, generator):
            with st.chat_message("assistant"):
                return st.write_stream(generator)

    class ModelSelector:
        # Allows the user to select a model from a predefined list
        def __init__(self):
            # List of available models to choose from
            self.models = ["llama3-70b-8192", "llama3-8b-8192", "gemma-7b-it", "mixtral-8x7b-32768"]

        # Displays the model selection in the sidebar
        def select(self):
            with st.sidebar:
                return st.selectbox("Model", self.models)

    # Entry point for the chatbot page
    def main():
        user_input = st.chat_input("Chat avec moi...")
        model = ModelSelector()
        selected_model = model.select()
        message = Message()

        # If there is user input, process it via the selected model
        if user_input:
            llm = GroqAPI(selected_model)
            message.add("user", user_input)
            message.display_chat_history()
            response = message.display_stream(llm.response_stream(st.session_state.messages))
            message.add("assistant", response)

    main()

model = load_model("model.h5")
mal_classes = {'Data_caries': 0, 'Gingivitis': 1, 'Mouth_Ulcer': 2, 'hypodontia': 3}


if selected == "Prediction":
    # Prediction page: sidebar controls and main display area
    # Sidebar
    st.header("Prédiction")
    #st.markdown("<h1 style='text-align: center;'>Prédiction</h1>", unsafe_allow_html=True)
    st.sidebar.header("Paramètre pour la prédiction")
    upload_file = st.sidebar.file_uploader("Télécharger le fichier", type=['jpg', 'jpeg', 'png'], key="upload")
    generate_pred = st.sidebar.button("Predict")

    # Main
    if upload_file:
        # Display the uploaded image
        st.header("Image téléchargée")
        st.image(upload_file, caption="Image", use_column_width=True)

        # Predict the label
        if generate_pred:
            # Preprocess the image to the model's expected input size
            test_image = image.load_img(upload_file, target_size=(64, 64))
            image_array = img_to_array(test_image)
            image_array = np.expand_dims(image_array, axis=0)

            # Make the prediction and map the class index back to its label
            prediction = model.predict(image_array)
            classe = int(np.argmax(prediction, axis=1)[0])
            label = [key for key, value in mal_classes.items() if value == classe][0]

            # Display the result
            st.header("Résultat de la prédiction")
            st.write(f"L'image est classée comme {label}.")
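Note: app.py imports os and load_dotenv but never calls them, and the Groq API key is hardcoded in GroqAPI.__init__. A minimal sketch of the environment-based alternative, assuming the key is stored under a hypothetical GROQ_API_KEY entry in a local .env file or in the Space's secrets:

import os
from dotenv import load_dotenv
from groq import Groq

load_dotenv()  # loads a local .env file into the environment, if one is present
api_key = os.environ.get("GROQ_API_KEY")  # assumed variable name, not defined in the repo
client = Groq(api_key=api_key)

With this in place, the literal key can be removed from GroqAPI.__init__.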
requirements.txt
ADDED
@@ -0,0 +1,11 @@
groq==0.11.0
gTTS==2.5.3
langdetect==1.0.9
numpy==2.1.1
python-dotenv==1.0.1
Requests==2.32.3
streamlit==1.38.0
streamlit_option_menu==0.3.13
tensorflow==2.15.0
tensorflow_intel==2.15.0
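The Space header reports a build error, and these pins are a plausible cause: tensorflow_intel publishes Windows-only wheels, so it generally cannot be installed on the Linux image Spaces builds on, and numpy 2.x is not compatible with tensorflow 2.15, which targets NumPy 1.x. An untested sketch of an adjusted pin set under those assumptions:

groq==0.11.0
gTTS==2.5.3
langdetect==1.0.9
numpy<2.0            # TensorFlow 2.15 expects NumPy 1.x
python-dotenv==1.0.1
Requests==2.32.3
streamlit==1.38.0
streamlit_option_menu==0.3.13
tensorflow==2.15.0   # tensorflow_intel dropped: Windows-only wheels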