Update app.py
app.py
CHANGED
@@ -222,31 +222,31 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 from dotenv import load_dotenv
 
-# Load API token
+# Load Hugging Face API token
 load_dotenv()
-HF_TOKEN = os.getenv("HF_TOKEN")
+HF_TOKEN = os.getenv("HF_TOKEN")
 
-# Initialize
+# Initialize Hugging Face client
 client = InferenceClient(
     model="mistralai/Mistral-7B-Instruct-v0.3",
     token=HF_TOKEN
 )
 
-#
-system_message = (
+# System prompt for the dermatology chatbot
+system_message = (
     "You are an AI Dermatologist chatbot designed to assist users with skin by only providing text "
     "and if user information is not provided related to skin then ask what they want to know related to skin."
 )
 
+# Streaming chatbot logic
 def respond(message, history):
-    messages = [{"role": "system", "content": system_message}]
-    for user_msg, bot_msg in history:
-        if user_msg:
-            messages.append({"role": "user", "content": user_msg})
-        if bot_msg:
-            messages.append({"role": "assistant", "content": bot_msg})
+    # Prepare messages with system prompt
+    messages = [{"role": "system", "content": system_message}]
+    for msg in history:
+        messages.append(msg)
     messages.append({"role": "user", "content": message})
 
+    # Stream response from the model
     response = ""
     for chunk in client.chat.completions.create(
         model="mistralai/Mistral-7B-Instruct-v0.3",
@@ -256,17 +256,16 @@ def respond(message, history):
         top_p=0.95,
         stream=True,
     ):
-        token = chunk.choices[0].delta.get("content", "")
+        token = chunk.choices[0].delta.get("content", "") or ""
         response += token
         yield response
 
-#
-demo = gr.ChatInterface(
-    fn=respond,
-    theme="default"
-)
+# Create Gradio interface
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot(type='messages')  # Use modern message format
+    gr.ChatInterface(fn=respond, chatbot=chatbot, type="messages")  # Match format
 
+# Launch app
 if __name__ == "__main__":
     demo.launch()
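A note on the main behavioral change: with gr.Chatbot(type='messages'), Gradio delivers history as a list of OpenAI-style role/content dicts rather than (user, bot) tuples, which is why the pairwise loop in the old respond() is replaced by a plain append of each dict. A minimal sketch of the two formats, with illustrative values that are not from this Space:

# Old tuple-style history (what the removed loop expected):
history_tuples = [("What causes acne?", "Acne is usually caused by ...")]

# New messages-style history (what type="messages" passes to respond()):
history_messages = [
    {"role": "user", "content": "What causes acne?"},
    {"role": "assistant", "content": "Acne is usually caused by ..."},
]

def to_messages(pairs):
    # Convert tuple-style history to messages-style, skipping empty turns.
    messages = []
    for user_msg, bot_msg in pairs:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})
    return messages

assert to_messages(history_tuples) == history_messages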
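The `or ""` added to the token extraction line matters because a streamed chunk's delta can carry no content (for example the final chunk, or a role-only chunk), in which case .get("content", "") may still return None when the key is present with a null value. Depending on the huggingface_hub version, delta may also be a typed object rather than a plain dict, so a version-agnostic helper can hedge against both; a sketch (extract_token is a hypothetical helper, not part of this Space):

def extract_token(chunk):
    # Works whether delta is a dict or an object exposing .content.
    delta = chunk.choices[0].delta
    if isinstance(delta, dict):
        return delta.get("content") or ""
    return getattr(delta, "content", None) or ""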
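Lines 253-255 of the create() call fall between the two hunks, so the messages and sampling arguments are not visible in this diff. A sketch of what the full streaming call plausibly looks like; messages is the list built in respond(), while max_tokens and temperature are assumed placeholders rather than the Space's actual values:

for chunk in client.chat.completions.create(
    model="mistralai/Mistral-7B-Instruct-v0.3",
    messages=messages,   # the list built in respond()
    max_tokens=512,      # assumed placeholder
    temperature=0.7,     # assumed placeholder
    top_p=0.95,
    stream=True,
):
    token = chunk.choices[0].delta.get("content", "") or ""
    response += token
    yield response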