Bhaskar2611 committed on
Commit
f62a0c5
·
verified ·
1 Parent(s): b1da4ed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -18
app.py CHANGED
@@ -222,31 +222,31 @@ import gradio as gr
222
  from huggingface_hub import InferenceClient
223
  from dotenv import load_dotenv
224
 
225
- # Load API token from .env or environment
226
  load_dotenv()
227
- HF_TOKEN = os.getenv("HF_TOKEN") # or directly use your token here
228
 
229
- # Initialize the Hugging Face inference client
230
  client = InferenceClient(
231
  model="mistralai/Mistral-7B-Instruct-v0.3",
232
  token=HF_TOKEN
233
  )
234
 
235
- # Skin assistant prompt
236
- SKIN_ASSISTANT_PROMPT = (
237
  "You are an AI Dermatologist chatbot designed to assist users with skin by only providing text "
238
  "and if user information is not provided related to skin then ask what they want to know related to skin."
239
  )
240
 
 
241
  def respond(message, history):
242
- messages = [{"role": "system", "content": SKIN_ASSISTANT_PROMPT}]
243
- for user_msg, bot_msg in history:
244
- if user_msg:
245
- messages.append({"role": "user", "content": user_msg})
246
- if bot_msg:
247
- messages.append({"role": "assistant", "content": bot_msg})
248
  messages.append({"role": "user", "content": message})
249
 
 
250
  response = ""
251
  for chunk in client.chat.completions.create(
252
  model="mistralai/Mistral-7B-Instruct-v0.3",
@@ -256,17 +256,16 @@ def respond(message, history):
256
  top_p=0.95,
257
  stream=True,
258
  ):
259
- token = chunk.choices[0].delta.get("content", "")
260
  response += token
261
  yield response
262
 
263
- # Launch Gradio interface
264
- demo = gr.ChatInterface(
265
- fn=respond,
266
- title="Skin-Bot",
267
- theme="default"
268
- )
269
 
 
270
  if __name__ == "__main__":
271
  demo.launch()
272
 
 
222
  from huggingface_hub import InferenceClient
223
  from dotenv import load_dotenv
224
 
225
# Read the Hugging Face API token from a .env file (or the process environment).
load_dotenv()
HF_TOKEN = os.getenv("HF_TOKEN")

# Shared inference client pointed at the Mistral-7B instruct model.
client = InferenceClient(
    model="mistralai/Mistral-7B-Instruct-v0.3",
    token=HF_TOKEN,
)
234
 
235
# System prompt: keeps the assistant strictly on dermatology/skin topics.
system_message = (
    "You are an AI Dermatologist chatbot designed to assist users with skin by only providing text "
    "and if user information is not provided related to skin then ask what they want to know related to skin."
)
240
 
241
+ # Streaming chatbot logic
242
  def respond(message, history):
243
+ # Prepare messages with system prompt
244
+ messages = [{"role": "system", "content": system_message}]
245
+ for msg in history:
246
+ messages.append(msg)
 
 
247
  messages.append({"role": "user", "content": message})
248
 
249
+ # Stream response from the model
250
  response = ""
251
  for chunk in client.chat.completions.create(
252
  model="mistralai/Mistral-7B-Instruct-v0.3",
 
256
  top_p=0.95,
257
  stream=True,
258
  ):
259
+ token = chunk.choices[0].delta.get("content", "") or ""
260
  response += token
261
  yield response
262
 
263
# Assemble the Gradio UI: a Blocks app hosting a streaming chat interface.
with gr.Blocks() as demo:
    # 'messages' selects the openai-style list-of-dicts chat history format,
    # which must match on both the Chatbot widget and the ChatInterface.
    chat_window = gr.Chatbot(type='messages')
    gr.ChatInterface(fn=respond, chatbot=chat_window, type="messages")
 
 
267
 
268
# Start the web server only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()
271