josondev committed on
Commit 5e888bb · verified · 1 Parent(s): f629f62

Update app.py

Files changed (1)
  1. app.py +58 -103
app.py CHANGED
@@ -1,57 +1,14 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
+# Initialize the client
 client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
 
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value='''
-# Update MEDICAL_PROMPT to be more restrictive
-MEDICAL_PROMPT = PromptTemplate(
-    input_variables=["query"],
-    template="""<bos><start_of_turn>system
+# Define the medical system prompt
+MEDICAL_SYSTEM_PROMPT = """<bos><start_of_turn>system
 You are Gemma, a medical AI assistant. You MUST ONLY answer health and medical-related questions.
+USE BULLET POINTS TO STRUCTURE YOUR ANSWERS AND THEY MUST BE PUNCTUATED AND GRAMMATICALLY CORRECT.
+Your responses should be concise and informative.
 Your responses should be professional, accurate, and focused on medical topics only.
 For any non-medical questions, respond with a redirection to medical topics.
 For medication queries, provide general information and recommend consulting a healthcare professional.
@@ -190,54 +147,63 @@ IMPORTANT NOTES:
 <end_of_turn>
 <start_of_turn>user
 {query}<end_of_turn>
-<start_of_turn>model
-'''
-)
+<start_of_turn>model."""
 
-# Update is_medical_query to be more comprehensive
+# Function to check if a query is medical-related
 def is_medical_query(query):
-    medical_keywords_and_greetings = [
-        "health", "disease", "symptom", "doctor", "medicine", "medical", "treatment",
-        "hospital", "clinic", "diagnosis", "patient", "drug", "prescription", "therapy",
-        "cancer", "diabetes", "heart", "blood", "pain", "surgery", "vaccine", "infection",
-        "allergy", "diet", "nutrition", "vitamin", "exercise", "mental health", "depression",
-        "anxiety", "disorder", "syndrome", "chronic", "acute", "emergency", "pharmacy",
-        "dosage", "side effect", "contraindication", "body", "organ", "immune", "virus",
-        "bacterial", "fungal", "parasite", "genetic", "hereditary", "congenital", "prenatal",
-        "headaches", "ache", "stomach ache", "skin", "head", "arm", "leg", "chest", "back", "throat", "eye", "ear", "nose", "mouth"
-    ]
-
-    # Remove greetings from the keyword list
-    medical_keywords = [word for word in medical_keywords_and_greetings if word not in ["hello", "hi", "greetings", "good morning", "good afternoon", "good evening", "hey"]]
-
-    query_lower = query.lower()
-    return any(keyword in query_lower for keyword in medical_keywords)
-
-
-# Update chat_with_model to enforce medical-only responses
-def chat_with_model(message, history):
+    medical_keywords = [
+        "health", "disease", "symptom", "doctor", "medicine", "medical", "treatment",
+        "hospital", "clinic", "diagnosis", "patient", "drug", "prescription", "therapy",
+        "cancer", "diabetes", "heart", "blood", "pain", "surgery", "vaccine", "infection",
+        "allergy", "diet", "nutrition", "vitamin", "exercise", "mental health", "depression",
+        "anxiety", "disorder", "syndrome", "chronic", "acute", "emergency", "pharmacy",
+        "dosage", "side effect", "contraindication", "body", "organ", "immune", "virus",
+        "bacterial", "fungal", "parasite", "genetic", "hereditary", "congenital", "prenatal",
+        "headaches", "ache", "stomach ache", "skin", "head", "arm", "leg", "chest", "back",
+        "throat", "eye", "ear", "nose", "mouth"
+    ]
+
+    query_lower = query.lower()
+    return any(keyword in query_lower for keyword in medical_keywords)
+
+# Response function for the chatbot
+def respond(message, history, max_tokens=512, temperature=0.7, top_p=0.95):
+    # Check if the query is medical-related
+    if not is_medical_query(message):
+        return "I'm specialized in medical topics only. I cannot answer this question. How can I assist with a health-related concern instead?"
+
+    # Prepare the messages for the API
+    messages = [{"role": "system", "content": MEDICAL_SYSTEM_PROMPT}]
+
+    for user_msg, assistant_msg in history:
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if assistant_msg:
+            messages.append({"role": "assistant", "content": assistant_msg})
+
+    messages.append({"role": "user", "content": message})
+
+    # Generate the response
+    response = ""
+
     try:
-        context = "\n".join([f"User: {msg}\nAssistant: {res}" for msg, res in history])
-        full_query = f"{context}\nUser: {message}"
-
-        if not is_medical_query(full_query):
-            return "I'm specialized in medical topics only. I cannot answer this question. How can I assist with a health-related concern instead?"
-
-        response = medical_chain.run(query=full_query)
-        clean_response = response.split("<end_of_turn>")[0].strip()
-
-        # Check if the response is medical-related
-        if not is_medical_query(clean_response):
-            return "I can only provide information on medical topics. Please ask a medical question."
-
-        return clean_response
-
+        for message in client.chat_completion(
+            messages,
+            max_tokens=max_tokens,
+            stream=True,
+            temperature=temperature,
+            top_p=top_p,
+        ):
+            token = message.choices[0].delta.content
+            if token:
+                response += token
+                yield response
     except Exception as e:
-        return f"I apologize, but I encountered an error: {str(e)}. Please try again."
+        yield f"I apologize, but I encountered an error: {str(e)}. Please try again."
 
-# Update Gradio examples to be medical-specific
-iface = gr.ChatInterface(
-    fn=chat_with_model,
+# Create the Gradio interface
+demo = gr.ChatInterface(
+    fn=respond,
     title="MedexDroid - Medical Assistant",
     examples=[
         "What are the symptoms of diabetes?",
@@ -249,19 +215,8 @@ iface = gr.ChatInterface(
     description="An AI Medical Assistant. Please ask health-related questions only.",
     theme=gr.themes.Soft(),
     css=".gradio-container {background-color: #f0f4f8}"
-, label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
 )
 
-
 if __name__ == "__main__":
     demo.launch()
+
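
For reference, the streaming pattern that the new respond() generator relies on can be exercised outside Gradio. A minimal standalone sketch, assuming the hosted HuggingFaceH4/zephyr-7b-beta Inference API endpoint is reachable (pass token=... to InferenceClient if your environment requires authentication):

```python
from huggingface_hub import InferenceClient

# Same model endpoint as app.py; add token=... here if needed.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")

messages = [
    {"role": "system", "content": "You are a medical AI assistant."},
    {"role": "user", "content": "What are the symptoms of diabetes?"},
]

# Stream the completion token by token, mirroring the respond() generator in app.py.
response = ""
for chunk in client.chat_completion(
    messages,
    max_tokens=256,
    stream=True,
    temperature=0.7,
    top_p=0.95,
):
    token = chunk.choices[0].delta.content
    if token:  # skip chunks whose delta content is empty, as in the updated app.py
        response += token

print(response)
```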