import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
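
# Simple keyword gate: only messages that mention a health-related term are answered.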
def is_health_related(message):
    health_keywords = ["health", "medical", "disease", "symptom", "treatment", "doctor", "patient", "medicine"]
    message = message.lower()
    for keyword in health_keywords:
        if keyword in message:
            return True
    return False
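
# Rebuild the chat prompt from the history and stream the model's reply.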
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    # Refuse anything outside the health domain before calling the model.
    if not is_health_related(message):
        yield "Sorry, I can't help you with that because I am just a bot who can help with health-related queries."
        return

    messages = [{"role": "system", "content": system_message}]

    # Replay the (user, assistant) pairs from the chat history.
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": message})

    response = ""

    # Stream tokens and yield the accumulated response after each chunk.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content or ""
        response += token
        yield response
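
# Custom CSS applied to the Gradio app.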
css = """ |
|
body { |
|
font-family: 'Inter', sans-serif; |
|
background-color: #f0f0f0; |
|
} |
|
|
|
.gradio-container { |
|
max-width: 700px; |
|
margin: auto; |
|
} |
|
|
|
.gr-chat-container { |
|
border: 1px solid #ddd; |
|
border-radius: 5px; |
|
padding: 10px; |
|
background-color: #fff; |
|
} |
|
|
|
.gr-message { |
|
padding: 10px; |
|
border-bottom: 1px solid #ddd; |
|
} |
|
|
|
.gr-message:last-child { |
|
border-bottom: none; |
|
} |
|
|
|
.gr-user-message { |
|
color: #333; |
|
background-color: #f7f7f7; |
|
border-radius: 5px; |
|
padding: 5px; |
|
} |
|
|
|
.gr-assistant-message { |
|
color: #333; |
|
background-color: #fff; |
|
border-radius: 5px; |
|
padding: 5px; |
|
} |
|
|
|
.gr-input { |
|
padding: 10px; |
|
border: 1px solid #ccc; |
|
border-radius: 5px; |
|
width: 100%; |
|
} |
|
|
|
.gr-input:focus { |
|
border-color: #aaa; |
|
} |
|
|
|
.gr-button { |
|
background-color: #4CAF50; |
|
color: #fff; |
|
padding: 10px 20px; |
|
border: none; |
|
border-radius: 5px; |
|
cursor: pointer; |
|
} |
|
|
|
.gr-button:hover { |
|
background-color: #3e8e41; |
|
} |
|
""" |
|
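
# Assemble the UI: chat window, input box, and hidden generation settings.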
with gr.Blocks(css=css) as demo:
    gr.Markdown("# Health Assistant Chatbot")
    gr.Markdown("### Ask me any health-related questions.")
    chatbot = gr.Chatbot(
        value=[],
        show_label=False,
    )
    input_box = gr.Textbox(
        label="Type your message here",
        placeholder="Type your message here",
        show_label=False,
    )
    system_message = gr.Textbox(
        value="You are a virtual Doctor Assistant. Your role is to assist healthcare professionals by providing accurate, evidence-based medical information, offering treatment options, and supporting patient care. Always prioritize patient safety, provide concise answers, and clearly state that your advice does not replace a doctor's judgment. Do not diagnose or prescribe treatments without human oversight.",
        label="System message",
        visible=False,
    )
    max_tokens = gr.Slider(
        minimum=1,
        maximum=2048,
        value=512,
        step=1,
        label="Max new tokens",
        visible=False,
    )
    temperature = gr.Slider(
        minimum=0.1,
        maximum=4.0,
        value=0.7,
        step=0.1,
        label="Temperature",
        visible=False,
    )
    top_p = gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
        visible=False,
    )
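
    # Event handler: stream the bot's reply into the chat window as it is generated.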
    def update_chat(message, history, system_message, max_tokens, temperature, top_p):
        for partial_response in respond(message, history, system_message, max_tokens, temperature, top_p):
            yield history + [(message, partial_response)]

    input_box.submit(
        update_chat,
        inputs=[input_box, chatbot, system_message, max_tokens, temperature, top_p],
        outputs=chatbot,
    )

if __name__ == "__main__":
    demo.launch(share=True)