import gradio as gr
import os
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
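# Gradio chat app for the HealthCare-Reasoning-Assistant-Llama-3.1-8B model.
# Responses are streamed to the UI, and the model's <think> reasoning section is hidden from the user.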
# Read the Hugging Face access token from the environment
HF_TOKEN = os.environ.get("HF_TOKEN", None)
SYSTEM_PROMPT = "You are an AI Healthcare assistant. Provide clear, empathetic, and accurate information about healthcare topics. Do not state \"It's always best to consult with a healthcare provider\", as you are simulating the role of a healthcare provider. A disclaimer has been placed elsewhere for the user that informs them to be mindful about your replies, so your job is to provide them with as much information as possible and let them make decisions themselves."
DESCRIPTION = '''
A.I. Healthcare
'''
LICENSE = """
This Health Assistant is designed to provide helpful healthcare information; however, it may make mistakes and is not designed to replace professional medical care. It is not intended to diagnose any condition or disease. Always consult with a qualified healthcare provider for any medical concerns.\n\nI hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.
"""
PLACEHOLDER = """
A.I. Healthcare
Ask me anything...
"""
css = """
h1 {
    text-align: center;
    display: block;
}
#duplicate-button {
    margin: auto;
    color: white;
    background: #1565c0;
    border-radius: 100vh;
}
"""
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF")
model = AutoModelForCausalLM.from_pretrained("reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF", device_map="cuda")
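# Stop generation at either the model's EOS token or Llama 3's end-of-turn token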
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
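# Request a GPU for up to 60 seconds per call when running on Hugging Face Spaces (ZeroGPU)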
@spaces.GPU(duration=60)
def chat_llama3_8b(message: str,
                   history: list,
                   temperature: float,
                   max_new_tokens: int
                   ) -> str:
    """
    Generate a streaming response using the llama3-8b model.

    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Yields:
        str: The generated response, streamed incrementally.
    """
    conversation = []
    # Add the system prompt only if this is a new conversation (i.e., history is empty)
    if not history:
        conversation.append({"role": "system", "content": SYSTEM_PROMPT})
        conversation.append({"role": "assistant", "content": "Understood! I will act as the user's healthcare provider, will not hesitate to provide detailed responses, and will not refer them to another healthcare provider unless they ask for one, as they have already been given a disclaimer."})
    for user, assistant in history:
        conversation.extend([
            {"role": "user", "content": user},
            {"role": "assistant", "content": assistant}
        ])
    conversation.append({"role": "user", "content": message})
    # Ensure the model starts its reply with "<think>"
    conversation.append({"role": "assistant", "content": "<think> "})  # Force "<think>" at the start
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )
    # Use greedy decoding when the temperature is 0
    if temperature == 0:
        generate_kwargs['do_sample'] = False
    # Run generation in a background thread so tokens can be streamed as they are produced
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
    outputs = []
    buffer = ""
    think_detected = False
    thinking_message_sent = False
    full_response = ""  # Store the full assistant response
    for text in streamer:
        buffer += text
        full_response += text  # Store the raw assistant response (includes the <think> section)
        # Send the "thinking" message once text starts generating
        if not thinking_message_sent:
            thinking_message_sent = True
            yield "A.I. Healthcare is Thinking! Please wait, your response will output shortly...\n\n"
        # Wait until "</think>" is detected before streaming output
        if not think_detected:
            if "</think>" in buffer:
                think_detected = True
                buffer = buffer.split("</think>", 1)[1]  # Drop the <think> section
                if buffer:
                    outputs.append(buffer)  # Keep any text that followed "</think>" in the same chunk
                    yield "".join(outputs)
        else:
            outputs.append(text)
            yield "".join(outputs)
    # Store the full response (including the <think> section) in history, but only show the user the cleaned response
    history.append((message, full_response))  # Full assistant response saved for context
# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False),
            gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False),
        ],
        examples=[
            ['What is PrEP, and do I need it?'],
            ['What medications help manage being undetectable with HIV?'],
            ['How do I know if an abortion is the right option?'],
            ['How can I access birth control in states where it is regulated?'],
        ],
        cache_examples=False,
    )
    gr.Markdown(LICENSE)
if __name__ == "__main__":
    demo.launch()