import gradio as gr
import os
import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
# Read the Hugging Face access token from the environment (if one is set)
HF_TOKEN = os.environ.get("HF_TOKEN", None)
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">A.I. Healthcare</h1>
</div>
'''
LICENSE = """
<p>
This Health Assistant is designed to provide helpful healthcare information; however, it may make mistakes and is not a substitute for professional medical care. It is not intended to diagnose any condition or disease. Always consult a qualified healthcare provider for any medical concerns.<br><br>I hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.
</p>
"""
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">A.I. Healthcare</h1>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
</div>
"""
css = """
h1 {
    text-align: center;
    display: block;
}
#duplicate-button {
    margin: auto;
    color: white;
    background: #1565c0;
    border-radius: 100vh;
}
"""
# Load the tokenizer and model (the tokenizer is not placed on a device)
tokenizer = AutoTokenizer.from_pretrained("reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF")
model = AutoModelForCausalLM.from_pretrained(
    "reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF",
    torch_dtype=torch.bfloat16,  # half precision so the 8B model fits on a single GPU
    device_map="cuda",
)
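# Stop generation at either the tokenizer's EOS token or Llama 3's end-of-turn token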
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
@spaces.GPU  # request a ZeroGPU slot for the duration of each generation call
def chat_llama3_8b(message: str,
                   history: list,
                   temperature: float,
                   max_new_tokens: int
                   ) -> str:
    """
    Generate a streaming response using the llama3-8b model.

    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Yields:
        str: The response generated so far.
    """
    conversation = []
    for user, assistant in history:
        conversation.extend([
            {"role": "user", "content": user},
            {"role": "assistant", "content": assistant}
        ])
    conversation.append({"role": "user", "content": message})
    # Pre-fill the assistant turn so the model starts its reply with "<think>"
    conversation.append({"role": "assistant", "content": "<think> "})
    # continue_final_message=True keeps the pre-filled "<think> " turn open so generation continues from it
    input_ids = tokenizer.apply_chat_template(conversation, continue_final_message=True, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )
    # Fall back to greedy decoding when temperature is 0, since sampling at temperature 0 is undefined
    if temperature == 0:
        generate_kwargs['do_sample'] = False

    # Run generation in a background thread so tokens can be streamed as they arrive
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()
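    # Stream the reply, holding back the model's <think> reasoning and only surfacing text after </think>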
    outputs = []
    buffer = ""
    think_detected = False
    thinking_message_sent = False
    full_response = ""  # Store the full assistant response

    for text in streamer:
        buffer += text
        full_response += text  # Keep the raw assistant response (includes the <think> section)

        # Send the "thinking" message once text starts generating
        if not thinking_message_sent:
            thinking_message_sent = True
            yield "A.I. Healthcare is Thinking! Please wait, your response will output shortly...\n\n"

        # Wait until </think> is detected before streaming output
        if not think_detected:
            if "</think>" in buffer:
                think_detected = True
                # Drop the <think> section and emit whatever followed it in this chunk
                outputs.append(buffer.split("</think>", 1)[1])
                yield "".join(outputs)
        else:
            outputs.append(text)
            yield "".join(outputs)

    # Append the full raw response (including <think>) to the local history list; the user only sees the cleaned response
    history.append((message, full_response))
# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False),
            gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False),
        ],
        examples=[
            ['What is PrEP, and do I need it?'],
            ['What medications help manage being undetectable with HIV?'],
            ['How do I know if an abortion is the right option?'],
            ['How can I access birth-control in states where it is regulated?'],
        ],
        cache_examples=False,
    )
    gr.Markdown(LICENSE)
if __name__ == "__main__":
    demo.launch()