import gradio as gr
import os
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
from threading import Thread
# Read the Hugging Face access token from the environment (if provided)
HF_TOKEN = os.environ.get("HF_TOKEN", None)
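# HF_TOKEN is read here but never used below. If the model repository were gated,
# the token could be forwarded to the loaders, e.g. (sketch only, not wired in;
# model_id is a placeholder for the repo name used further down):
#   AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
#   AutoModelForCausalLM.from_pretrained(model_id, token=HF_TOKEN)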
SYSTEM_PROMPT = "You are an AI Healthcare assistant. Provide clear, empathetic, and accurate information about healthcare topics. Do not state \"It's always best to consult with a healthcare provider\", as you are simulating the role of a healthcare provider. A disclaimer has been placed elsewhere for the user that informs them to be mindful about your replies, so your job is to provide them with as much information as possible and let them make decisions themselves."
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">A.I. Healthcare</h1>
</div>
'''
LICENSE = """
<p>
This Health Assistant is designed to provide helpful healthcare information; however, it may make mistakes and is not designed to replace professional medical care. It is not intended to diagnose any condition or disease. Always consult with a qualified healthcare provider for any medical concerns.
</p>
<p>
I hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.
</p>
"""
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">A.I. Healthcare</h1>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
</div>
"""
css = """
h1 {
text-align: center;
display: block;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
"""
# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF")
model = AutoModelForCausalLM.from_pretrained("reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF", device_map="cuda")
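# Optional sketch: on memory-constrained GPUs the model could instead be loaded in
# half precision (this assumes `import torch` were added at the top of the file):
#   model = AutoModelForCausalLM.from_pretrained(
#       "reedmayhew/HealthCare-Reasoning-Assistant-Llama-3.1-8B-HF",
#       device_map="cuda", torch_dtype=torch.bfloat16)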
terminators = [
tokenizer.eos_token_id,
tokenizer.convert_tokens_to_ids("<|eot_id|>")
]
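# The terminators above cover both the tokenizer's generic EOS token and Llama 3's
# <|eot_id|> end-of-turn marker, so generation stops at the end of the assistant turn.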
@spaces.GPU(duration=60)
def chat_llama3_8b(message: str,
history: list,
temperature: float,
max_new_tokens: int
) -> str:
"""
Generate a streaming response using the llama3-8b model.
Args:
message (str): The input message.
history (list): The conversation history used by ChatInterface.
temperature (float): The temperature for generating the response.
max_new_tokens (int): The maximum number of new tokens to generate.
Returns:
str: The generated response.
"""
conversation = []
# Add system prompt only if this is a new conversation (i.e., history is empty)
if not history:
conversation.append({"role": "system", "content": SYSTEM_PROMPT})
conversation.append({"role": "user", "assistant": "Understood! I will act as the user's healthcare provider, not hesitate with providing detailed responses, or refer them to another healthcare provider unless they ask for one, as you have already given them a disclaimer."})
for user, assistant in history:
conversation.extend([
{"role": "user", "content": user},
{"role": "assistant", "content": assistant}
])
# Ensure the model starts with "<think>"
conversation.append({"role": "user", "content": message})
conversation.append({"role": "assistant", "content": "<think> "}) # Force <think> at start
    # continue_final_message keeps the forced "<think> " assistant prefix open so the
    # model continues that message rather than starting a new turn (assumes a recent
    # transformers release that supports this argument)
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt", continue_final_message=True).to(model.device)
streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
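    # model.generate runs in a background thread (below) and pushes decoded text into
    # this streamer; skip_prompt=True keeps the prompt out of the output, and the
    # 10-second timeout raises if no new text arrives within that window.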
generate_kwargs = dict(
input_ids=input_ids,
streamer=streamer,
max_new_tokens=max_new_tokens,
do_sample=True,
temperature=temperature,
eos_token_id=terminators,
)
if temperature == 0:
generate_kwargs['do_sample'] = False
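        # Greedy decoding for temperature 0: transformers rejects sampling with a
        # non-positive temperature, so do_sample is switched off instead.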
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()
outputs = []
buffer = ""
think_detected = False
thinking_message_sent = False
full_response = "" # Store the full assistant response
for text in streamer:
buffer += text
full_response += text # Store raw assistant response (includes <think>)
# Send the "thinking" message once text starts generating
if not thinking_message_sent:
thinking_message_sent = True
yield "A.I. Healthcare is Thinking! Please wait, your response will output shortly...\n\n"
# Wait until </think> is detected before streaming output
        if not think_detected:
            if "</think>" in buffer:
                think_detected = True
                # Keep only the text that follows the </think> marker and start
                # streaming it, so the first visible chunk is not dropped
                buffer = buffer.split("</think>", 1)[1]
                if buffer:
                    outputs.append(buffer)
                    yield "".join(outputs)
        else:
            outputs.append(text)
            yield "".join(outputs)
    # Keep the raw response (including the <think> section) paired with the user
    # message. Note: gr.ChatInterface manages the displayed history itself, so this
    # append does not change what is shown or stored in the UI.
    history.append((message, full_response))
# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
with gr.Blocks(fill_height=True, css=css) as demo:
gr.Markdown(DESCRIPTION)
gr.ChatInterface(
fn=chat_llama3_8b,
chatbot=chatbot,
fill_height=True,
additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
additional_inputs=[
gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False),
gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False),
],
examples=[
['What is PrEP, and do I need it?'],
['What medications help manage being undetectable with HIV?'],
['How do I know if an abortion is the right option?'],
['How can I access birth-control in states where it is regulated?'],
],
cache_examples=False,
)
gr.Markdown(LICENSE)
if __name__ == "__main__":
    demo.launch()