# HealthAssistant / app.py
import os
import gradio as gr
from openai import OpenAI
# Configure the OpenAI client with your custom API endpoint and API key.
client = OpenAI(base_url="http://home.mayhew.cloud:1234/v1", api_key="lm-studio")
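# LM Studio's local server speaks the OpenAI API but typically ignores the
# API key, so "lm-studio" is only a placeholder value.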
# UI text and styling
SYSTEM_PROMPT = "You are a medical professional. Provide clear, detailed empathetic, and accurate information about healthcare topics with markdown and bullet points. Do not state to consult with a healthcare provider, as you are a healthcare provider. Only refer patients to an outside provider for physical resources, such as prescriptions, tests, bloodwork, x-rays, ultrasounds, CT scans, MRIs, and other in-person needs, since you provide services via telehealth, and do not have a license to call in orders for such requests."
DESCRIPTION = '''
<div>
  <div style="display: flex; align-items: center;">
    <img src="https://aihealthcare.gay/icon96.png" alt="HealthAssistant Logo" width="96" height="96">
    <h1 style="margin-left: 20px;">HealthAssistant</h1>
  </div>
</div>
'''
LICENSE = """
<p>
Disclaimer: This Health Assistant is designed to provide helpful healthcare information; however, it may contain errors and is not intended to replace professional medical care. It does not diagnose any condition or disease. Always consult with a qualified healthcare provider for any medical concerns. Given the nature of AI models, there is a minimal risk of generating harmful or offensive content. Please exercise caution and use common sense.
User Acknowledgment: I hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.</p>
"""
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
<h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">The "Doctor" is in.</h1>
<p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Available for free. Always verify responses with outside information.</p>
</div>
"""
css = """
h1 {
text-align: center;
display: block;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
"""
# List of (phrase, replacement) pairs applied to the model's output.
replacements = [
    ("a healthcare provider", "me"),
    # Add more pairs as needed.
]
# Calculate the maximum length of any phrase.
max_phrase_length = max(len(phrase) for phrase, _ in replacements)
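# The streaming loop below always holds back this many characters, so a phrase
# that is split across two chunks can still be matched by apply_replacements.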
def apply_replacements(text):
    """Replace all specified phrases in the text."""
    for phrase, replacement in replacements:
        text = text.replace(phrase, replacement)
    return text
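# Illustrative example, assuming the default replacements list above:
#   apply_replacements("Please see a healthcare provider.")  ->  "Please see me."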
def chat_with_openai(message: str, history: list, temperature: float, max_new_tokens: int):
    """
    Call the OpenAI-compatible chat completions endpoint and yield streaming responses.

    Implements the <think> logic:
      - The assistant is forced to begin its answer with "<think> ".
      - Output is buffered until the closing "</think>" marker is received.
      - Only text after "</think>" is displayed as the final answer.

    Args:
        message (str): The latest user message.
        history (list): Conversation history as a list of (user, assistant) tuples.
        temperature (float): Sampling temperature.
        max_new_tokens (int): Maximum tokens to generate.

    Yields:
        str: The cumulative assistant output so far.
    """
    conversation = []
    # Always seed the conversation with the system prompt and an assistant
    # acknowledgment; the UI history only carries (user, assistant) pairs,
    # so the prompt must be re-sent on every turn.
    conversation.append({"role": "system", "content": SYSTEM_PROMPT})
    conversation.append({"role": "assistant", "content": "Understood!"})
    for user_msg, assistant_msg in history:
        conversation.append({"role": "user", "content": user_msg})
        conversation.append({"role": "assistant", "content": assistant_msg})
    conversation.append({"role": "user", "content": message})
    # Force the model to begin its answer with a "<think>" block.
    conversation.append({"role": "assistant", "content": "<think> "})
    # Immediately yield a status message while the model "thinks".
    yield "HealthAssistant is thinking! Please wait; your response will appear shortly...\n\n"
    # Call the API with streaming enabled.
    response = client.chat.completions.create(
        model="model-identifier",  # Replace with your actual model identifier.
        messages=conversation,
        temperature=temperature,
        max_tokens=max_new_tokens,
        stream=True,
    )
    # Buffers and state flags.
    buffer = ""            # Accumulates text until the </think> marker is found.
    pending_buffer = ""    # Sliding window that may hold the start of a split phrase.
    think_detected = False
    full_response = ""     # Accumulates the full raw response, including <think>.
    output = ""            # Cumulative, replacement-filtered text shown in the UI.
    # Process streaming responses.
    for chunk in response:
        # Extract the new token text from the chunk.
        delta = chunk.choices[0].delta
        token_text = delta.content or ""
        full_response += token_text
        if not think_detected:
            # Accumulate tokens until the closing </think> marker appears.
            buffer += token_text
            if "</think>" in buffer:
                think_detected = True
                # Keep only the text after the </think> marker.
                pending_buffer += buffer.split("</think>", 1)[1]
        else:
            pending_buffer += token_text
        # Stream out everything except the last max_phrase_length characters,
        # which could be the beginning of a phrase split across chunks.
        if think_detected and len(pending_buffer) > max_phrase_length:
            to_yield = apply_replacements(pending_buffer[:-max_phrase_length])
            output += to_yield
            yield output
            # Retain the tail in case it starts a replacement phrase.
            pending_buffer = pending_buffer[-max_phrase_length:]
    # After the stream ends, flush any remaining text in pending_buffer.
    if pending_buffer:
        output += apply_replacements(pending_buffer)
        yield output
    # Append the response to the conversation history with replacements applied,
    # so the history matches what was displayed. The raw text, including the
    # <think> section, remains available in full_response.
    modified_full_response = apply_replacements(full_response)
    history.append((message, modified_full_response))
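# Minimal console check (illustrative; bypasses Gradio). Assumes the LM Studio
# server configured above is reachable and has a model loaded:
#   for partial in chat_with_openai("What is PrEP?", [], 0.6, 1024):
#       print(partial)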
# Create the Chatbot component.
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='HealthAssistant')
# Build the Gradio interface.
with gr.Blocks(css=css) as demo:
    gr.HTML(DESCRIPTION)
    gr.ChatInterface(
        fn=chat_with_openai,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False, visible=False),
        additional_inputs=[
            gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False, visible=False),
            gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False, visible=False),
        ],
        examples=[
            ['What is PrEP, and how do I know if I need it?'],
            ['What medications help manage being undetectable with HIV?'],
            ['How do I know if an abortion is the right option?'],
            ['How can I access birth control in states where it is regulated?'],
        ],
        cache_examples=False,
    )
    gr.Markdown(LICENSE)
if __name__ == "__main__":
    demo.launch()