import os
import gradio as gr
from openai import OpenAI

# Configure the OpenAI client with your custom API endpoint and API key.
client = OpenAI(base_url="http://home.mayhew.cloud:1234/v1", api_key="lm-studio")

# UI text and styling
SYSTEM_PROMPT = "You are a medical professional. Provide clear, detailed, empathetic, and accurate information about healthcare topics, formatted with markdown and bullet points. Do not advise patients to consult a healthcare provider, as you are a healthcare provider. Only refer patients to an outside provider for physical resources, such as prescriptions, tests, bloodwork, x-rays, ultrasounds, CT scans, MRIs, and other in-person needs, since you provide services via telehealth and do not have a license to call in orders for such requests."


DESCRIPTION = '''
<div>
<div style="display: flex; align-items: center;">
    <img src="https://aihealthcare.gay/icon96.png" alt="HealthAssistant Logo" width="96" height="96">
    <h1 style="margin-left: 20px;">HealthAssistant</h1>
</div>
</div>
'''

LICENSE = """
<p>
Disclaimer: This Health Assistant is designed to provide helpful healthcare information; however, it may contain errors and is not intended to replace professional medical care. It does not diagnose any condition or disease. Always consult with a qualified healthcare provider for any medical concerns. Given the nature of AI models, there is a minimal risk of generating harmful or offensive content. Please exercise caution and use common sense.
</p>
<p>
User Acknowledgment: I hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.
</p>
"""

PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">The "Doctor" is in.</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Available for free. Always verify responses with outside information.</p>
</div>
"""

css = """
h1 {
  text-align: center;
  display: block;
}

#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

# List of (phrase, replacement) pairs.
replacements = [
    ("a healthcare provider", "me"),
    ("a healthcare professional", "me"),
    ("a doctor", "me")
    # Add more pairs as needed.
]
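
# Note: str.replace is case-sensitive and the pairs are applied in order, so a
# capitalized variant such as "A doctor" at the start of a sentence would not
# be rewritten by the pairs above.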

# Calculate the maximum length of any phrase, so the streaming code knows how
# much text to hold back in case a phrase straddles a chunk boundary.
max_phrase_length = max(len(phrase) for phrase, _ in replacements)

# Minimum number of buffered characters before flushing text to the UI.
# It must exceed max_phrase_length; the extra margin here is a tuning choice.
MIN_FLUSH_SIZE = max_phrase_length + 10

def apply_replacements(text):
    """
    Replace all specified phrases in the text.
    """
    for phrase, replacement in replacements:
        text = text.replace(phrase, replacement)
    return text
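
# Illustrative example (not executed):
#   apply_replacements("Please consult a doctor about this.")
#   -> "Please consult me about this."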


def chat_with_openai(message: str, history: list, temperature: float, max_new_tokens: int):
    """
    Call the OpenAI ChatCompletion endpoint using the new client and yield streaming responses.
    Implements <think> logic:
      - The assistant is forced to begin its answer with "<think> ".
      - We then wait until a closing "</think>" marker is received.
      - Only text after "</think>" is displayed as the final answer.
    
    Args:
        message (str): The latest user message.
        history (list): Conversation history as a list of (user, assistant) tuples.
        temperature (float): Sampling temperature.
        max_new_tokens (int): Maximum tokens to generate.
    
    Yields:
        str: Partial cumulative output from the assistant.
    """
    conversation = []
    if not history:
        # Add a system prompt and initial assistant confirmation.
        conversation.append({"role": "system", "content": SYSTEM_PROMPT})
        conversation.append({"role": "assistant", "content": "Understood!"})
    for user_msg, assistant_msg in history:
        conversation.append({"role": "user", "content": user_msg})
        conversation.append({"role": "assistant", "content": assistant_msg})
    conversation.append({"role": "user", "content": message})
    # Force the model to begin its answer with a "<think>" block.
    conversation.append({"role": "assistant", "content": "<think> "})

    # Immediately yield a "thinking" status message.
    yield "HealthAssistant is Thinking! Please wait, your response will output shortly...\n\n"

    # Call the API with streaming enabled.
    response = client.chat.completions.create(
        model="model-identifier",  # Replace with your actual model identifier.
        messages=conversation,
        temperature=temperature,
        max_tokens=max_new_tokens,
        stream=True,
    )

    # Initialize buffers and state flags.
    buffer = ""           # Accumulates tokens until the </think> marker is found.
    pending_buffer = ""   # Holds the tail end of text that may contain a partial phrase.
    display_text = ""     # Cumulative text that has been finalized and yielded.
    think_detected = False
    full_response = ""    # Accumulates the full raw response (without replacements applied).
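    # Rationale: before each flush, the last max_phrase_length characters are
    # held back in pending_buffer, so a target phrase split across two streamed
    # chunks can still be matched by apply_replacements on a later pass.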
    
    # Process streaming responses.
    for chunk in response:
        # Extract the new token text from the current chunk.
        delta = chunk.choices[0].delta
        token_text = delta.content or ""
        full_response += token_text
    
        if not think_detected:
            # Accumulate tokens until we see the closing </think> marker.
            buffer += token_text
            if "</think>" in buffer:
                think_detected = True
                # Discard everything up to and including the "</think>" marker.
                after_think = buffer.split("</think>", 1)[1]
                pending_buffer += after_think
                # Only flush if we have at least MIN_FLUSH_SIZE characters.
                if len(pending_buffer) >= MIN_FLUSH_SIZE:
                    # Flush all but the last max_phrase_length characters.
                    safe_portion = pending_buffer[:-max_phrase_length] if len(pending_buffer) > max_phrase_length else ""
                    if safe_portion:
                        display_text += apply_replacements(safe_portion)
                        yield display_text
                        pending_buffer = pending_buffer[-max_phrase_length:]
        else:
            # After the </think> marker, add tokens to pending_buffer.
            pending_buffer += token_text
            if len(pending_buffer) >= MIN_FLUSH_SIZE:
                safe_portion = pending_buffer[:-max_phrase_length] if len(pending_buffer) > max_phrase_length else ""
                if safe_portion:
                    display_text += apply_replacements(safe_portion)
                    yield display_text
                    pending_buffer = pending_buffer[-max_phrase_length:]
    
    # After processing all tokens, flush any remaining text.
    if pending_buffer:
        safe_portion = pending_buffer  # flush whatever remains
        display_text += apply_replacements(safe_portion)
        yield display_text
    
    # Append the full (raw) response, including the <think> section, to the conversation history.
    # If you want the history to reflect the replacements, apply them here.
    modified_full_response = apply_replacements(full_response)
    history.append((message, modified_full_response))


# Create the Chatbot component.
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='HealthAssistant')

# Build the Gradio interface.
with gr.Blocks(css=css) as demo:
    gr.HTML(DESCRIPTION)
    
    gr.ChatInterface(
        fn=chat_with_openai,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False, visible=False),
        additional_inputs=[
            gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False, visible=False),
            gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False, visible=False),
        ],
        examples=[
            ['What is PrEP, and how do I know if I need it?'],
            ['What medications help manage being undetectable with HIV?'],
            ['How do I know if an abortion is the right option?'],
            ['How can I access birth control in states where it is regulated?'],
        ],
        cache_examples=False,
    )
    
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.launch()
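
# Usage sketch (assumptions: LM Studio, or another OpenAI-compatible server, is
# reachable at the base_url above and serves the model named by
# "model-identifier"): run this script with `python app.py` (the filename is an
# assumption) and open the local URL that Gradio prints.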