import gradio as gr
from huggingface_hub import InferenceClient

# Initialize the InferenceClient
client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
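# Note: gated models such as meta-llama/Llama-3.2-3B-Instruct may require accepting the model
# license and a Hugging Face access token (e.g. the HF_TOKEN environment variable or token=...).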

def is_health_related(message):
    # Simple keyword heuristic to decide whether a message is health-related
    health_keywords = ["health", "medical", "disease", "symptom", "treatment", "doctor", "patient", "medicine"]
    message = message.lower()
    return any(keyword in message for keyword in health_keywords)

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    if not is_health_related(message):
        # respond() is a generator (it yields below), so the refusal must be yielded, not returned
        yield "Sorry, I can't help you with that because I am just a bot who can help with health-related queries."
        return

    messages = [{"role": "system", "content": system_message}]

    for val in history:
        # Each history entry is a (user_message, assistant_message) pair
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""

    # Stream tokens from the Inference API and yield the growing response
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        if token:
            response += token
            yield response

# Custom CSS to make it look like ChatGPT
css = """
body {
    font-family: 'Inter', sans-serif;
    background-color: #f0f0f0;
}

.gradio-container {
    max-width: 700px;
    margin: auto;
}

.gr-chat-container {
    border: 1px solid #ddd;
    border-radius: 5px;
    padding: 10px;
    background-color: #fff;
}

.gr-message {
    padding: 10px;
    border-bottom: 1px solid #ddd;
}

.gr-message:last-child {
    border-bottom: none;
}

.gr-user-message {
    color: #333;
    background-color: #f7f7f7;
    border-radius: 5px;
    padding: 5px;
}

.gr-assistant-message {
    color: #333;
    background-color: #fff;
    border-radius: 5px;
    padding: 5px;
}

.gr-input {
    padding: 10px;
    border: 1px solid #ccc;
    border-radius: 5px;
    width: 100%;
}

.gr-input:focus {
    border-color: #aaa;
}

.gr-button {
    background-color: #4CAF50;
    color: #fff;
    padding: 10px 20px;
    border: none;
    border-radius: 5px;
    cursor: pointer;
}

.gr-button:hover {
    background-color: #3e8e41;
}
"""

# Create a custom chat interface using gr.Blocks
with gr.Blocks(css=css) as demo:
    gr.Markdown("# Health Assistant Chatbot")
    gr.Markdown("### Ask me any health-related questions.")
    chatbot = gr.Chatbot(value=[])
    input_box = gr.Textbox(
        label="Type your message here",
        placeholder="Type your message here",
        show_label=False,
    )
    system_message = gr.Textbox(
        value="You are a virtual Doctor Assistant. Your role is to assist healthcare professionals by providing accurate, evidence-based medical information, offering treatment options, and supporting patient care. Always prioritize patient safety, provide concise answers, and clearly state that your advice does not replace a doctor's judgment. Do not diagnose or prescribe treatments without human oversight.",
        label="System message",
        visible=False,
    )
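    # The generation settings below are hidden from the UI but still passed through to respond()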
    max_tokens = gr.Slider(
        minimum=1,
        maximum=2048,
        value=512,
        step=1,
        label="Max new tokens",
        visible=False,
    )
    temperature = gr.Slider(
        minimum=0.1,
        maximum=4.0,
        value=0.7,
        step=0.1,
        label="Temperature",
        visible=False,
    )
    top_p = gr.Slider(
        minimum=0.1,
        maximum=1.0,
        value=0.95,
        step=0.05,
        label="Top-p (nucleus sampling)",
        visible=False,
    )

    def update_chat(message, history, system_message, max_tokens, temperature, top_p):
        # respond() streams partial replies, so append the new turn and update it as tokens arrive
        history = history + [(message, "")]
        for partial in respond(message, history[:-1], system_message, max_tokens, temperature, top_p):
            history[-1] = (message, partial)
            yield history

    input_box.submit(
        update_chat,
        inputs=[input_box, chatbot, system_message, max_tokens, temperature, top_p],
        outputs=chatbot,
    )

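# share=True also creates a temporary public Gradio link; omit it to serve only on localhost.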
if __name__ == "__main__":
    demo.launch(share=True)