reedmayhew committed on
Commit
8cd3c65
·
verified ·
1 Parent(s): 29cb53e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +56 -19
app.py CHANGED
@@ -9,7 +9,7 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
9
 
10
  DESCRIPTION = '''
11
  <div>
12
- <h1 style="text-align: center;">A.I. Healthcare</h1>
13
  </div>
14
  '''
15
 
@@ -51,20 +51,26 @@ terminators = [
51
 
52
  @spaces.GPU(duration=60)
53
  def chat_llama3_8b(message: str,
54
- history: list,
55
- temperature: float,
56
- max_new_tokens: int
57
- ) -> str:
58
  """
59
  Generate a streaming response using the llama3-8b model.
 
60
  Args:
61
  message (str): The input message.
62
- history (list): The conversation history used by ChatInterface.
63
  temperature (float): The temperature for generating the response.
64
  max_new_tokens (int): The maximum number of new tokens to generate.
 
 
65
  Returns:
66
  str: The generated response.
67
  """
 
 
 
68
 
69
  conversation = []
70
  for user, assistant in history:
@@ -120,23 +126,53 @@ def chat_llama3_8b(message: str,
120
  outputs.append(text)
121
  yield "".join(outputs)
122
 
123
- # Store the full response (including <think>) in history, but only show the user the cleaned response
124
- history.append((message, full_response)) # Full assistant response saved for context
125
-
126
- # Gradio block
127
- chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
- with gr.Blocks(fill_height=True, css=css) as demo:
130
-
131
  gr.Markdown(DESCRIPTION)
132
- gr.ChatInterface(
 
 
 
 
133
  fn=chat_llama3_8b,
134
- chatbot=chatbot,
135
- fill_height=True,
136
- additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
137
  additional_inputs=[
138
- gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False),
139
- gr.Slider(minimum=128, maximum=4096, step=64, value=1024, label="Max new tokens", render=False),
 
 
 
 
 
 
 
 
140
  ],
141
  examples=[
142
  ['What are the common symptoms of diabetes?'],
@@ -146,6 +182,7 @@ with gr.Blocks(fill_height=True, css=css) as demo:
146
  ['What should I know about the side effects of common medications?']
147
  ],
148
  cache_examples=False,
 
149
  )
150
 
151
  gr.Markdown(LICENSE)
 
9
 
10
  DESCRIPTION = '''
11
  <div>
12
+ <h1 style="text-align: center;">A.I. Healthcare</h1>
13
  </div>
14
  '''
15
 
 
51
 
52
  @spaces.GPU(duration=60)
53
  def chat_llama3_8b(message: str,
54
+ history: list,
55
+ temperature: float,
56
+ max_new_tokens: int,
57
+ confirm: bool) -> str:
58
  """
59
  Generate a streaming response using the llama3-8b model.
60
+
61
  Args:
62
  message (str): The input message.
63
+ history (list): The conversation history.
64
  temperature (float): The temperature for generating the response.
65
  max_new_tokens (int): The maximum number of new tokens to generate.
66
+ confirm (bool): Whether the user has confirmed the age/disclaimer.
67
+
68
  Returns:
69
  str: The generated response.
70
  """
71
+ # If the confirmation checkbox is not checked, return a short message immediately.
72
+ if not confirm:
73
+ return "⚠️ You must confirm that you meet the usage requirements before sending a message."
74
 
75
  conversation = []
76
  for user, assistant in history:
 
126
  outputs.append(text)
127
  yield "".join(outputs)
128
 
129
+ # Store the full response (including <think>) in history for context
130
+ history.append((message, full_response))
131
+
132
+ # Custom JavaScript to disable the send button until confirmation is given.
133
+ # (The JS waits for the checkbox with a label containing the specified text and then monitors its state.)
134
+ CUSTOM_JS = """
135
+ <script>
136
+ document.addEventListener("DOMContentLoaded", function() {
137
+ // Poll for the confirmation checkbox and the send button inside the ChatInterface.
138
+ const interval = setInterval(() => {
139
+ // The checkbox is rendered as an <input type="checkbox"> with an associated label.
140
+ const checkbox = document.querySelector('input[type="checkbox"][aria-label*="I hereby confirm that I am at least 18 years of age"]');
141
+ // The send button might be a <button> element with a title or specific text. Adjust the selector as needed.
142
+ const sendButton = document.querySelector('button[title="Send"]');
143
+ if (checkbox && sendButton) {
144
+ sendButton.disabled = !checkbox.checked;
145
+ checkbox.addEventListener('change', function() {
146
+ sendButton.disabled = !checkbox.checked;
147
+ });
148
+ clearInterval(interval);
149
+ }
150
+ }, 500);
151
+ });
152
+ </script>
153
+ """
154
 
155
+ with gr.Blocks(css=css, title="A.I. Healthcare") as demo:
 
156
  gr.Markdown(DESCRIPTION)
157
+ # Inject the custom JavaScript.
158
+ gr.HTML(CUSTOM_JS)
159
+
160
+ # The ChatInterface below now includes additional inputs: the confirmation checkbox and the parameter sliders.
161
+ chat_interface = gr.ChatInterface(
162
  fn=chat_llama3_8b,
163
+ title="A.I. Healthcare Chat",
164
+ chatbot=gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Conversation'),
 
165
  additional_inputs=[
166
+ gr.Checkbox(
167
+ value=False,
168
+ label=("I hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian "
169
+ "who is at least 18 years old), understand that the information provided by this service "
170
+ "is for informational purposes only and is not intended to diagnose or treat any medical condition, "
171
+ "and acknowledge that I am solely responsible for verifying any information provided."),
172
+ elem_id="age_confirm_checkbox"
173
+ ),
174
+ gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", visible=False),
175
+ gr.Slider(minimum=128, maximum=4096, step=64, value=1024, label="Max new tokens", visible=False),
176
  ],
177
  examples=[
178
  ['What are the common symptoms of diabetes?'],
 
182
  ['What should I know about the side effects of common medications?']
183
  ],
184
  cache_examples=False,
185
+ allow_screenshot=False,
186
  )
187
 
188
  gr.Markdown(LICENSE)