reedmayhew committed
Commit f03ab9c · verified · 1 Parent(s): a0c0fb9

Update app.py

Files changed (1)
  1. app.py +22 -40
app.py CHANGED
@@ -15,9 +15,7 @@ DESCRIPTION = '''
 
 LICENSE = """
 <p>
- This Health Assistant is designed to provide helpful healthcare information; however, it may make mistakes and is not designed to replace professional medical care. It is not intended to diagnose any condition or disease. Always consult with a qualified healthcare provider for any medical concerns.
- <br><br>
- I hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.
+ This Health Assistant is designed to provide helpful healthcare information; however, it may make mistakes and is not designed to replace professional medical care. It is not intended to diagnose any condition or disease. Always consult with a qualified healthcare provider for any medical concerns.\n\nI hereby confirm that I am at least 18 years of age (or accompanied by a legal guardian who is at least 18 years old), understand that the information provided by this service is for informational purposes only and is not intended to diagnose or treat any medical condition, and acknowledge that I am solely responsible for verifying any information provided.
 </p>
 """
 
@@ -53,13 +51,21 @@ terminators = [
 
 @spaces.GPU(duration=60)
 def chat_llama3_8b(message: str,
-                    history: list,
-                    temperature: float,
-                    max_new_tokens: int
-                    ) -> str:
+                    history: list,
+                    temperature: float,
+                    max_new_tokens: int
+                    ) -> str:
     """
     Generate a streaming response using the llama3-8b model.
+    Args:
+        message (str): The input message.
+        history (list): The conversation history used by ChatInterface.
+        temperature (float): The temperature for generating the response.
+        max_new_tokens (int): The maximum number of new tokens to generate.
+    Returns:
+        str: The generated response.
     """
+
     conversation = []
     for user, assistant in history:
         conversation.extend([
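
Note: the Args block added above also documents the call order that gr.ChatInterface uses, message and history first, followed by each entry of additional_inputs in the order listed. A minimal sketch of an equivalent direct call, using the slider values that appear later in this diff (the literal arguments are illustrative assumptions, not part of the commit):

# Sketch: how gr.ChatInterface feeds values into the handler,
# fn(message, history, *additional_inputs); the values below are placeholders.
response = chat_llama3_8b(
    "What is PrEP, and do I need it?",   # message typed by the user
    [],                                  # history: list of (user, assistant) pairs
    0.6,                                 # Temperature slider value
    2048,                                # Max new tokens slider value
)
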
@@ -117,30 +123,12 @@ def chat_llama3_8b(message: str,
     # Store the full response (including <think>) in history, but only show the user the cleaned response
     history.append((message, full_response))  # Full assistant response saved for context
 
- # JavaScript snippet to conditionally show examples if ?examples=true is present in the URL.
- js_code = """
- <script>
- window.addEventListener("load", function(){
-     const urlParams = new URLSearchParams(window.location.search);
-     if(urlParams.get('examples') !== 'true'){
-         var elem = document.getElementById("examples-container");
-         if (elem) {
-             elem.style.display = "none";
-         }
-     }
- });
- </script>
- """
-
 # Gradio block
 chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
 
 with gr.Blocks(fill_height=True, css=css) as demo:
-     gr.Markdown(DESCRIPTION)
-
-     # Include the JavaScript so it runs on the client side.
-     gr.HTML(js_code)
 
+     gr.Markdown(DESCRIPTION)
     gr.ChatInterface(
         fn=chat_llama3_8b,
         chatbot=chatbot,
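
For context, the lines deleted in this hunk worked together with the gr.Examples block removed in the next hunk: gr.Column(elem_id="examples-container") gave the wrapper a DOM id, and the injected script was intended to hide that node unless ?examples=true was present in the URL. A minimal self-contained sketch of the removed pattern (names match the deleted code; the surrounding Blocks scaffolding is an assumption):

import gradio as gr

# Removed pattern (sketch): client-side JS that toggles a container by its elem_id.
js_code = """
<script>
window.addEventListener("load", function () {
  const params = new URLSearchParams(window.location.search);
  if (params.get("examples") !== "true") {
    const elem = document.getElementById("examples-container");
    if (elem) { elem.style.display = "none"; }
  }
});
</script>
"""

with gr.Blocks() as demo:
    gr.HTML(js_code)                               # intended to run client-side, per the deleted comment
    with gr.Column(elem_id="examples-container"):  # target of getElementById above
        gr.Markdown("### Examples")
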
@@ -150,23 +138,17 @@ with gr.Blocks(fill_height=True, css=css) as demo:
             gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False),
             gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False),
         ],
+        examples=[
+            ['What is PrEP, and do I need it?'],
+            ['What medications help manage being undetectable with HIV?'],
+            ['How do I know if an abortion is the right option?'],
+            ['How can I access birth-control in states where it is regulated?']
         cache_examples=False,
+        ]
+
     )
 
-    # Wrap your examples in a container with an id.
-    with gr.Column(elem_id="examples-container"):
-        gr.Markdown("### Examples")
-        gr.Examples(
-            examples=[
-                ['What is PrEP, and do I need it?', ''],
-                ['What medications help manage being undetectable with HIV?', ''],
-                ['How do I know if an abortion is the right option?', ''],
-                ['How can I access birth-control in states where it is regulated?', '']
-            ],
-            inputs=chatbot,
-        )
-
     gr.Markdown(LICENSE)
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
 
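Taken together, the change drops the URL-gated gr.Examples section and its client-side toggle, and passes the example prompts straight to gr.ChatInterface. A self-contained sketch of the resulting wiring, with a stub in place of the real llama3-8b handler; in this sketch the closing bracket of examples is placed before cache_examples=False so the call parses, and additional_inputs is inferred from the surrounding context lines (both are assumptions, not shown verbatim in the diff):

import gradio as gr

# Stub standing in for chat_llama3_8b; the real handler streams tokens from the
# llama3-8b model defined earlier in app.py.
def chat_stub(message, history, temperature, max_new_tokens):
    yield f"(stub, temperature={temperature}, max_new_tokens={max_new_tokens}) {message}"

chatbot = gr.Chatbot(height=450, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True) as demo:
    gr.Markdown("DESCRIPTION placeholder")      # gr.Markdown(DESCRIPTION) in the commit
    gr.ChatInterface(
        fn=chat_stub,
        chatbot=chatbot,
        additional_inputs=[
            gr.Slider(minimum=0.6, maximum=0.6, step=0.1, value=0.6, label="Temperature", render=False),
            gr.Slider(minimum=1024, maximum=4096, step=128, value=2048, label="Max new tokens", render=False),
        ],
        examples=[
            ['What is PrEP, and do I need it?'],
            ['What medications help manage being undetectable with HIV?'],
            ['How do I know if an abortion is the right option?'],
            ['How can I access birth-control in states where it is regulated?'],
        ],
        cache_examples=False,   # closing ] of examples moved above this line so the call parses
    )
    gr.Markdown("LICENSE placeholder")          # gr.Markdown(LICENSE) in the commit

if __name__ == "__main__":
    demo.launch()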