YangWu001 committed on
Commit
7ce6cf0
·
1 Parent(s): 29e354b
Files changed (1) hide show
  1. app.py +14 -5
app.py CHANGED
@@ -13,6 +13,7 @@ stop_inference = False
13
 
14
  def respond(
15
  message,
 
16
  system_message,
17
  max_tokens,
18
  temperature,
@@ -25,6 +26,11 @@ def respond(
25
  if use_local_model:
26
  # Simulate local inference (ignoring history)
27
  messages = [{"role": "system", "content": system_message}]
 
 
 
 
 
28
  messages.append({"role": "user", "content": message})
29
 
30
  response = ""
@@ -42,6 +48,11 @@ def respond(
42
  else:
43
  # API-based inference (ignoring history)
44
  messages = [{"role": "system", "content": system_message}]
 
 
 
 
 
45
  messages.append({"role": "user", "content": message})
46
 
47
  response = ""
@@ -129,7 +140,7 @@ with gr.Blocks(css=custom_css) as demo:
129
 
130
  cancel_button = gr.Button("Cancel Inference", variant="danger")
131
 
132
- def chat_fn(message, chat_history):
133
  response_gen = respond(
134
  message,
135
  system_message.value,
@@ -142,11 +153,9 @@ with gr.Blocks(css=custom_css) as demo:
142
  for response in response_gen:
143
  full_response += response # Accumulate the full response
144
 
145
- # Append the new message-response pair to chat_history
146
- chat_history.append((message, full_response))
147
- return chat_history
148
 
149
- user_input.submit(chat_fn, [user_input, chat_history], chat_history)
150
  cancel_button.click(cancel_inference)
151
 
152
  if __name__ == "__main__":
 
13
 
14
  def respond(
15
  message,
16
+ history: list[tuple[str, str]],
17
  system_message,
18
  max_tokens,
19
  temperature,
 
26
  if use_local_model:
27
  # Simulate local inference (ignoring history)
28
  messages = [{"role": "system", "content": system_message}]
29
+ for val in history:
30
+ if val[0]:
31
+ messages.append({"role": "user", "content": val[0]})
32
+ if val[1]:
33
+ messages.append({"role": "assistant", "content": val[1]})
34
  messages.append({"role": "user", "content": message})
35
 
36
  response = ""
 
48
  else:
49
  # API-based inference (ignoring history)
50
  messages = [{"role": "system", "content": system_message}]
51
+ for val in history:
52
+ if val[0]:
53
+ messages.append({"role": "user", "content": val[0]})
54
+ if val[1]:
55
+ messages.append({"role": "assistant", "content": val[1]})
56
  messages.append({"role": "user", "content": message})
57
 
58
  response = ""
 
140
 
141
  cancel_button = gr.Button("Cancel Inference", variant="danger")
142
 
143
+ def chat_fn(message):
144
  response_gen = respond(
145
  message,
146
  system_message.value,
 
153
  for response in response_gen:
154
  full_response += response # Accumulate the full response
155
 
156
+ return full_response
 
 
157
 
158
+ user_input.submit(chat_fn, inputs=user_input, outputs=chat_history)
159
  cancel_button.click(cancel_inference)
160
 
161
  if __name__ == "__main__":