ffreemt committed
Commit 558796e · 1 Parent(s): efc09da

Update single model

Files changed (1)
  1. app.py +24 -18
app.py CHANGED
@@ -118,11 +118,12 @@ def bot(chat_history, **kwargs):
         )
         """
         logger.debug("run model.chat...")
+        model.generation_config(**kwargs)
         response, chat_history = model.chat(
             tokenizer,
             message,
             chat_history[:-1],
-            **kwargs,
+            # **kwargs,
         )
         del response
         return chat_history
@@ -131,6 +132,21 @@ def bot(chat_history, **kwargs):
         chat_history[:-1].append(["message", str(exc)])
         return chat_history
 
+def bot_stream(chat_history, **kwargs):
+    try:
+        message = chat_history[-1][0]
+    except Exception as exc:
+        logger.error(f"{chat_history=}: {exc}")
+        raise gr.Error(f"{chat_history=}")
+    # yield chat_history
+
+    # for elm in model.chat_stream(tokenizer, message, chat_history):
+    model.generation_config(**kwargs)
+    for elm in model.chat_stream(tokenizer, message, chat_history):
+        chat_history[-1] = [message, elm]
+        yield chat_history
+
+
 
 SYSTEM_PROMPT = "You are a helpful assistant."
 MAX_MAX_NEW_TOKENS = 1024
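
The hunk above lifts streaming out of the gr.Blocks scope into a module-level generator: each partial completion overwrites the last turn, and the whole history is yielded so Gradio can redraw the Chatbot. (Note the new model.generation_config(**kwargs) call form; the removed code used generation_config.update(...), which is the stock transformers API, so the callable form presumably relies on this model wrapper.) A minimal runnable sketch of the streaming pattern, with fake_chat_stream standing in for the Qwen-style model.chat_stream(tokenizer, message, history); the stand-in names are illustrative, not from app.py:

    def fake_chat_stream(message, chat_history):
        # Stand-in for model.chat_stream: chat models of this style yield
        # progressively longer partial replies, not single tokens.
        reply = f"you said: {message}"
        for i in range(1, len(reply) + 1):
            yield reply[:i]

    def stream_demo(chat_history):
        message = chat_history[-1][0]
        for elm in fake_chat_stream(message, chat_history):
            chat_history[-1] = [message, elm]  # overwrite the last turn in place
            yield chat_history                 # each yield triggers a UI redraw

    for snapshot in stream_demo([["hello", None]]):
        print(snapshot[-1][1])  # "y", "yo", ..., "you said: hello"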
@@ -146,7 +162,8 @@ class Config:
     top_p: float = 0.9
 
 
-stats_default = SimpleNamespace(llm=model, system_prompt=SYSTEM_PROMPT, config=Config())
+# stats_default = SimpleNamespace(llm=model, system_prompt=SYSTEM_PROMPT, config=Config())
+stats_default = SimpleNamespace(llm=None, system_prompt=SYSTEM_PROMPT, config=Config())
 
 theme = gr.themes.Soft(text_size="sm")
 with gr.Blocks(
@@ -158,20 +175,9 @@ with gr.Blocks(
     if not torch.cuda.is_available():
         raise gr.Error("GPU not available, cant run. Turn on GPU and restart")
 
-    # model_ = stats.value.llm # OOM
-    config = stats.value.config
-    stats.value.llm.generation_config.update(**asdict(config))
-    def bot_stream(chat_history):
-        try:
-            message = chat_history[-1][0]
-        except Exception as exc:
-            logger.error(f"{chat_history=}: {exc}")
-            raise gr.Error(f"{chat_history=}")
-        # yield chat_history
-        # for elm in model.chat_stream(tokenizer, message, chat_history):
-        for elm in stats.value.llm.chat_stream(tokenizer, message, chat_history):
-            chat_history[-1] = [message, elm]
-            yield chat_history
+    config = asdict(stats.value.config)
+    def bot_stream_state(chat_history):
+        return bot_stream(chat_history, **config)
 
     with gr.Accordion("🎈 Info", open=False):
         gr.Markdown(
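
This hunk drops the in-scope streamer in favor of a thin wrapper: the sampling settings are frozen into a plain dict once at build time, and bot_stream_state keeps the single-argument signature a Gradio event handler needs. A sketch of that binding, assuming stats is a gr.State holding stats_default; only top_p appears in this diff, so the other Config fields are assumptions:

    from dataclasses import asdict, dataclass
    from types import SimpleNamespace

    @dataclass
    class Config:
        max_new_tokens: int = 1024  # assumed field; the diff shows only top_p
        temperature: float = 0.7    # assumed field
        top_p: float = 0.9

    # stand-in for gr.State(stats_default).value
    stats = SimpleNamespace(value=SimpleNamespace(config=Config()))

    config = asdict(stats.value.config)  # {"max_new_tokens": 1024, ...}

    def bot_stream(chat_history, **kwargs):  # stub for the real generator
        yield chat_history

    def bot_stream_state(chat_history):
        # single-argument closure: .then(bot_stream_state, chatbot, chatbot)
        # passes only the Chatbot value, so the kwargs ride along via closure
        return bot_stream(chat_history, **config)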
@@ -213,7 +219,7 @@ with gr.Blocks(
         queue=True,
         show_progress="full",
         # api_name=None,
-    ).then(bot_stream, chatbot, chatbot, queue=True)
+    ).then(bot_stream_state, chatbot, chatbot, queue=True)
     submit_click_event = submit.click(
         # fn=lambda x, y: ("",) + user(x, y)[1:], # clear msg
         fn=user, # clear msg
@@ -222,7 +228,7 @@ with gr.Blocks(
         queue=True,
         show_progress="full",
         # api_name=None,
-    ).then(bot_stream, chatbot, chatbot, queue=True)
+    ).then(bot_stream_state, chatbot, chatbot, queue=True)
     stop.click(
         fn=None,
         inputs=None,
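
The last two hunks rewire both the textbox submit and the button click to the new wrapper. For reference, a self-contained echo app exercising the same user-then-stream chain; apart from the .then(bot_stream_state, chatbot, chatbot, queue=True) pattern taken from the diff, every name and body here is a stand-in for unchanged parts of app.py:

    import gradio as gr

    def user(user_message, chat_history):
        # append a new turn with an empty bot slot, then clear the textbox
        return "", chat_history + [[user_message, None]]

    def bot_stream_state(chat_history):
        message = chat_history[-1][0]
        reply = f"echo: {message}"
        for i in range(1, len(reply) + 1):
            chat_history[-1] = [message, reply[:i]]
            yield chat_history

    with gr.Blocks() as block:
        chatbot = gr.Chatbot()
        msg = gr.Textbox()
        submit = gr.Button("Submit")
        msg.submit(
            user, [msg, chatbot], [msg, chatbot], queue=True
        ).then(bot_stream_state, chatbot, chatbot, queue=True)
        submit.click(
            user, [msg, chatbot], [msg, chatbot], queue=True
        ).then(bot_stream_state, chatbot, chatbot, queue=True)

    block.queue().launch()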
 