gururise committed on
Commit
674ab76
·
1 Parent(s): 77b370c

additional chatbot fixes

Browse files
Files changed (1) hide show
  1. app.py +8 -9
app.py CHANGED
@@ -111,7 +111,6 @@ def chat(
111
  max_new_tokens=10,
112
  temperature=0.1,
113
  top_p=1.0,
114
- stop="<|endoftext|>",
115
  seed=42,
116
  ):
117
  global model
@@ -148,7 +147,7 @@ FRITZ: I am an RNN based Large Language Model (LLM) that use no attention layers
148
  max_new_tokens = int(max_new_tokens)
149
  temperature = float(temperature)
150
  top_p = float(top_p)
151
- stop = [x.strip(' ') for x in stop.split(',')]
152
  seed = seed
153
 
154
  assert 1 <= max_new_tokens <= 384
@@ -165,16 +164,17 @@ FRITZ: I am an RNN based Large Language Model (LLM) that use no attention layers
165
 
166
  model.loadContext(newctx=intro+prompt)
167
 
168
- generated_text = model.forward(number=max_new_tokens, stopStrings=stop,temp=temperature,top_p_usual=top_p)["output"]
169
 
170
  generated_text = generated_text.lstrip("\n ")
 
171
  print(f"{generated_text}")
172
-
173
  for stop_word in stop:
174
  stop_word = codecs.getdecoder("unicode_escape")(stop_word)[0]
175
  if stop_word != '' and stop_word in generated_text:
176
  generated_text = generated_text[:generated_text.find(stop_word)]
177
-
178
  gc.collect()
179
  history.append((prompt, generated_text))
180
  return history,history
@@ -230,7 +230,7 @@ iface = gr.Interface(
230
  gr.Slider(0.0, 1.0, value=0.85), # top_p
231
  gr.Textbox(lines=1, value="<|endoftext|>") # stop
232
  ],
233
- outputs=gr.Textbox(lines=25),
234
  examples=examples,
235
  cache_examples=False,
236
  ).queue()
@@ -246,10 +246,9 @@ chatiface = gr.Interface(
246
  "state",
247
  gr.Slider(1, 256, value=60), # max_tokens
248
  gr.Slider(0.0, 1.0, value=0.8), # temperature
249
- gr.Slider(0.0, 1.0, value=0.85), # top_p
250
- gr.Textbox(lines=1, value="USER:,<|endoftext|>") # stop
251
  ],
252
- outputs=[gr.Chatbot(color_map=("green", "pink")),"state"],
253
  ).queue()
254
 
255
  demo = gr.TabbedInterface(
 
111
  max_new_tokens=10,
112
  temperature=0.1,
113
  top_p=1.0,
 
114
  seed=42,
115
  ):
116
  global model
 
147
  max_new_tokens = int(max_new_tokens)
148
  temperature = float(temperature)
149
  top_p = float(top_p)
150
+ #stop = [x.strip(' ') for x in stop.split(',')]
151
  seed = seed
152
 
153
  assert 1 <= max_new_tokens <= 384
 
164
 
165
  model.loadContext(newctx=intro+prompt)
166
 
167
+ generated_text = model.forward(number=max_new_tokens, stopStrings=["<|endoftext|>","USER:"],temp=temperature,top_p_usual=top_p)["output"]
168
 
169
  generated_text = generated_text.lstrip("\n ")
170
+ generated_text = generated_text.rstrip("USER:")
171
  print(f"{generated_text}")
172
+ '''
173
  for stop_word in stop:
174
  stop_word = codecs.getdecoder("unicode_escape")(stop_word)[0]
175
  if stop_word != '' and stop_word in generated_text:
176
  generated_text = generated_text[:generated_text.find(stop_word)]
177
+ '''
178
  gc.collect()
179
  history.append((prompt, generated_text))
180
  return history,history
 
230
  gr.Slider(0.0, 1.0, value=0.85), # top_p
231
  gr.Textbox(lines=1, value="<|endoftext|>") # stop
232
  ],
233
+ outputs=gr.Textbox(label="Generated Output", lines=25),
234
  examples=examples,
235
  cache_examples=False,
236
  ).queue()
 
246
  "state",
247
  gr.Slider(1, 256, value=60), # max_tokens
248
  gr.Slider(0.0, 1.0, value=0.8), # temperature
249
+ gr.Slider(0.0, 1.0, value=0.85) # top_p
 
250
  ],
251
+ outputs=[gr.Chatbot(label="Chat Log", color_map=("green", "pink")),"state"],
252
  ).queue()
253
 
254
  demo = gr.TabbedInterface(