FINGU-AI committed on
Commit
21a06fb
·
verified ·
1 Parent(s): 1ad2513

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -5
app.py CHANGED
@@ -89,7 +89,7 @@ def inference(query):
89
  outputs = model.generate(tokenized_chat, **generation_params)
90
  decoded_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=False)
91
  assistant_response = decoded_outputs[0].split("<|start_header_id|>assistant<|end_header_id|>")[-1].strip()
92
- # response_ = assistant_response.replace('<|im_end|>', "")
93
  return assistant_response
94
  # outputs = model.generate(tokenized_chat, **generation_params, streamer=streamer)
95
  # return outputs
@@ -98,8 +98,8 @@ examples = ["Translate ko to en: \n\n 은행원: 안녕하세요! BNK은행입
98
 
99
  def response(message, history):
100
  text = inference(message)
101
- return text
102
- # for i in range(len(text)):
103
- # time.sleep(0.01)
104
- # yield text[: i + 1]
105
  gr.ChatInterface(response,examples=examples).launch()
 
89
  outputs = model.generate(tokenized_chat, **generation_params)
90
  decoded_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=False)
91
  assistant_response = decoded_outputs[0].split("<|start_header_id|>assistant<|end_header_id|>")[-1].strip()
92
+ response_ = assistant_response.replace('<|eot_id|>', "")
93
  return assistant_response
94
  # outputs = model.generate(tokenized_chat, **generation_params, streamer=streamer)
95
  # return outputs
 
98
 
99
  def response(message, history):
100
  text = inference(message)
101
+ # return text
102
+ for i in range(len(text)):
103
+ time.sleep(0.01)
104
+ yield text[: i + 1]
105
  gr.ChatInterface(response,examples=examples).launch()