Namitg02 committed on
Commit
d6462f2
·
verified ·
1 Parent(s): 36ed1e4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -141,16 +141,16 @@ def talk(prompt, history):
141
  import pprint
142
  stream = model.create_chat_completion(messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}], max_tokens=1000, stop=["</s>"], stream=True)
143
  # print(output['choices'][0]['message']['content'])
144
- print(f"{stream}")
145
- pprint.pprint(stream)
146
  text = []
147
  for output in stream:
148
- # text += output['choices'][0]
 
 
 
149
  text.append(output['choices'][0])
150
  print(f"{text}")
151
  yield "".join(text)
152
  print(text)
153
- print("check3H")
154
 
155
  # preparing tokens for model input
156
  # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
 
141
  import pprint
142
  stream = model.create_chat_completion(messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}], max_tokens=1000, stop=["</s>"], stream=True)
143
  # print(output['choices'][0]['message']['content'])
 
 
144
  text = []
145
  for output in stream:
146
+ # text += output['choices'][0]
147
+ print(f"{output}")
148
+ print("check3H")
149
+
150
  text.append(output['choices'][0])
151
  print(f"{text}")
152
  yield "".join(text)
153
  print(text)
 
154
 
155
  # preparing tokens for model input
156
  # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response