Update app.py
app.py CHANGED
@@ -147,11 +147,14 @@ def talk(prompt, history):
     stream = model.create_chat_completion(messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}], max_tokens=1000, stop=["</s>"], stream=True)
     # print(output['choices'][0]['message']['content'])
     pprint.pprint(stream)
-    text =
+    text = []
     for output in stream:
-
-
-
+        # text += output['choices'][0]
+        text.append(['choices'][0])
+        yield "".join(text)
+        print(text)
+        print("check3H")
+
     # preparing tokens for model input
     # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
 
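Note that the added line `text.append(['choices'][0])` indexes a fresh list literal, so it appends the literal string 'choices' on every chunk rather than any model output. A minimal sketch of the streaming loop this change appears to be aiming for, assuming llama-cpp-python's OpenAI-style streaming chunks (incremental text under choices[0]["delta"]["content"]) and the formatted_prompt assembled earlier in app.py:

def talk(prompt, history):
    # formatted_prompt is assumed to be built earlier in app.py from prompt/history
    stream = model.create_chat_completion(
        messages=[
            {"role": "system", "content": SYS_PROMPT},
            {"role": "user", "content": formatted_prompt},
        ],
        max_tokens=1000,
        stop=["</s>"],
        stream=True,
    )
    text = []
    for output in stream:
        # Streamed chunks carry deltas; the first and last chunks may omit "content"
        delta = output["choices"][0]["delta"]
        if "content" in delta:
            text.append(delta["content"])
            yield "".join(text)  # yield the accumulated text so the UI can stream the reply

If this app.py drives a Gradio chat interface, yielding the accumulated string rather than each individual delta is the expected pattern: each yield replaces the displayed message, so the reply grows in place as chunks arrive.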