Update app.py
app.py CHANGED
@@ -114,21 +114,22 @@ def talk(prompt, history):
     # the chat template structure should be based on text generation model format

     # indicates the end of a sequence
-    stream = model.create_chat_completion(messages = messages, max_tokens=1000, stop=["</s>"], stream=False)
-    print(f"{stream}")
+    stream = model.create_chat_completion(messages = messages, max_tokens=1000, stop=["</s>"], stream=True)
+    # print(f"{stream}")
     print("check 7")
-    print(stream['choices'][0]['message']['content'])
-    return(stream['choices'][0]['message']['content'])
-
-
-
-
-
-
-
-    #
-
-
+    # print(stream['choices'][0]['message']['content'])
+    # return(stream['choices'][0]['message']['content'])
+    text = ""
+    for output in stream:
+        text += output['choices'][0]['message']['content']
+        print(f"{output}")
+        print("check3H")
+    print(text)
+    return(text)
+    # text.append(output['choices'][0])
+    # print(f"{text}")
+    # yield "".join(text)
+    # print(text)

     # preparing tokens for model input
     # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
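Note on the streamed chunk format: with llama-cpp-python, create_chat_completion(..., stream=True) returns an iterator of incremental chunks in which choices[0] carries a 'delta' dict rather than a full 'message', so the output['choices'][0]['message']['content'] lookup in the loop above is likely to raise a KeyError once streaming is enabled. A minimal sketch of the usual consumption pattern, assuming model is the llama_cpp.Llama instance that app.py loads elsewhere (the model path below is hypothetical) and that talk() feeds a Gradio chat interface:

from llama_cpp import Llama

model = Llama(model_path="model.gguf")  # hypothetical path; app.py loads its own model

def talk(prompt, history):
    # history is accepted to match Gradio's chat signature; a fuller version
    # would fold previous turns into the messages list as well
    messages = [{"role": "user", "content": prompt}]
    stream = model.create_chat_completion(
        messages=messages, max_tokens=1000, stop=["</s>"], stream=True
    )
    text = ""
    for output in stream:
        # streamed chunks hold an incremental 'delta'; the first chunk usually
        # contains only {'role': 'assistant'} and no 'content' key
        text += output["choices"][0]["delta"].get("content", "")
        yield text  # yield the running text so the UI can repaint it as it arrives

Yielding the accumulated string on each chunk, as the commented-out # yield "".join(text) line gestures at, is what lets the chat UI display the partial reply; a single return(text) only shows the answer after generation has finished.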