Namitg02 committed (verified) · Commit 6c719b3 · Parent(s): d6462f2

Update app.py

Files changed (1): app.py (+13 -11)
app.py CHANGED
@@ -132,25 +132,27 @@ def talk(prompt, history):
 
     # formatted_prompt_with_history = formatted_prompt_with_history[:600] # to avoid memory issue
     # print(formatted_prompt_with_history)
-    # messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]
+    messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]
     # binding the system context and new prompt for LLM
     # the chat template structure should be based on text generation model format
     print("check6")
 
     # indicates the end of a sequence
     import pprint
-    stream = model.create_chat_completion(messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}], max_tokens=1000, stop=["</s>"], stream=True)
-    # print(output['choices'][0]['message']['content'])
-    text = []
-    for output in stream:
+    stream = model.create_chat_completion(messages = messages, max_tokens=1000,echo=True, stop=["</s>"], stream=False)
+    print(f"{stream}")
+    print("check 7")
+    print(stream['choices'][0]['message']['content'])
+    # text = []
+    # for output in stream:
     # text += output['choices'][0]
-    print(f"{output}")
-    print("check3H")
+    # print(f"{output}")
+    # print("check3H")
 
-    text.append(output['choices'][0])
-    print(f"{text}")
-    yield "".join(text)
-    print(text)
+    # text.append(output['choices'][0])
+    # print(f"{text}")
+    # yield "".join(text)
+    # print(text)
 
     # preparing tokens for model input
     # add_generation_prompt argument tells the template to add tokens that indicate the start of a bot response
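
The change swaps the streaming call for a blocking one: with stream=False, create_chat_completion returns a single OpenAI-style response dict, so the reply text is available directly at stream['choices'][0]['message']['content']. The removed loop was broken in two ways: streamed chunks carry their text under choices[0]['delta']['content'] rather than choices[0]['message'], and output['choices'][0] is a dict, so appending it to text and calling "".join(text) would raise a TypeError. (One caveat on the new call: echo is documented as a create_completion parameter, and whether create_chat_completion accepts it may depend on the llama-cpp-python version.)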
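For reference, a minimal sketch of the streaming variant the removed loop was aiming at (not the committed code), assuming model is a llama_cpp.Llama instance and messages is built as in the commit:

    stream = model.create_chat_completion(
        messages=messages, max_tokens=1000, stop=["</s>"], stream=True
    )
    text = []
    for output in stream:
        delta = output["choices"][0].get("delta", {})  # streamed chunks use "delta", not "message"
        if "content" in delta:  # the first chunk may carry only {"role": "assistant"}
            text.append(delta["content"])
            yield "".join(text)  # emit the accumulated reply so far

Yielding the accumulated string on each chunk gives the incremental-update behavior a chat generator like talk(prompt, history) typically needs; the committed stream=False path instead produces the whole reply at once.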