Namitg02 committed
Commit 670dc65 (verified)
Parent: 2e8ca1a

Update app.py

Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -136,14 +136,15 @@ def talk(prompt, history):
     # formatted_prompt_with_history = formatted_prompt_with_history[:600] # to avoid memory issue
     # print(formatted_prompt_with_history)
     messages = [{"role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]
+    # messages = "role":"system","content":SYS_PROMPT},{"role":"user","content":formatted_prompt}]
     print(messages)
     # binding the system context and new prompt for LLM
     # the chat template structure should be based on text generation model format
     print("check6")
 
     # indicates the end of a sequence
-    # stream = model.create_chat_completion(messages = messages, max_tokens=1000, stop=["</s>"], stream=False)
-    stream = model.create_completion(prompt = messages, max_tokens=1000, stop=["</s>"],echo=True, stream=False)
+    stream = model.create_chat_completion(messages = messages, max_tokens=1000, stop=["</s>"], stream=False)
+    # stream = model(prompt = messages, max_tokens=1000, stop=["</s>"],echo=True, stream=False)
     print(f"{stream}")
     print("check 7")
     print(stream['choices'][0]['message']['content'])
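
For context on why the call was swapped: in llama-cpp-python, `Llama.create_completion` expects a plain prompt string, while `Llama.create_chat_completion` accepts a list of role/content messages and, with `stream=False`, returns a dict whose text sits under `choices[0]['message']['content']`, which is what app.py prints afterwards. Below is a minimal sketch of the call this commit switches to, assuming `model` is a `llama_cpp.Llama` instance; the model path and prompt strings are placeholders, not values from app.py.

```python
from llama_cpp import Llama

# Hypothetical GGUF path; app.py loads its own model elsewhere.
model = Llama(model_path="model.gguf")

SYS_PROMPT = "You are a helpful assistant."   # placeholder system prompt
formatted_prompt = "Hello"                    # placeholder user prompt

messages = [
    {"role": "system", "content": SYS_PROMPT},
    {"role": "user", "content": formatted_prompt},
]

# create_chat_completion takes the message list directly; passing the same list
# as `prompt` to create_completion fails because that argument is a string.
stream = model.create_chat_completion(
    messages=messages, max_tokens=1000, stop=["</s>"], stream=False
)

# With stream=False the result is a single dict, matching the
# stream['choices'][0]['message']['content'] access used in app.py.
print(stream["choices"][0]["message"]["content"])
```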