research14 committed
Commit 02f452d · 1 Parent(s): 5d98961

removed formatted prompt from output

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -64,9 +64,9 @@ def vicuna_respond(tab_name, message, chat_history):
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
     print(bot_message)
 
-    chat_history.append((formatted_prompt, bot_message))
+    chat_history.append((bot_message))
     time.sleep(2)
-    return tab_name, "", bot_message
+    return tab_name, "", chat_history
 
 def llama_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
@@ -77,9 +77,9 @@ def llama_respond(tab_name, message, chat_history):
     bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
     print(bot_message)
 
-    chat_history.append((formatted_prompt, bot_message))
+    chat_history.append((bot_message))
     time.sleep(2)
-    return tab_name, "", bot_message
+    return tab_name, "", chat_history
 
 def interface():
     gr.Markdown(" Description ")
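
For context on what this change does: the commit keeps the internal `formatted_prompt` out of the visible chat history and returns `chat_history` instead of the bare `bot_message`, which is what an output bound to a Gradio `Chatbot` component expects. One caveat worth noting: in Python, `chat_history.append((bot_message))` appends a bare string, since the extra parentheses do not create a tuple, while the classic tuple-format `gr.Chatbot` renders the history as a list of `(user, bot)` pairs. Below is a minimal sketch of that pattern; `respond` and `run_model` are illustrative placeholders, not this repository's exact code.

```python
import time

def run_model(prompt: str) -> str:
    # Stand-in for the tokenize -> generate -> decode pipeline;
    # replace with the real vicuna/llama model call.
    return f"(model output for: {prompt[:40]}...)"

def respond(tab_name, message, chat_history):
    # The instruction-wrapped prompt is used only for generation;
    # it no longer leaks into the visible history.
    formatted_prompt = (
        f"Generate the output only for the assistant. Please output any "
        f"{tab_name} in the following sentence one per line without any "
        f"additional text: {message}"
    )
    bot_message = run_model(formatted_prompt)
    print(bot_message)

    # Tuple-format gr.Chatbot renders (user, bot) pairs, so append a
    # 2-tuple of the raw user message and the reply. Note that
    # `chat_history.append((bot_message))`, as committed, appends only
    # the string, because `(x)` is not a tuple in Python.
    chat_history.append((message, bot_message))
    time.sleep(2)
    # Returning the full history lets the Chatbot output re-render
    # the whole conversation.
    return tab_name, "", chat_history
```

Appending `(message, bot_message)` rather than the old `(formatted_prompt, bot_message)` achieves the same goal as this commit, hiding the formatted prompt, while still giving the Chatbot a well-formed pair.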