research14 committed
Commit ee8b9cc · 1 Parent(s): c15f723

added formatted prompt to model fns

Files changed (1): app.py +6 -6
app.py CHANGED
@@ -56,22 +56,22 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
     return "", chat_history
 
 def vicuna_respond(tab_name, message, chat_history):
-    # formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
-    input_ids = vicuna_tokenizer.encode(message, return_tensors="pt")
+    formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
+    input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = vicuna_model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
-    chat_history.append((message, bot_message))
+    chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
     return "", chat_history
 
 def llama_respond(tab_name, message, chat_history):
-    # formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
-    input_ids = llama_tokenizer.encode(message, return_tensors="pt")
+    formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
+    input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = llama_model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
     bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
-    chat_history.append((message, bot_message))
+    chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
     return "", chat_history
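
For context, here is how `vicuna_respond` reads after this commit, as a minimal self-contained sketch. The checkpoint name and the `AutoModelForCausalLM`/`AutoTokenizer` loading lines are assumptions for illustration; the Space's actual model-loading code sits outside the hunk shown in this diff.

```python
import time
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: checkpoint name and loading style are placeholders;
# app.py loads the model somewhere outside the diffed hunk.
vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.3")
vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.3")

def vicuna_respond(tab_name, message, chat_history):
    # After this commit, the raw user message is wrapped in a task
    # instruction before tokenization, instead of being encoded directly.
    formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
    input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
    # Note: generate()'s max_length counts prompt tokens plus generated
    # tokens, so the longer formatted prompt eats into the 50-token
    # budget; this sketch keeps the settings from the diff unchanged.
    output_ids = vicuna_model.generate(input_ids, max_length=50,
                                       num_beams=5, no_repeat_ngram_size=2)
    bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)

    # The commit also records the formatted prompt, not the raw message,
    # as the user turn in the (message, reply) tuples Gradio's Chatbot
    # component expects.
    chat_history.append((formatted_prompt, bot_message))
    time.sleep(2)
    return "", chat_history
```

`llama_respond` receives the identical change with `llama_tokenizer`/`llama_model`, so a shared prompt-building helper could serve both functions.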