research14 committed on
Commit
2b07352
·
1 Parent(s): 914c859

added print to debug

Files changed (1)
app.py +8 -2
app.py CHANGED
@@ -56,20 +56,26 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
     return "", chat_history
 
 def vicuna_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
+    formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    print('Prompt + Context:')
+    print(formatted_prompt)
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = vicuna_model.generate(input_ids, max_length=149, num_beams=5, no_repeat_ngram_size=2)
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
+    print(bot_message)
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
     return tab_name, "", chat_history
 
 def llama_respond(tab_name, message, chat_history):
-    formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
+    formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''
+    print('Prompt + Context:')
+    print(formatted_prompt)
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
     output_ids = llama_model.generate(input_ids, max_length=149, num_beams=5, no_repeat_ngram_size=2)
     bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
+    print(bot_message)
 
     chat_history.append((formatted_prompt, bot_message))
     time.sleep(2)
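
The net effect of this commit is twofold: the prompt template no longer wraps {tab_name} and {message} in angle brackets, and both the formatted prompt and the raw decoded model output are printed for debugging. A minimal standalone sketch of that change is below; the sample values for tab_name and message are hypothetical, and the tokenizer/model setup from app.py is omitted.

# Sketch only: sample values are hypothetical, not taken from app.py.
tab_name = "noun"
message = "The quick brown fox jumps over the lazy dog."

# Before this commit, the placeholders were wrapped in angle brackets.
old_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''

# After this commit, the brackets are dropped and the prompt is printed,
# so the exact context sent to the model appears in the logs.
formatted_prompt = f'''Generate the output only for the assistant. Please output any {tab_name} in the following sentence one per line without any additional text: {message}'''

print('Prompt + Context:')
print(formatted_prompt)

Printing both formatted_prompt and bot_message makes it possible to inspect the full model output in the logs before it is appended to chat_history.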