research14 committed
Commit 914c859 · 1 Parent(s): 51f2585

testing max_length

Files changed (1):
  app.py +2 -2
app.py CHANGED
@@ -58,7 +58,7 @@ def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
 def vicuna_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
     input_ids = vicuna_tokenizer.encode(formatted_prompt, return_tensors="pt")
-    output_ids = vicuna_model.generate(input_ids, num_beams=5, no_repeat_ngram_size=2)
+    output_ids = vicuna_model.generate(input_ids, max_length=149, num_beams=5, no_repeat_ngram_size=2)
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
     chat_history.append((formatted_prompt, bot_message))
@@ -68,7 +68,7 @@ def vicuna_respond(tab_name, message, chat_history):
 def llama_respond(tab_name, message, chat_history):
     formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
     input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
-    output_ids = llama_model.generate(input_ids, num_beams=5, no_repeat_ngram_size=2)
+    output_ids = llama_model.generate(input_ids, max_length=149, num_beams=5, no_repeat_ngram_size=2)
     bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
 
     chat_history.append((formatted_prompt, bot_message))
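For reference, max_length in Hugging Face transformers' generate() caps the total sequence length (prompt tokens plus newly generated tokens), so a long prompt leaves fewer tokens for the reply; max_new_tokens would bound only the generated part. Below is a minimal standalone sketch of the updated call. The checkpoint name is an assumption for illustration, since app.py's actual model path is not shown in this diff.

from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed checkpoint; the repository's real model path is not in this commit.
vicuna_tokenizer = AutoTokenizer.from_pretrained("lmsys/vicuna-7b-v1.5")
vicuna_model = AutoModelForCausalLM.from_pretrained("lmsys/vicuna-7b-v1.5")

prompt = "Please output any <noun> in the following sentence one per line without any additional text: <The quick brown fox jumps>"
input_ids = vicuna_tokenizer.encode(prompt, return_tensors="pt")

# max_length=149 caps prompt + generated tokens; beam search (num_beams=5)
# with no_repeat_ngram_size=2 forbids any repeated bigram in the output.
output_ids = vicuna_model.generate(
    input_ids,
    max_length=149,
    num_beams=5,
    no_repeat_ngram_size=2,
)
print(vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True))

Note that if the prompt alone reaches 149 tokens, generate() warns about the input length and produces little or no new text, which is the usual reason to prefer max_new_tokens for chat-style prompts.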