research14 committed on
Commit
84d7a47
·
1 Parent(s): ee8b9cc

added output tab_name

Browse files
Files changed (1) hide show
  1. app.py +2 -11
app.py CHANGED
@@ -63,7 +63,7 @@ def vicuna_respond(tab_name, message, chat_history):
63
 
64
  chat_history.append((formatted_prompt, bot_message))
65
  time.sleep(2)
66
- return "", chat_history
67
 
68
  def llama_respond(tab_name, message, chat_history):
69
  formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
@@ -73,16 +73,7 @@ def llama_respond(tab_name, message, chat_history):
73
 
74
  chat_history.append((formatted_prompt, bot_message))
75
  time.sleep(2)
76
- return "", chat_history
77
-
78
- def vicuna_respond(message, chat_history):
79
- input_ids = vicuna_tokenizer.encode(message, return_tensors="pt")
80
- output_ids = vicuna_model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
81
- bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
82
-
83
- chat_history.append((message, bot_message))
84
- time.sleep(2)
85
- return "", chat_history
86
 
87
  def interface():
88
  gr.Markdown(" Description ")
 
63
 
64
  chat_history.append((formatted_prompt, bot_message))
65
  time.sleep(2)
66
+ return tab_name, "", chat_history
67
 
68
  def llama_respond(tab_name, message, chat_history):
69
  formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
 
73
 
74
  chat_history.append((formatted_prompt, bot_message))
75
  time.sleep(2)
76
+ return tab_name, "", chat_history
 
 
 
 
 
 
 
 
 
77
 
78
  def interface():
79
  gr.Markdown(" Description ")