research14 committed
Commit c15f723 · 1 Parent(s): 2b06a1b

updated respond fns with tab_name

Files changed (1): app.py (+13, -11)
app.py CHANGED
@@ -47,15 +47,16 @@ def format_chat_prompt(message, chat_history, max_convo_length):
     return prompt
 
 def gpt_respond(tab_name, message, chat_history, max_convo_length = 10):
-    formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
-    print('Prompt + Context:')
-    print(formatted_prompt)
-    bot_message = chat(system_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text.''',
-                       user_prompt = formatted_prompt)
-    chat_history.append((message, bot_message))
-    return "", chat_history
+    formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
+    print('Prompt + Context:')
+    print(formatted_prompt)
+    bot_message = chat(system_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text.''',
+                       user_prompt = formatted_prompt)
+    chat_history.append((message, bot_message))
+    return "", chat_history
 
-def vicuna_respond(message, chat_history):
+def vicuna_respond(tab_name, message, chat_history):
+    # formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
     input_ids = vicuna_tokenizer.encode(message, return_tensors="pt")
     output_ids = vicuna_model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
     bot_message = vicuna_tokenizer.decode(output_ids[0], skip_special_tokens=True)
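With this change, gpt_respond and vicuna_respond both expect the selected linguistic entity as their first argument. The event wiring that feeds the Dropdown into these handlers is outside this hunk; the snippet below is a minimal sketch only, assuming a gr.Chatbot component (called chatbot here) holds the conversation history and that gpt_respond from app.py is in scope.

```python
import gradio as gr

# Minimal wiring sketch (not part of this commit): pass the Dropdown value as the
# first input so it arrives as tab_name in the updated respond functions.
# Assumption: a gr.Chatbot named "chatbot" stores the (message, bot_message) pairs.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    textbox_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
    tab_name = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase",
                            "Dependent clause", "T-units"], label="Linguistic Entity")
    btn = gr.Button(value="Submit")
    # Input order must match the new signature (tab_name, message, chat_history);
    # the returned ("", updated_history) clears the textbox and refreshes the chatbot.
    btn.click(fn=gpt_respond,
              inputs=[tab_name, textbox_prompt, chatbot],
              outputs=[textbox_prompt, chatbot])
```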
@@ -64,7 +65,8 @@ def vicuna_respond(message, chat_history):
     time.sleep(2)
     return "", chat_history
 
-def llama_respond(message, chat_history):
+def llama_respond(tab_name, message, chat_history):
+    # formatted_prompt = f'''Generate the output only for the assistant. Please output any <{tab_name}> in the following sentence one per line without any additional text: <{message}>'''
     input_ids = llama_tokenizer.encode(message, return_tensors="pt")
     output_ids = llama_model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
     bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
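llama_respond (like vicuna_respond above) now receives tab_name, but the formatted prompt that would use it is still commented out, so the raw message is what gets tokenized. The sketch below shows what enabling that prompt might look like; it assumes the rest of the function mirrors vicuna_respond (append to the history, then return "", chat_history) and swaps max_length for max_new_tokens so the instruction text does not eat the generation budget.

```python
# Sketch only: llama_respond with the commented-out prompt enabled.
# Assumptions: llama_tokenizer/llama_model are the objects already loaded in app.py,
# and the function tail mirrors vicuna_respond.
def llama_respond(tab_name, message, chat_history):
    formatted_prompt = (f"Generate the output only for the assistant. Please output any "
                        f"<{tab_name}> in the following sentence one per line without any "
                        f"additional text: <{message}>")
    input_ids = llama_tokenizer.encode(formatted_prompt, return_tensors="pt")
    # max_new_tokens reserves the whole budget for the reply, since the formatted
    # prompt alone can exceed 50 tokens.
    output_ids = llama_model.generate(input_ids, max_new_tokens=50, num_beams=5,
                                      no_repeat_ngram_size=2)
    # Depending on the model, the decoded text may still include the prompt and
    # need to be stripped before display.
    bot_message = llama_tokenizer.decode(output_ids[0], skip_special_tokens=True)
    chat_history.append((message, bot_message))
    return "", chat_history
```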
@@ -88,9 +90,9 @@ def interface():
     textbox_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
     with gr.Row():
         api_key_input = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")
-        api_key_btn = gr.Button(label="Submit Api Key", scale=0)
+        api_key_btn = gr.Button(value="Submit Key", scale=0)
     tab_name = gr.Dropdown(["Noun", "Determiner", "Noun phrase", "Verb phrase", "Dependent clause", "T-units"], label="Linguistic Entity")
-    btn = gr.Button(label="Submit")
+    btn = gr.Button(value="Submit")
 
     # prompt = template_single.format(tab_name, textbox_prompt)
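Both buttons now set their caption through value=, which is the parameter gr.Button actually displays. The click handler for api_key_btn is not part of this hunk; the snippet below is one hypothetical way to hand the key from api_key_input to an OpenAI-backed chat() helper, assuming the pre-1.0 openai client that reads a module-level api_key.

```python
import openai  # assumption: app.py's chat() helper uses the pre-1.0 openai client

# Hypothetical handler (not in this diff): store the key typed into api_key_input
# when api_key_btn is clicked, so later ChatCompletion calls can authenticate.
def submit_api_key(api_key):
    openai.api_key = api_key

api_key_btn.click(fn=submit_api_key, inputs=[api_key_input], outputs=None)
```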