research14 committed on
Commit
28ca6ce
·
1 Parent(s): cab4ff3

test model

Browse files
Files changed (1) hide show
  1. app.py +10 -10
app.py CHANGED
@@ -9,28 +9,28 @@ model = AutoModelForCausalLM.from_pretrained(model_name)
9
  tokenizer = AutoTokenizer.from_pretrained(model_name)
10
 
11
  with gr.Blocks() as demo:
12
- gr.Markdown("<center># LLM Evaluator With Linguistic Scrutiny</center>")
13
 
14
  with gr.Tab("POS"):
15
  with gr.Row():
16
- vicuna_chatbot = gr.Chatbot(label="vicuna-7b", live=False)
17
  llama_chatbot = gr.Chatbot(label="llama-7b", live=False)
18
  gpt_chatbot = gr.Chatbot(label="gpt-3.5", live=False)
19
  with gr.Row():
20
  prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
21
- send_button_Chunk = gr.Button("Send", scale=0)
22
  clear = gr.ClearButton([prompt, vicuna_chatbot])
23
  with gr.Tab("Chunk"):
24
  with gr.Row():
25
- vicuna_chatbot = gr.Chatbot(label="vicuna-7b", live=False)
26
- llama_chatbot = gr.Chatbot(label="llama-7b", live=False)
27
- gpt_chatbot = gr.Chatbot(label="gpt-3.5", live=False)
28
  with gr.Row():
29
- prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
30
  send_button_Chunk = gr.Button("Send", scale=0)
31
- clear = gr.ClearButton([prompt, vicuna_chatbot])
32
 
33
- def respond(message, chat_history, chatbot_idx):
34
  input_ids = tokenizer.encode(message, return_tensors="pt")
35
  output = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
36
  bot_message = tokenizer.decode(output[0], skip_special_tokens=True)
@@ -38,6 +38,6 @@ with gr.Blocks() as demo:
38
  time.sleep(2)
39
  return "", chat_history
40
 
41
- prompt.submit(respond, [prompt, vicuna_chatbot, vicuna_chatbot])
42
 
43
  demo.launch()
 
9
  tokenizer = AutoTokenizer.from_pretrained(model_name)
10
 
11
  with gr.Blocks() as demo:
12
+ gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
13
 
14
  with gr.Tab("POS"):
15
  with gr.Row():
16
+ vicuna_chatbot = gr.Chatbot(label="vicuna-7b", live=True)
17
  llama_chatbot = gr.Chatbot(label="llama-7b", live=False)
18
  gpt_chatbot = gr.Chatbot(label="gpt-3.5", live=False)
19
  with gr.Row():
20
  prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
21
+ send_button_POS = gr.Button("Send", scale=0)
22
  clear = gr.ClearButton([prompt, vicuna_chatbot])
23
  with gr.Tab("Chunk"):
24
  with gr.Row():
25
+ vicuna_chatbot_chunk = gr.Chatbot(label="vicuna-7b", live=True)
26
+ llama_chatbot_chunk = gr.Chatbot(label="llama-7b", live=False)
27
+ gpt_chatbot_chunk = gr.Chatbot(label="gpt-3.5", live=False)
28
  with gr.Row():
29
+ prompt_chunk = gr.Textbox(show_label=False, placeholder="Enter prompt")
30
  send_button_Chunk = gr.Button("Send", scale=0)
31
+ clear = gr.ClearButton([prompt_chunk, vicuna_chatbot_chunk])
32
 
33
+ def respond(message, chat_history, chatbot):
34
  input_ids = tokenizer.encode(message, return_tensors="pt")
35
  output = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
36
  bot_message = tokenizer.decode(output[0], skip_special_tokens=True)
 
38
  time.sleep(2)
39
  return "", chat_history
40
 
41
+ prompt.submit(respond, [prompt, vicuna_chatbot, vicuna_chatbot_chunk])
42
 
43
  demo.launch()