research14 committed on
Commit b4af604 · 1 Parent(s): 006127c
Files changed (1)
  1. app.py +8 -1
app.py CHANGED
@@ -11,11 +11,14 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
 with gr.Blocks() as demo:
     gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
 
+    gr.Markdown(" Description ")
+
     with gr.Tab("POS"):
-        gr.Markdown("Strategy 1 QA")
         with gr.Row():
             prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
             send_button_POS = gr.Button("Send", scale=0)
+
+        gr.Markdown("Strategy 1 QA")
         with gr.Row():
             vicuna_chatbot1 = gr.Chatbot(label="vicuna-7b", live=True)
             llama_chatbot1 = gr.Chatbot(label="llama-7b", live=False)
@@ -30,11 +33,14 @@ with gr.Blocks() as demo:
             vicuna_chatbot3 = gr.Chatbot(label="vicuna-7b", live=True)
             llama_chatbot3 = gr.Chatbot(label="llama-7b", live=False)
             gpt_chatbot3 = gr.Chatbot(label="gpt-3.5", live=False)
+
         clear = gr.ClearButton([prompt, vicuna_chatbot1])
+
     with gr.Tab("Chunk"):
         with gr.Row():
             prompt_chunk = gr.Textbox(show_label=False, placeholder="Enter prompt")
             send_button_Chunk = gr.Button("Send", scale=0)
+
         gr.Markdown("Strategy 1 QA")
         with gr.Row():
             vicuna_chatbot1_chunk = gr.Chatbot(label="vicuna-7b", live=True)
@@ -50,6 +56,7 @@ with gr.Blocks() as demo:
             vicuna_chatbot3_chunk = gr.Chatbot(label="vicuna-7b", live=True)
             llama_chatbot3_chunk = gr.Chatbot(label="llama-7b", live=False)
             gpt_chatbot3_chunk = gr.Chatbot(label="gpt-3.5", live=False)
+
         clear = gr.ClearButton([prompt_chunk, vicuna_chatbot1_chunk])
 
     # Define the function for generating responses
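For context on the trailing comment, here is a minimal, hypothetical sketch of how a layout like the one above is typically wired to a response function in Gradio. Neither `generate_response` nor the checkpoint name below comes from this commit (the Space's actual function is defined further down in app.py and is not part of this diff); the sketch only assumes a causal LM loaded with transformers and the standard `Button.click()` event.

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumed checkpoint for illustration only; the Space sets its own model_name earlier in app.py.
model_name = "lmsys/vicuna-7b-v1.5"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Hypothetical helper: generate a reply and append the (user, bot) turn to the chatbot history.
def generate_response(prompt, history):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=128)
    reply = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return history + [(prompt, reply)]

with gr.Blocks() as demo:
    prompt = gr.Textbox(show_label=False, placeholder="Enter prompt")
    send_button = gr.Button("Send", scale=0)
    chatbot = gr.Chatbot(label="vicuna-7b")
    # Clicking Send passes the textbox value and current history to the model
    # and writes the updated history back into the chatbot.
    send_button.click(generate_response, inputs=[prompt, chatbot], outputs=chatbot)

demo.launch()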