research14 committed
Commit b85d51d · 1 Parent(s): d51f719
Files changed (1)
  1. app.py +32 -31
app.py CHANGED
@@ -15,12 +15,14 @@ def gpt3(prompt):
         model='gpt3.5', messages=[{"role": "user", "content": prompt}])
     return response['choices'][0]['message']['content']

-def respond(message, chat_history):
-    input_ids = tokenizer.encode(message, return_tensors="pt")
+def respond(tab_name, message, chat_history):
+    prompt = template_single.format(tab_name, message)
+
+    input_ids = tokenizer.encode(prompt, return_tensors="pt")
     output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
     bot_message = tokenizer.decode(output_ids[0], skip_special_tokens=True)

-    chat_history.append((message, bot_message))
+    chat_history.append((prompt, bot_message))
     time.sleep(2)
     return "", chat_history

@@ -30,8 +32,6 @@ def interface(tab_name):
     textbox_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
     openai.api_key = gr.Textbox(label="Open AI Key", placeholder="Enter your Openai key here", type="password")

-    prompt = template_single.format(tab_name, textbox_prompt)
-
     gr.Markdown("Strategy 1 QA-Based Prompting")
     with gr.Row():
         vicuna_S1_chatbot = gr.Chatbot(label="vicuna-7b")
@@ -51,9 +51,9 @@ def interface(tab_name):
         gpt_S3_chatbot = gr.Chatbot(label="gpt-3.5")
     clear = gr.ClearButton([textbox_prompt, vicuna_S3_chatbot])

-    textbox_prompt.submit(respond, [textbox_prompt, vicuna_S1_chatbot], [prompt, vicuna_S1_chatbot])
-    textbox_prompt.submit(respond, [textbox_prompt, vicuna_S2_chatbot], [prompt, vicuna_S2_chatbot])
-    textbox_prompt.submit(respond, [textbox_prompt, vicuna_S3_chatbot], [prompt, vicuna_S3_chatbot])
+    textbox_prompt.submit(respond, [tab_name, textbox_prompt, vicuna_S1_chatbot], [tab_name, textbox_prompt, vicuna_S1_chatbot])
+    textbox_prompt.submit(respond, [tab_name, textbox_prompt, vicuna_S2_chatbot], [tab_name, textbox_prompt, vicuna_S2_chatbot])
+    textbox_prompt.submit(respond, [tab_name, textbox_prompt, vicuna_S3_chatbot], [tab_name, textbox_prompt, vicuna_S3_chatbot])


 with gr.Blocks() as demo:
@@ -63,28 +63,29 @@ with gr.Blocks() as demo:
         interface("Noun")

     with gr.Tab("Determiner"):
-        gr.Markdown(" Description ")
+        interface("Determiner")
+        # gr.Markdown(" Description ")

-        prompt_CHUNK = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
+        # prompt_CHUNK = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")

-        gr.Markdown("Strategy 1 QA")
-        with gr.Row():
-            vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-            llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-            gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
-        gr.Markdown("Strategy 2 Instruction")
-        with gr.Row():
-            vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-            llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-            gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
-        gr.Markdown("Strategy 3 Structured Prompting")
-        with gr.Row():
-            vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
-            llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
-            gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
-        clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
+        # gr.Markdown("Strategy 1 QA")
+        # with gr.Row():
+        #     vicuna_S1_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+        #     llama_S1_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+        #     gpt_S1_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+        # clear = gr.ClearButton([prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
+        # gr.Markdown("Strategy 2 Instruction")
+        # with gr.Row():
+        #     vicuna_S2_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+        #     llama_S2_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+        #     gpt_S2_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+        # clear = gr.ClearButton([prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
+        # gr.Markdown("Strategy 3 Structured Prompting")
+        # with gr.Row():
+        #     vicuna_S3_chatbot_CHUNK = gr.Chatbot(label="vicuna-7b")
+        #     llama_S3_chatbot_CHUNK = gr.Chatbot(label="llama-7b")
+        #     gpt_S3_chatbot_CHUNK = gr.Chatbot(label="gpt-3.5")
+        # clear = gr.ClearButton([prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

     with gr.Tab("Noun phrase"):
         interface("Noun phrase")
@@ -95,8 +96,8 @@ with gr.Blocks() as demo:
     with gr.Tab("T-units"):
         interface("T-units")

-    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
-    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
-    prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])
+    # prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S1_chatbot_CHUNK], [prompt_CHUNK, vicuna_S1_chatbot_CHUNK])
+    # prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S2_chatbot_CHUNK], [prompt_CHUNK, vicuna_S2_chatbot_CHUNK])
+    # prompt_CHUNK.submit(respond, [prompt_CHUNK, vicuna_S3_chatbot_CHUNK], [prompt_CHUNK, vicuna_S3_chatbot_CHUNK])

 demo.launch()
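
For reference, the core change in this commit is that respond() now receives the tab name and builds the prompt inside the handler (prompt = template_single.format(tab_name, message)) instead of formatting it once at interface-construction time, and the submit event forwards the tab name alongside the textbox and chatbot. Below is a minimal, self-contained sketch of that pattern. Everything not shown in the diff is an assumption for illustration: fake_generate() stands in for the tokenizer/model pair, template_single is a guessed template string, and a gr.State component carries the tab name here (Gradio event inputs must be components, so this sketch wraps the plain string the committed code passes).

import time
import gradio as gr

# Assumption: a plausible single-prompt template; the real template_single is defined elsewhere in app.py.
template_single = "Identify all the {0} in the following sentence: {1}"

def fake_generate(prompt):
    # Stub generator standing in for tokenizer.encode / model.generate / tokenizer.decode.
    return f"(stub answer for: {prompt})"

def respond(tab_name, message, chat_history):
    # Same flow as the committed respond(): build the prompt from the tab name,
    # generate a reply, and append the exchange to the chat history.
    prompt = template_single.format(tab_name, message)
    bot_message = fake_generate(prompt)
    chat_history.append((prompt, bot_message))
    time.sleep(2)
    return "", chat_history

with gr.Blocks() as demo:
    tab_name = gr.State("Noun")  # assumption: wrap the tab name so it can be passed as an event input
    textbox_prompt = gr.Textbox(show_label=False, placeholder="Write a prompt and press enter")
    chatbot = gr.Chatbot(label="vicuna-7b")
    # Two outputs match the two values respond() returns: clear the textbox, update the chatbot.
    textbox_prompt.submit(respond, [tab_name, textbox_prompt, chatbot], [textbox_prompt, chatbot])

demo.launch()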