research14 committed on
Commit
cee52ff
·
1 Parent(s): 4329a8a
Files changed (1) hide show
  1. app.py +6 -13
app.py CHANGED
@@ -13,11 +13,6 @@ template_single = '''Please output any <{}> in the following sentence one per li
13
 
14
  api_key = ""
15
 
16
- def set_api_key(new_api_key):
17
- global api_key
18
- api_key = new_api_key
19
- os.environ['OPENAI_API_TOKEN'] = api_key
20
-
21
  def chat(system_prompt, user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
22
  ''' Normal call of OpenAI API '''
23
  response = openai.ChatCompletion.create(
@@ -45,7 +40,11 @@ def format_chat_prompt(message, chat_history, max_convo_length):
45
  prompt = f"{prompt}\nUser: {message}\nAssistant:"
46
  return prompt
47
 
48
- def respond_gpt(tab_name, message, chat_history, max_convo_length = 10):
 
 
 
 
49
  formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
50
  print('Prompt + Context:')
51
  print(formatted_prompt)
@@ -54,11 +53,6 @@ def respond_gpt(tab_name, message, chat_history, max_convo_length = 10):
54
  chat_history.append((message, bot_message))
55
  return "", chat_history
56
 
57
- def set_api_key_and_submit(api_key_input, textbox_prompt, vicuna_chatbot):
58
- set_api_key(api_key_input)
59
- textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_chatbot], outputs=[textbox_prompt, vicuna_chatbot])
60
-
61
-
62
  def respond(message, chat_history):
63
  input_ids = tokenizer.encode(message, return_tensors="pt")
64
  output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
@@ -101,8 +95,7 @@ def interface():
101
  textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S2_chatbot], outputs=[textbox_prompt, vicuna_S2_chatbot])
102
  textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S3_chatbot], outputs=[textbox_prompt, vicuna_S3_chatbot])
103
 
104
- btn.click(set_api_key_and_submit, inputs=[api_key_input, textbox_prompt, gpt_S1_chatbot],
105
- outputs=[api_key_input, textbox_prompt, gpt_S1_chatbot])
106
 
107
  with gr.Blocks() as demo:
108
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")
 
13
 
14
  api_key = ""
15
 
 
 
 
 
 
16
  def chat(system_prompt, user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
17
  ''' Normal call of OpenAI API '''
18
  response = openai.ChatCompletion.create(
 
40
  prompt = f"{prompt}\nUser: {message}\nAssistant:"
41
  return prompt
42
 
43
+ def respond_gpt(new_api_key, tab_name, message, chat_history, max_convo_length = 10):
44
+ global api_key
45
+ api_key = new_api_key
46
+ os.environ['OPENAI_API_TOKEN'] = api_key
47
+
48
  formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
49
  print('Prompt + Context:')
50
  print(formatted_prompt)
 
53
  chat_history.append((message, bot_message))
54
  return "", chat_history
55
 
 
 
 
 
 
56
  def respond(message, chat_history):
57
  input_ids = tokenizer.encode(message, return_tensors="pt")
58
  output_ids = model.generate(input_ids, max_length=50, num_beams=5, no_repeat_ngram_size=2)
 
95
  textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S2_chatbot], outputs=[textbox_prompt, vicuna_S2_chatbot])
96
  textbox_prompt.submit(respond, inputs=[textbox_prompt, vicuna_S3_chatbot], outputs=[textbox_prompt, vicuna_S3_chatbot])
97
 
98
+ btn.click(respond_gpt, inputs=[api_key_input, tab_name, textbox_prompt, gpt_S1_chatbot], outputs=[api_key_input, tab_name, textbox_prompt, gpt_S1_chatbot])
 
99
 
100
  with gr.Blocks() as demo:
101
  gr.Markdown("# LLM Evaluator With Linguistic Scrutiny")