sylvainHellin committed (verified)
Commit 3ce50f4 · 1 Parent(s): 17108a1

Moved advanced options to the left

Files changed (1):
  1. app.py (+56 -48)
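
In short, the advanced-options accordion moves from below the chat into a narrow left-hand column, the chat itself gets a wide right-hand column, and a clear button is added next to Submit. Below is a minimal sketch of that Gradio Row/Column pattern, stripped of the app's prompts, extra sliders, and callback wiring; the component arguments are trimmed for illustration rather than copied from app.py:

import gradio as gr

with gr.Blocks() as demo:
    with gr.Row():                 # one horizontal band split into two columns
        with gr.Column(scale=1):   # narrow column on the left: the options panel
            with gr.Accordion(label="Advanced options", open=True):
                model = gr.Dropdown(choices=["GPT-3.5", "GPT-4"], value="GPT-3.5", label="Model")
                temperature = gr.Slider(minimum=0, maximum=2, step=0.1, value=0.7, label="Temperature")
        with gr.Column(scale=8):   # wide column on the right: the chat itself
            chatbot = gr.Chatbot()
            msg = gr.Textbox(label="Message")

demo.launch()

The scale arguments set the relative widths, so with 1 against 8 the options column takes roughly one ninth of the row.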
app.py CHANGED
@@ -7,7 +7,7 @@ from dotenv import load_dotenv, find_dotenv
 import gradio as gr
 import openai
 
-# _ = load_dotenv(find_dotenv(filename="secrets.env", raise_error_if_not_found=False))
+_ = load_dotenv(find_dotenv(filename="secrets.env", raise_error_if_not_found=False))
 
 # Global variable
 # ROOT_DIR = os.environ["ROOT_DIR"]
@@ -85,7 +85,7 @@ def formatPrompt(newMsg:str, chatHistory, instruction):
         "content": newMsg
     })
 
-    # return the formated messages
+    # return the formated messages
     return messages
 
 # def the response function (to get the answer as one block after generation)
@@ -109,54 +109,62 @@ def streamResponse(newMsg:str, chatHistory, instruction, temperature, max_tokens
 
 # Build the app
 with gr.Blocks(theme='Insuz/Mocha') as app:
-    gr.Markdown("# Private GPT")
-    gr.Markdown("This chatbot is powered by the openAI GPT series.\
-    \nThe default model is `GPT-3.5`, but `GPT-4` can be selected in the advanced options.\
-    \nAs it uses the openAI API, user data is not used to train openAI models. (side note: GPT-4 is currently 500 times more expensive than GPT-3.5)")
-    chatbot = gr.Chatbot() # Associated variable: chatHistory
-    msg = gr.Textbox(label="Message")
-    with gr.Accordion(label="Advanced options", open=False):
-        model = gr.Dropdown(
-            choices=["GPT-3.5", "GPT-4"],
-            value="GPT-3.5",
-            multiselect=False,
-            label="Model",
-            info="Choose the model you want to chat with"
-        )
-        instruction = gr.Textbox(
-            value=SYSTEM_PROMPT,
-            label="System instructions",
-            lines=2,)
-        temperature = gr.Slider(
-            minimum=0,
-            maximum=2,
-            step=0.1,
-            value=0.7,
-            label="Temperature",
-            info="The higher, the more random the results will be"
-        )
-        max_token = gr.Slider(
-            minimum=64,
-            maximum=2048,
-            step=64,
-            value=1024,
-            label="Max Token",
-            info="Maximum number of token the model will take into consideration"
-        )
-    Button = gr.Button(value="Submit")
-    msg.submit(
-        fn=streamResponse,
-        inputs=[msg, chatbot, instruction, temperature, max_token, model],
-        outputs=[msg, chatbot]
-    )
-    Button.click(
-        fn=streamResponse,
-        inputs=[msg, chatbot, instruction, temperature, max_token, model],
-        outputs=[msg, chatbot]
-    )
+    with gr.Row():
+        with gr.Column(scale = 1):
+            with gr.Accordion(label="Advanced options", open=True):
+                model = gr.Dropdown(
+                    choices=["GPT-3.5", "GPT-4"],
+                    value="GPT-3.5",
+                    multiselect=False,
+                    label="Model",
+                    info="Choose the model you want to chat with.\nGo easy on GPT-4: it costs 500 times more than GPT 3.5!"
+                )
+                instruction = gr.Textbox(
+                    value=SYSTEM_PROMPT,
+                    label="System instructions",
+                    lines=4,)
+                temperature = gr.Slider(
+                    minimum=0,
+                    maximum=2,
+                    step=0.1,
+                    value=0.7,
+                    label="Temperature",
+                    info="The higher, the more random the results will be"
+                )
+                max_token = gr.Slider(
+                    minimum=64,
+                    maximum=2048,
+                    step=64,
+                    value=1024,
+                    label="Max Token",
+                    info="Maximum number of token the model will take into consideration"
+                )
+        with gr.Column(scale = 8):
+            gr.Markdown("# Private GPT")
+            gr.Markdown("This chatbot is powered by the openAI GPT series.\
+            The default model is `GPT-3.5`, but `GPT-4` can be selected in the advanced options.\
+            \nAs it uses the openAI API, user data is not used to train openAI models (see their official [website](https://help.openai.com/en/articles/5722486-how-your-data-is-used-to-improve-model-performance)).")
+            chatbot = gr.Chatbot() # Associated variable: chatHistory
+            msg = gr.Textbox(label="Message")
+            with gr.Row():
+                with gr.Column(scale=4):
+                    Button = gr.Button(value="Submit")
+                with gr.Column(scale=4):
+                    clearButton = gr.ClearButton([chatbot, msg])
+            msg.submit(
+                fn=streamResponse,
+                inputs=[msg, chatbot, instruction, temperature, max_token, model],
+                outputs=[msg, chatbot]
+            )
+            Button.click(
+                fn=streamResponse,
+                inputs=[msg, chatbot, instruction, temperature, max_token, model],
+                outputs=[msg, chatbot]
+            )
 
 gr.close_all()
-app.queue().launch(auth=(AUTH_USERNAME, AUTH_PASSWORD), share=True)
+app.queue().launch(auth=(AUTH_USERNAME, AUTH_PASSWORD))
+# app.queue().launch()
 
 # %%
 
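
Besides the layout, the diff re-enables loading secrets.env (the load_dotenv call was previously commented out) and drops share=True from the launch call while keeping the username/password auth. For local runs the app presumably expects an env file along these lines; in this sketch every variable name except AUTH_USERNAME and AUTH_PASSWORD is an assumption rather than something taken from the diff:

# secrets.env (hypothetical contents)
# OPENAI_API_KEY=sk-...
# AUTH_USERNAME=admin
# AUTH_PASSWORD=change-me

import os
from dotenv import load_dotenv, find_dotenv

# raise_error_if_not_found=False: if no secrets.env exists (e.g. on the hosted
# Space), find_dotenv returns an empty path, the load is effectively skipped,
# and the app falls back to variables already set in the environment.
_ = load_dotenv(find_dotenv(filename="secrets.env", raise_error_if_not_found=False))

AUTH_USERNAME = os.environ["AUTH_USERNAME"]
AUTH_PASSWORD = os.environ["AUTH_PASSWORD"]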