shukdevdatta123 committed
Commit 28eacfd · verified · 1 Parent(s): 30d63ae

Update app.py

Files changed (1):
  1. app.py (+10 -34)

app.py CHANGED
@@ -140,30 +140,6 @@ with gr.Blocks() as demo:
     Limits the number of tokens (words or subwords) the model can generate in its response. You can use this to control the length of the response.
     """)
 
-    gr.HTML("""
-    <style>
-        #api_key_button {
-            margin-top: 27px;
-            background: linear-gradient(135deg, #4a00e0 0%, #8e2de2 100%);
-        }
-        #api_key_button:hover {
-            background: linear-gradient(135deg, #5b10f1 0%, #9f3ef3 100%);
-        }
-        #clear_chat_button {
-            background: linear-gradient(135deg, #e53e3e 0%, #f56565 100%);
-        }
-        #clear_chat_button:hover {
-            background: linear-gradient(135deg, #c53030 0%, #e53e3e 100%);
-        }
-        #ask_button {
-            background: linear-gradient(135deg, #fbd38d 0%, #f6e05e 100%);
-        }
-        #ask_button:hover {
-            background: linear-gradient(135deg, #ecc94b 0%, #fbd38d 100%);
-        }
-    </style>
-    """)
-
     # API Key Input
     with gr.Row():
         api_key_input = gr.Textbox(label="Enter OpenAI API Key", type="password")
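Note on the removed block: the <style> rules above targeted the elem_id hooks that the buttons in the next hunk keep (for example elem_id="ask_button"), so dropping the gr.HTML call removes the gradients but leaves the IDs in place. If equivalent styling is ever wanted without inline HTML, Gradio's gr.Blocks also accepts a css argument; the following is only a sketch under that assumption, reusing selectors copied from the deleted rules:

import gradio as gr

# Sketch only: reattach the deleted #ask_button gradient via gr.Blocks(css=...).
# Assumes the elem_id="ask_button" hooks in app.py are unchanged.
ASK_BUTTON_CSS = """
#ask_button {
    background: linear-gradient(135deg, #fbd38d 0%, #f6e05e 100%);
}
#ask_button:hover {
    background: linear-gradient(135deg, #ecc94b 0%, #fbd38d 100%);
}
"""

with gr.Blocks(css=ASK_BUTTON_CSS) as demo:
    # One of the buttons from the diff; the others would pick up the same rule.
    text_button = gr.Button("Ask", elem_id="ask_button")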
@@ -180,30 +156,30 @@ with gr.Blocks() as demo:
         image_url = gr.Textbox(label="Enter Image URL")
         image_query = gr.Textbox(label="Ask about the Image")
         image_url_output = gr.Textbox(label="Response", interactive=False)
-        image_url_button = gr.Button("Ask",elem_id="ask_button")
+        image_url_button = gr.Button("Ask", elem_id="ask_button")
 
     with gr.Tab("Text Chat"):
         text_query = gr.Textbox(label="Enter your query")
         text_output = gr.Textbox(label="Response", interactive=False)
-        text_button = gr.Button("Ask",elem_id="ask_button")
+        text_button = gr.Button("Ask", elem_id="ask_button")
 
     with gr.Tab("Image Chat"):
         image_upload = gr.File(label="Upload an Image", type="filepath")
         image_text_query = gr.Textbox(label="Ask about the uploaded image")
         image_output = gr.Textbox(label="Response", interactive=False)
-        image_button = gr.Button("Ask",elem_id="ask_button")
+        image_button = gr.Button("Ask", elem_id="ask_button")
 
     with gr.Tab("PDF Chat"):
         pdf_upload = gr.File(label="Upload a PDF", type="filepath")
         pdf_text_query = gr.Textbox(label="Ask about the uploaded PDF")
         pdf_output = gr.Textbox(label="Response", interactive=False)
-        pdf_button = gr.Button("Ask",elem_id="ask_button")
-
-    with gr.Tab("Voice Chat"):
+        pdf_button = gr.Button("Ask", elem_id="ask_button")
+
+    with gr.Tab("Voice Chat (Upload)"):
         audio_upload = gr.File(label="Upload an Audio File", type="binary")
         audio_query = gr.Textbox(label="Ask about the transcription")
         audio_output = gr.Textbox(label="Response", interactive=False)
-        audio_button = gr.Button("Ask",elem_id="ask_button")
+        audio_button = gr.Button("Ask", elem_id="ask_button")
 
     with gr.Tab("Voice(Record) Chat"):
         audio_record = gr.Audio(source="microphone", type="binary", label="Record your voice")
@@ -221,15 +197,15 @@ with gr.Blocks() as demo:
     image_button.click(image_chat, [image_upload, image_text_query, temperature, top_p, max_output_tokens], image_output)
     pdf_button.click(pdf_chat, [pdf_upload, pdf_text_query, temperature, top_p, max_output_tokens], pdf_output)
 
-    # For Voice Chat
+    # For Voice Chat (Upload)
     audio_button.click(
         lambda audio_binary, query, temperature, top_p, max_output_tokens: query_openai(
             [{"role": "user", "content": [{"type": "text", "text": transcribe_audio(audio_binary, api_key)}, {"type": "text", "text": query}]}],
             temperature, top_p, max_output_tokens
         ), [audio_upload, audio_query, temperature, top_p, max_output_tokens], audio_output
     )
-
-    # For Voice(Record) Chat
+
+    # For Voice Chat (Record)
     audio_record_button.click(
         lambda audio_binary, query, temperature, top_p, max_output_tokens: query_openai(
             [{"role": "user", "content": [{"type": "text", "text": transcribe_audio(audio_binary, api_key)}, {"type": "text", "text": query}]}],
 