CobaltZvc committed on
Commit eb9e06f
1 Parent(s): 4252f3b

Update app.py

Files changed (1):
  app.py (+319, -195)

app.py CHANGED
@@ -23,6 +23,45 @@ from google.oauth2 import service_account
  from googleapiclient.discovery import build
  import wget
  import urllib.request
+ import sqlite3
+ import pandas as pd
+ import pandasql as ps
+
+ def clean(value):
+     val = value.replace("'", '').replace("[", '').replace("]", '')
+     return val
+
+ def save_uploadedfile(uploadedfile):
+     with open(uploadedfile.name, "wb") as f:
+         f.write(uploadedfile.getbuffer())
+
+ def gpt3(texts):
+     # openai.api_key = os.environ["Secret"]
+     openai.api_key = st.secrets["OPENAI_KEY"]  # secret name assumed; keep real keys in Streamlit secrets, never hardcoded in source
+     response = openai.Completion.create(
+         engine="text-davinci-002",
+         prompt=texts,
+         temperature=temp,  # `temp` is the Temperature slider value defined further down
+         max_tokens=750,
+         top_p=1,
+         frequency_penalty=0.0,
+         presence_penalty=0.0,
+         stop=(";", "/*", "</code>"))
+     x = response.choices[0].text
+     return x
+
+ def warning(sqlOutput):
+     dl = []
+     lst = ['DELETE', 'DROP', 'TRUNCATE', 'MERGE', 'ALTER', 'UPDATE', 'INSERT']
+     op2 = " ".join(sqlOutput.split())
+     op3 = op2.split(' ')
+     op4 = list(map(lambda x: x.upper(), op3))
+     for i in op4:
+         if i in lst:
+             dl.append(i)
+     for i in dl:
+         st.warning("This query will " + i + " the data ", icon="⚠️")
+
 
  stability_api = client.StabilityInference(
      key=st.secrets["STABILITY_KEY"], # API Key reference, read from Streamlit secrets.
@@ -138,209 +177,201 @@ def openai_response(PROMPT):
  st.title("Welcome to :red[_HyperChat_]!!🤖")
  st.title("How can I help?")

- Input_type = st.radio(
-     "**Input type:**",
-     ('TEXT', 'SPEECH')
- )
-
- if Input_type == 'TEXT':
-     #page_bg_img2 = """
-     #<style>
-     #[data-testid="stAppViewContainer"] {
-     #background-color: #e5e5f7;
-     #opacity: 0.8;
-     #background-size: 20px 20px;
-     #background-image: repeating-linear-gradient(0deg, #32d947, #32d947 1px, #e5e5f7 1px, #e5e5f7);
-     #}
-     #</style>
-     #"""
-     #st.markdown(page_bg_img, unsafe_allow_html=True)
-     st.write('**You are now in Text input mode**')
-     mytext = st.text_input('**Go on! Ask me anything:**')
-     if st.button("SUBMIT"):
-         question=mytext
-         response = openai.Completion.create(
-             model="text-davinci-003",
-             prompt=f'''Your name is alexa and knowledge cutoff date is 2021-09, and it is not aware of any events after that time. if the
-             Answer to following questions is not from your knowledge base or in case of queries like weather
-             updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
-             if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
-             if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
-             if the question is related to operating home appliances then print ipython type output function home_app(" action(ON/Off),appliance(TV,Geaser,Fridge,Lights,fans,AC)") .
-             if question is realted to sending mail or sms then print ipython type output function messenger_app(" message of us ,messenger(email,sms)")
-             \nQuestion-{question}
-             \nAnswer -''',
-             temperature=0.49,
-             max_tokens=256,
-             top_p=1,
-             frequency_penalty=0,
-             presence_penalty=0
-         )
-         string_temp=response.choices[0].text
+ Usage = st.radio(
+     "I want to ask:",
+     ('Random Questions', 'Questions based on custom CSV data')
+ )

-         if ("gen_draw" in string_temp):
+ if Usage == 'Questions based on custom CSV data':
+     option = ['Reset', 'Upload_csv']
+     res = st.selectbox('Select the Upload_csv option:', option)
+     if res == 'Upload_csv':
+         uploaded_file = st.file_uploader("Add dataset (csv) ", type=['csv'])
+         if uploaded_file is not None:
+             st.write("File Uploaded")
+             file_name = uploaded_file.name
+             ext = file_name.split(".")[0]
+             st.write(ext)
+             df = pd.read_csv(uploaded_file)
+             save_uploadedfile(uploaded_file)
+             col = df.columns
              try:
-                 try:
-                     wget.download(openai_response(prompt))
-                     img2 = Image.open(wget.download(openai_response(prompt)))
-                     img2.show()
-                     rx = 'Image returned'
-                     g_sheet_log(mytext, rx)
-                 except:
-                     urllib.request.urlretrieve(openai_response(prompt),"img_ret.png")
-                     img = Image.open("img_ret.png")
-                     img.show()
-                     rx = 'Image returned'
-                     g_sheet_log(mytext, rx)
+                 columns = str((df.columns).tolist())
+                 column = clean(columns)
+                 st.write('Columns:')
+                 st.text(col)
              except:
-                 # Set up our initial generation parameters.
-                 answers = stability_api.generate(
-                     prompt = mytext,
-                     seed=992446758, # If a seed is provided, the resulting generated image will be deterministic.
-                     # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
-                     # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook.
-                     steps=30, # Amount of inference steps performed on image generation. Defaults to 30.
-                     cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt.
-                     # Setting this value higher increases the strength in which it tries to match your prompt.
-                     # Defaults to 7.0 if not specified.
-                     width=512, # Generation width, defaults to 512 if not included.
-                     height=512, # Generation height, defaults to 512 if not included.
-                     samples=1, # Number of images to generate, defaults to 1 if not included.
-                     sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with.
-                     # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
-                     # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
-                 )
-
-                 # Set up our warning to print to the console if the adult content classifier is tripped.
-                 # If adult content classifier is not tripped, save generated images.
-                 for resp in answers:
-                     for artifact in resp.artifacts:
-                         if artifact.finish_reason == generation.FILTER:
-                             warnings.warn(
-                                 "Your request activated the API's safety filters and could not be processed."
-                                 "Please modify the prompt and try again.")
-                         if artifact.type == generation.ARTIFACT_IMAGE:
-                             img = Image.open(io.BytesIO(artifact.binary))
-                             st.image(img)
-                             img.save(str(artifact.seed)+ ".png") # Save our generated images with their seed number as the filename.
-                             rx = 'Image returned'
-                             g_sheet_log(mytext, rx)
-
-             # except:
-             #     st.write('image is being generated please wait...')
-             #     def extract_image_description(input_string):
-             #         return input_string.split('gen_draw("')[1].split('")')[0]
-             #     prompt=extract_image_description(string_temp)
-             #     # model_id = "CompVis/stable-diffusion-v1-4"
-             #     model_id='runwayml/stable-diffusion-v1-5'
-             #     device = "cuda"
-
-
-             #     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-             #     pipe = pipe.to(device)
-
-             #     # prompt = "a photo of an astronaut riding a horse on mars"
-             #     image = pipe(prompt).images[0]
-
-             #     image.save("astronaut_rides_horse.png")
-             #     st.image(image)
-             #     # image
-
-         elif ("vid_tube" in string_temp):
-             s = Search(mytext)
-             search_res = s.results
-             first_vid = search_res[0]
-             print(first_vid)
-             string = str(first_vid)
-             video_id = string[string.index('=') + 1:-1]
-             # print(video_id)
-             YoutubeURL = "https://www.youtube.com/watch?v="
-             OurURL = YoutubeURL + video_id
-             st.write(OurURL)
-             st_player(OurURL)
-             ry = 'Youtube link and video returned'
-             g_sheet_log(mytext, ry)
-
-         elif ("don't" in string_temp or "internet" in string_temp):
-             st.write('searching internet ')
-             search_internet(question)
-             rz = 'Internet result returned'
-             g_sheet_log(mytext, rz)
-
-         else:
-             st.write(string_temp)
-             g_sheet_log(mytext, string_temp)
+                 pass

- elif Input_type == 'SPEECH':
-     stt_button = Button(label="Speak", width=100)
-     stt_button.js_on_event("button_click", CustomJS(code="""
-         var recognition = new webkitSpeechRecognition();
-         recognition.continuous = true;
-         recognition.interimResults = true;
-         recognition.onresult = function (e) {
-             var value = "";
-             for (var i = e.resultIndex; i < e.results.length; ++i) {
-                 if (e.results[i].isFinal) {
-                     value += e.results[i][0].transcript;
-                 }
-             }
-             if ( value != "") {
-                 document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
-             }
-         }
-         recognition.start();
-     """))
+     temp = st.slider('Temperature: ', 0.0, 1.0, 0.0)
+
+     if st.checkbox('Use Prompt'):
+         with st.form(key='columns_in_form2'):
+             col3, col4 = st.columns(2)
+             with col3:
+                 userPrompt = st.text_area("Input Prompt", 'Enter Natural Language Query')
+                 submitButton = st.form_submit_button(label='Submit')
+                 if submitButton:
+                     try:
+                         col_p = "Create SQL statement from instruction. "+ext+" " " (" + column +")." +" Request:" + userPrompt + "SQL statement:"
+                         result = gpt3(col_p)
+                     except:
+                         result = gpt3(userPrompt)
+                     st.success('loaded')
+             with col4:
+                 try:
+                     sqlOutput = st.text_area('SQL Query', value=gpt3(col_p))
+                     warning(sqlOutput)
+                     cars = pd.read_csv('cars.csv')  # keeps a `cars` table in scope so generated SQL that references it can resolve
+                     result_tab2 = ps.sqldf(sqlOutput)
+                     st.write(result_tab2)
+                     with open("fewshot_matplot.txt", "r") as file:
+                         text_plot = file.read()
+
+                     result_tab = result_tab2.reset_index(drop=True)
+                     result_tab_string = result_tab.to_string()
+                     gr_prompt = text_plot + userPrompt + result_tab_string + "Plot graph for: "
+
+                     if len(gr_prompt) > 4097:  # NOTE: len() counts characters here, not tokens
+                         st.write('OVERWHELMING DATA!!! You have given me more than 4097 tokens! ^_^')
+                         st.write('As of today, the NLP model text-davinci-003 that I run on takes in inputs that have less than 4097 tokens. Kindly retry ^_^')
+
+                     elif len(result_tab2.columns) < 2:
+                         st.write("I need more data to conduct analysis and provide visualizations for you... ^_^")
+
+                     else:
+                         st.success("Plotting...")
+                         response_graph = openai.Completion.create(
+                             engine="text-davinci-003",
+                             prompt=gr_prompt,
+                             max_tokens=1024,
+                             n=1,
+                             stop=None,
+                             temperature=0.5,
+                         )
+
+                         if response_graph['choices'][0]['text'] != "":
+                             print(response_graph['choices'][0]['text'])
+                             exec(response_graph['choices'][0]['text'])
+
+                         else:
+                             print('Retry! Graph could not be plotted *_*')
+
+                 except:
+                     pass

- result = streamlit_bokeh_events(
-     stt_button,
-     events="GET_TEXT",
-     key="listen",
-     refresh_on_update=False,
-     override_height=75,
-     debounce_time=0)

- if result:
-     if "GET_TEXT" in result:
-         st.write(result.get("GET_TEXT"))
-         question = result.get("GET_TEXT")
+ elif Usage == 'Random Questions':
+     Input_type = st.radio(
+         "**Input type:**",
+         ('TEXT', 'SPEECH')
+     )
+
+     if Input_type == 'TEXT':
+         #page_bg_img2 = """
+         #<style>
+         #[data-testid="stAppViewContainer"] {
+         #background-color: #e5e5f7;
+         #opacity: 0.8;
+         #background-size: 20px 20px;
+         #background-image: repeating-linear-gradient(0deg, #32d947, #32d947 1px, #e5e5f7 1px, #e5e5f7);
+         #}
+         #</style>
+         #"""
+         #st.markdown(page_bg_img, unsafe_allow_html=True)
+         st.write('**You are now in Text input mode**')
+         mytext = st.text_input('**Go on! Ask me anything:**')
+         if st.button("SUBMIT"):
+             question=mytext
              response = openai.Completion.create(
-                 model="text-davinci-003",
-                 prompt=f'''Your knowledge cutoff is 2021-09, and it is not aware of any events after that time. if the
-                 Answer to following questions is not from your knowledge base or in case of queries like weather
-                 updates / stock updates / current news Etc which requires you to have internet connection then print i don't have access to internet to answer your question,
-                 if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
-                 if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevent search query")
-                 \nQuestion-{question}
-                 \nAnswer -''',
-                 temperature=0.49,
-                 max_tokens=256,
-                 top_p=1,
-                 frequency_penalty=0,
-                 presence_penalty=0
+                 model="text-davinci-003",
+                 prompt=f'''Your name is alexa and your knowledge cutoff date is 2021-09; you are not aware of any events after that time. if the
+                 Answer to following questions is not from your knowledge base or in case of queries like weather
+                 updates / stock updates / current news etc which requires you to have internet connection then print i don't have access to internet to answer your question,
+                 if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+                 if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevant search query")
+                 if the question is related to operating home appliances then print ipython type output function home_app(" action(ON/Off),appliance(TV,Geyser,Fridge,Lights,fans,AC)").
+                 if question is related to sending mail or sms then print ipython type output function messenger_app(" message of us ,messenger(email,sms)")
+                 \nQuestion-{question}
+                 \nAnswer -''',
+                 temperature=0.49,
+                 max_tokens=256,
+                 top_p=1,
+                 frequency_penalty=0,
+                 presence_penalty=0
              )
              string_temp=response.choices[0].text
-
+
              if ("gen_draw" in string_temp):
-                 st.write('*image is being generated please wait..* ')
-                 def extract_image_description(input_string):
-                     return input_string.split('gen_draw("')[1].split('")')[0]
-                 prompt=extract_image_description(string_temp)
-                 # model_id = "CompVis/stable-diffusion-v1-4"
-                 model_id='runwayml/stable-diffusion-v1-5'
-                 device = "cuda"
-
-                 pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-                 pipe = pipe.to(device)
-
-                 # prompt = "a photo of an astronaut riding a horse on mars"
-                 image = pipe(prompt).images[0]
-
-                 image.save("astronaut_rides_horse.png")
-                 st.image(image)
-                 # image
-
+                 try:
+                     try:
+                         wget.download(openai_response(prompt))  # NOTE: `prompt` is not defined in this branch; the query text lives in `mytext`
+                         img2 = Image.open(wget.download(openai_response(prompt)))
+                         img2.show()
+                         rx = 'Image returned'
+                         g_sheet_log(mytext, rx)
+                     except:
+                         urllib.request.urlretrieve(openai_response(prompt), "img_ret.png")
+                         img = Image.open("img_ret.png")
+                         img.show()
+                         rx = 'Image returned'
+                         g_sheet_log(mytext, rx)
+                 except:
+                     # Set up our initial generation parameters.
+                     answers = stability_api.generate(
+                         prompt = mytext,
+                         seed=992446758, # If a seed is provided, the resulting generated image will be deterministic.
+                         # What this means is that as long as all generation parameters remain the same, you can always recall the same image simply by generating it again.
+                         # Note: This isn't quite the case for Clip Guided generations, which we'll tackle in a future example notebook.
+                         steps=30, # Amount of inference steps performed on image generation. Defaults to 30.
+                         cfg_scale=8.0, # Influences how strongly your generation is guided to match your prompt.
+                         # Setting this value higher increases the strength in which it tries to match your prompt.
+                         # Defaults to 7.0 if not specified.
+                         width=512, # Generation width, defaults to 512 if not included.
+                         height=512, # Generation height, defaults to 512 if not included.
+                         samples=1, # Number of images to generate, defaults to 1 if not included.
+                         sampler=generation.SAMPLER_K_DPMPP_2M # Choose which sampler we want to denoise our generation with.
+                         # Defaults to k_dpmpp_2m if not specified. Clip Guidance only supports ancestral samplers.
+                         # (Available Samplers: ddim, plms, k_euler, k_euler_ancestral, k_heun, k_dpm_2, k_dpm_2_ancestral, k_dpmpp_2s_ancestral, k_lms, k_dpmpp_2m)
+                     )
+
+                     # Set up our warning to print to the console if the adult content classifier is tripped.
+                     # If adult content classifier is not tripped, save generated images.
+                     for resp in answers:
+                         for artifact in resp.artifacts:
+                             if artifact.finish_reason == generation.FILTER:
+                                 warnings.warn(
+                                     "Your request activated the API's safety filters and could not be processed."
+                                     "Please modify the prompt and try again.")
+                             if artifact.type == generation.ARTIFACT_IMAGE:
+                                 img = Image.open(io.BytesIO(artifact.binary))
+                                 st.image(img)
+                                 img.save(str(artifact.seed) + ".png") # Save our generated images with their seed number as the filename.
+                                 rx = 'Image returned'
+                                 g_sheet_log(mytext, rx)
+
+                 # except:
+                 #     st.write('image is being generated please wait...')
+                 #     def extract_image_description(input_string):
+                 #         return input_string.split('gen_draw("')[1].split('")')[0]
+                 #     prompt=extract_image_description(string_temp)
+                 #     # model_id = "CompVis/stable-diffusion-v1-4"
+                 #     model_id='runwayml/stable-diffusion-v1-5'
+                 #     device = "cuda"
+
+
+                 #     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+                 #     pipe = pipe.to(device)
+
+                 #     # prompt = "a photo of an astronaut riding a horse on mars"
+                 #     image = pipe(prompt).images[0]
+
+                 #     image.save("astronaut_rides_horse.png")
+                 #     st.image(image)
+                 #     # image
+
              elif ("vid_tube" in string_temp):
-                 s = Search(question)
+                 s = Search(mytext)
                  search_res = s.results
                  first_vid = search_res[0]
                  print(first_vid)
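
The CSV branch above hinges on pandasql: ps.sqldf() resolves table names in the generated SQL against DataFrames that are in scope, which is why the uploaded file is read into df before the query runs. A minimal sketch of that flow, with made-up DataFrame contents for illustration:

    import pandas as pd
    import pandasql as ps

    df = pd.DataFrame({"model": ["A", "B"], "mpg": [31, 18]})
    # sqldf looks up "df" in the supplied namespace and runs the SQL over it
    print(ps.sqldf("SELECT model FROM df WHERE mpg > 20", locals()))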
@@ -351,9 +382,102 @@ elif Input_type == 'SPEECH':
                  OurURL = YoutubeURL + video_id
                  st.write(OurURL)
                  st_player(OurURL)
-
-             elif ("don't" in string_temp or "internet" in string_temp ):
-                 st.write('*searching internet*')
+                 ry = 'Youtube link and video returned'
+                 g_sheet_log(mytext, ry)
+
+             elif ("don't" in string_temp or "internet" in string_temp):
+                 st.write('searching internet ')
                  search_internet(question)
+                 rz = 'Internet result returned'
+                 g_sheet_log(mytext, rz)
+
              else:
                  st.write(string_temp)
+                 g_sheet_log(mytext, string_temp)
+
+     elif Input_type == 'SPEECH':
+         stt_button = Button(label="Speak", width=100)
+         stt_button.js_on_event("button_click", CustomJS(code="""
+             var recognition = new webkitSpeechRecognition();
+             recognition.continuous = true;
+             recognition.interimResults = true;
+             recognition.onresult = function (e) {
+                 var value = "";
+                 for (var i = e.resultIndex; i < e.results.length; ++i) {
+                     if (e.results[i].isFinal) {
+                         value += e.results[i][0].transcript;
+                     }
+                 }
+                 if ( value != "") {
+                     document.dispatchEvent(new CustomEvent("GET_TEXT", {detail: value}));
+                 }
+             }
+             recognition.start();
+         """))
+
+         result = streamlit_bokeh_events(
+             stt_button,
+             events="GET_TEXT",
+             key="listen",
+             refresh_on_update=False,
+             override_height=75,
+             debounce_time=0)
+
+         if result:
+             if "GET_TEXT" in result:
+                 st.write(result.get("GET_TEXT"))
+                 question = result.get("GET_TEXT")
+                 response = openai.Completion.create(
+                     model="text-davinci-003",
+                     prompt=f'''Your knowledge cutoff is 2021-09; you are not aware of any events after that time. if the
+                     Answer to following questions is not from your knowledge base or in case of queries like weather
+                     updates / stock updates / current news etc which requires you to have internet connection then print i don't have access to internet to answer your question,
+                     if question is related to image or painting or drawing generation then print ipython type output function gen_draw("detailed prompt of image to be generated")
+                     if the question is related to playing a song or video or music of a singer then print ipython type output function vid_tube("relevant search query")
+                     \nQuestion-{question}
+                     \nAnswer -''',
+                     temperature=0.49,
+                     max_tokens=256,
+                     top_p=1,
+                     frequency_penalty=0,
+                     presence_penalty=0
+                 )
+                 string_temp=response.choices[0].text
+
+                 if ("gen_draw" in string_temp):
+                     st.write('*image is being generated please wait..* ')
+                     def extract_image_description(input_string):
+                         return input_string.split('gen_draw("')[1].split('")')[0]
+                     prompt=extract_image_description(string_temp)
+                     # model_id = "CompVis/stable-diffusion-v1-4"
+                     model_id='runwayml/stable-diffusion-v1-5'
+                     device = "cuda"
+
+                     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+                     pipe = pipe.to(device)
+
+                     # prompt = "a photo of an astronaut riding a horse on mars"
+                     image = pipe(prompt).images[0]
+
+                     image.save("astronaut_rides_horse.png")
+                     st.image(image)
+                     # image
+
+                 elif ("vid_tube" in string_temp):
+                     s = Search(question)
+                     search_res = s.results
+                     first_vid = search_res[0]
+                     print(first_vid)
+                     string = str(first_vid)
+                     video_id = string[string.index('=') + 1:-1]
+                     # print(video_id)
+                     YoutubeURL = "https://www.youtube.com/watch?v="
+                     OurURL = YoutubeURL + video_id
+                     st.write(OurURL)
+                     st_player(OurURL)
+
+                 elif ("don't" in string_temp or "internet" in string_temp):
+                     st.write('*searching internet*')
+                     search_internet(question)
+                 else:
+                     st.write(string_temp)
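
One caution on the plotting path: the new code passes model-generated matplotlib code straight to exec(). A hedged sketch of a slightly safer pattern, not what the commit does: run the generated snippet over an explicit namespace so it at least cannot touch the app's globals. The `generated` string below stands in for the model output:

    import pandas as pd
    import matplotlib.pyplot as plt

    generated = "result_tab.plot(x='model', y='mpg', kind='bar')"
    scope = {"result_tab": pd.DataFrame({"model": ["A", "B"], "mpg": [31, 18]}),
             "plt": plt}
    exec(generated, scope)  # exec is still risky; this only narrows its reach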
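
Finally, note that the guard len(gr_prompt) > 4097 in the CSV branch counts characters, while the model's 4097 limit is defined in tokens, so the check is only a rough proxy. A token-accurate sketch, assuming the tiktoken package is available:

    import tiktoken

    enc = tiktoken.encoding_for_model("text-davinci-003")
    gr_prompt = "Plot graph for: monthly sales by region"
    # Gate the completion call on token count rather than character count
    print(len(enc.encode(gr_prompt)) <= 4097)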