Pijush2023 committed on
Commit
b01e77f
·
verified ·
1 Parent(s): 3d17868

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +112 -56
app.py CHANGED
@@ -358,33 +358,65 @@ Sure! Here's the information you requested:
358
  """
359
 
360
 
361
- def generate_bot_response(history, choice, retrieval_mode, model_choice):
362
- if not history:
363
- return
 
 
 
 
364
 
365
- # Select the model
366
- # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
367
- selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
368
 
 
 
369
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
370
  response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
 
 
371
  history[-1][1] = ""
372
 
373
- for character in response:
374
- history[-1][1] += character
375
- yield history # Stream each character as it is generated
376
- time.sleep(0.05) # Add a slight delay to simulate streaming
377
 
378
- yield history # Final yield with the complete response
379
 
380
 
381
 
382
 
383
 
384
 
 
 
 
 
 
 
 
 
385
 
386
- def generate_tts_response(response, tts_choice):
 
 
 
 
 
387
  with concurrent.futures.ThreadPoolExecutor() as executor:
 
388
  if tts_choice == "Alpha":
389
  audio_future = executor.submit(generate_audio_elevenlabs, response)
390
  elif tts_choice == "Beta":
@@ -392,74 +424,98 @@ def generate_tts_response(response, tts_choice):
392
  # elif tts_choice == "Gamma":
393
  # audio_future = executor.submit(generate_audio_mars5, response)
394
 
 
 
 
 
 
 
 
395
  audio_path = audio_future.result()
396
- return audio_path
 
 
397
 
398
 
399
 
400
 
401
 
402
- import concurrent.futures
403
- # Existing bot function with concurrent futures for parallel processing
404
- def bot(history, choice, tts_choice, retrieval_mode, model_choice):
405
- # Initialize an empty response
406
- response = ""
407
 
408
- # Create a thread pool to handle both text generation and TTS conversion in parallel
409
- with concurrent.futures.ThreadPoolExecutor() as executor:
410
- # Start the bot response generation in parallel
411
- bot_future = executor.submit(generate_bot_response, history, choice, retrieval_mode, model_choice)
412
 
413
- # Wait for the text generation to start
414
- for history_chunk in bot_future.result():
415
- response = history_chunk[-1][1] # Update the response with the current state
416
- yield history_chunk, None # Stream the text output as it's generated
417
 
418
- # Once text is fully generated, start the TTS conversion
419
- tts_future = executor.submit(generate_tts_response, response, tts_choice)
420
 
421
- # Get the audio output after TTS is done
422
- audio_path = tts_future.result()
423
 
424
- # Stream the final text and audio output
425
- yield history, audio_path
 
 
 
 
 
 
 
 
 
 
426
 
 
 
 
 
 
427
 
428
 
429
 
430
 
431
 
432
 
433
- # Modified bot function to separate chatbot response and TTS generation
434
 
435
- def generate_bot_response(history, choice, retrieval_mode, model_choice):
436
- if not history:
437
- return
438
 
439
- # Select the model
440
- # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
441
- selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
442
 
443
 
444
- response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
445
- history[-1][1] = ""
446
 
447
- for character in response:
448
- history[-1][1] += character
449
- yield history # Stream each character as it is generated
450
- time.sleep(0.05) # Add a slight delay to simulate streaming
451
 
452
- yield history # Final yield with the complete response
453
 
454
 
455
 
456
 
457
- def generate_audio_after_text(response, tts_choice):
458
- # Generate TTS audio after text response is completed
459
- with concurrent.futures.ThreadPoolExecutor() as executor:
460
- tts_future = executor.submit(generate_tts_response, response, tts_choice)
461
- audio_path = tts_future.result()
462
- return audio_path
463
 
464
  import re
465
 
@@ -1417,9 +1473,9 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
1417
  retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
1418
  .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
1419
  # First, generate the bot response
1420
- .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
1421
  # Then, generate the TTS response based on the bot's response
1422
- .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
1423
  .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details")
1424
  .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
1425
  )
@@ -1435,10 +1491,10 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
1435
  fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
1436
  ).then(
1437
  # First, generate the bot response
1438
- fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
1439
  ).then(
1440
  # Then, generate the TTS response based on the bot's response
1441
- fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
1442
  ).then(
1443
  fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details"
1444
  ).then(
 
358
  """
359
 
360
 
361
+ # def generate_bot_response(history, choice, retrieval_mode, model_choice):
362
+ # if not history:
363
+ # return
364
+
365
+ # # Select the model
366
+ # # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
367
+ # selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
368
 
 
 
 
369
 
370
+ # response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
371
+ # history[-1][1] = ""
372
 
373
+ # for character in response:
374
+ # history[-1][1] += character
375
+ # yield history # Stream each character as it is generated
376
+ # time.sleep(0.05) # Add a slight delay to simulate streaming
377
+
378
+ # yield history # Final yield with the complete response
379
+
380
# Produce the bot's text reply for the most recent user turn.
def bot_response(history, choice, retrieval_mode, model_choice):
    """Generate the bot's reply for the latest user message.

    Returns ``(history, response)`` where *response* is the raw reply text,
    or ``(history, None)`` when the conversation is empty.  The assistant
    slot of the last turn is blanked so the TTS streamer can re-fill it
    character by character.
    """
    # Guard clause: nothing to answer when there is no chat history yet.
    if not history:
        return history, None

    # Map the UI model label onto the underlying model object; any label
    # other than LM-1 / LM-3 falls back to the phi pipeline.
    if model_choice == "LM-1":
        model = chat_model
    elif model_choice == "LM-3":
        model = chat_model1
    else:
        model = phi_pipe

    # Run retrieval + generation for the latest user message.
    reply, addresses = generate_answer(history[-1][0], choice, retrieval_mode, model)

    # Clear the assistant side of the last turn (refilled during streaming).
    history[-1][1] = ""

    # Hand back the updated history together with the generated text.
    return history, reply
 
 
396
 
 
397
 
398
 
399
 
400
 
401
 
402
 
403
+ # def generate_tts_response(response, tts_choice):
404
+ # with concurrent.futures.ThreadPoolExecutor() as executor:
405
+ # if tts_choice == "Alpha":
406
+ # audio_future = executor.submit(generate_audio_elevenlabs, response)
407
+ # elif tts_choice == "Beta":
408
+ # audio_future = executor.submit(generate_audio_parler_tts, response)
409
+ # # elif tts_choice == "Gamma":
410
+ # # audio_future = executor.submit(generate_audio_mars5, response)
411
 
412
+ # audio_path = audio_future.result()
413
+ # return audio_path
414
+
415
+
416
# Function to generate TTS response
def tts_response(history, response, tts_choice):
    """Stream *response* into the last history turn and then yield audio.

    Kicks off TTS synthesis in a background thread, simulates a typing
    effect by appending one character at a time (yielding ``(history, None)``
    after each), and finally yields ``(history, audio_path)``.

    NOTE(review): the Gradio wiring elsewhere passes ``inputs=[chatbot,
    tts_choice]`` — only two arguments — to this three-argument function;
    confirm callers also supply the response text.
    """
    with concurrent.futures.ThreadPoolExecutor() as executor:
        # Based on the TTS choice, submit the corresponding task
        if tts_choice == "Alpha":
            audio_future = executor.submit(generate_audio_elevenlabs, response)
        elif tts_choice == "Beta":
            audio_future = executor.submit(generate_audio_parler_tts, response)
        # elif tts_choice == "Gamma":
        #     audio_future = executor.submit(generate_audio_mars5, response)
        else:
            # Fix: an unrecognized tts_choice previously left `audio_future`
            # unbound and crashed with NameError at .result(); degrade to
            # "text only, no audio" instead.
            audio_future = None

        # Simulate typing effect by adding characters with delay
        for character in response:
            history[-1][1] += character
            time.sleep(0.05)
            yield history, None

        # Get the path of the generated audio (None when no TTS task ran)
        audio_path = audio_future.result() if audio_future is not None else None

        # Return the final history and audio path
        yield history, audio_path
438
 
439
 
440
 
441
 
442
 
443
+ # import concurrent.futures
444
+ # # Existing bot function with concurrent futures for parallel processing
445
+ # def bot(history, choice, tts_choice, retrieval_mode, model_choice):
446
+ # # Initialize an empty response
447
+ # response = ""
448
 
449
+ # # Create a thread pool to handle both text generation and TTS conversion in parallel
450
+ # with concurrent.futures.ThreadPoolExecutor() as executor:
451
+ # # Start the bot response generation in parallel
452
+ # bot_future = executor.submit(generate_bot_response, history, choice, retrieval_mode, model_choice)
453
 
454
+ # # Wait for the text generation to start
455
+ # for history_chunk in bot_future.result():
456
+ # response = history_chunk[-1][1] # Update the response with the current state
457
+ # yield history_chunk, None # Stream the text output as it's generated
458
 
459
+ # # Once text is fully generated, start the TTS conversion
460
+ # tts_future = executor.submit(generate_tts_response, response, tts_choice)
461
 
462
+ # # Get the audio output after TTS is done
463
+ # audio_path = tts_future.result()
464
 
465
+ # # Stream the final text and audio output
466
+ # yield history, audio_path
467
+
468
+
469
# Full pipeline: text generation first, then TTS streaming.
def bot(history, choice, tts_choice, retrieval_mode, model_choice):
    """Drive the complete turn: generate the reply, then stream it with TTS.

    Yields ``(history, audio_path_or_None)`` tuples suitable for the
    chatbot and audio output components.
    """
    # Stage 1: produce the text reply (also clears the last assistant slot).
    history, response = bot_response(history, choice, retrieval_mode, model_choice)

    if not response:
        # No reply was produced (e.g. empty history) — emit state unchanged.
        yield history, None
        return

    # Stage 2: delegate character streaming and audio synthesis to the
    # TTS generator, forwarding its (history, audio) pairs verbatim.
    yield from tts_response(history, response, tts_choice)
483
 
484
 
485
 
486
 
487
 
488
 
489
+ # # Modified bot function to separate chatbot response and TTS generation
490
 
491
+ # def generate_bot_response(history, choice, retrieval_mode, model_choice):
492
+ # if not history:
493
+ # return
494
 
495
+ # # Select the model
496
+ # # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
497
+ # selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
498
 
499
 
500
+ # response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
501
+ # history[-1][1] = ""
502
 
503
+ # for character in response:
504
+ # history[-1][1] += character
505
+ # yield history # Stream each character as it is generated
506
+ # time.sleep(0.05) # Add a slight delay to simulate streaming
507
 
508
+ # yield history # Final yield with the complete response
509
 
510
 
511
 
512
 
513
+ # def generate_audio_after_text(response, tts_choice):
514
+ # # Generate TTS audio after text response is completed
515
+ # with concurrent.futures.ThreadPoolExecutor() as executor:
516
+ # tts_future = executor.submit(generate_tts_response, response, tts_choice)
517
+ # audio_path = tts_future.result()
518
+ # return audio_path
519
 
520
  import re
521
 
 
1473
  retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
1474
  .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
1475
  # First, generate the bot response
1476
+ .then(fn=bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
1477
  # Then, generate the TTS response based on the bot's response
1478
+ .then(fn=tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
1479
  .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details")
1480
  .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
1481
  )
 
1491
  fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
1492
  ).then(
1493
  # First, generate the bot response
1494
+ fn=bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
1495
  ).then(
1496
  # Then, generate the TTS response based on the bot's response
1497
+ fn=tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
1498
  ).then(
1499
  fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details"
1500
  ).then(