Pijush2023 committed on
Commit
1568ea2
·
verified ·
1 Parent(s): 6fea545

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -6
app.py CHANGED
@@ -385,14 +385,22 @@ def generate_bot_response(history, choice, retrieval_mode, model_choice):
385
 
386
 
387
 
388
- def generate_tts_response(response, tts_choice):
 
 
 
 
 
 
 
 
 
 
389
  with concurrent.futures.ThreadPoolExecutor() as executor:
390
  if tts_choice == "Alpha":
391
- audio_future = executor.submit(generate_audio_elevenlabs, response)
392
  elif tts_choice == "Beta":
393
- audio_future = executor.submit(generate_audio_parler_tts, response)
394
- # elif tts_choice == "Gamma":
395
- # audio_future = executor.submit(generate_audio_mars5, response)
396
 
397
  audio_path = audio_future.result()
398
  return audio_path
@@ -403,7 +411,6 @@ def generate_tts_response(response, tts_choice):
403
 
404
 
405
 
406
-
407
  import concurrent.futures
408
  # Existing bot function with concurrent futures for parallel processing
409
  def bot(history, choice, tts_choice, retrieval_mode, model_choice):
 
385
 
386
 
387
 
388
+
389
+
390
+
391
+ def generate_tts_response(history, tts_choice):
392
+ # Get the most recent bot response from the chat history
393
+ if history and len(history) > 0:
394
+ recent_response = history[-1][1] # The second item in the tuple is the bot response text
395
+ else:
396
+ recent_response = ""
397
+
398
+ # Call the TTS function for the recent response
399
  with concurrent.futures.ThreadPoolExecutor() as executor:
400
  if tts_choice == "Alpha":
401
+ audio_future = executor.submit(generate_audio_elevenlabs, recent_response)
402
  elif tts_choice == "Beta":
403
+ audio_future = executor.submit(generate_audio_parler_tts, recent_response)
 
 
404
 
405
  audio_path = audio_future.result()
406
  return audio_path
 
411
 
412
 
413
 
 
414
  import concurrent.futures
415
  # Existing bot function with concurrent futures for parallel processing
416
  def bot(history, choice, tts_choice, retrieval_mode, model_choice):