Runtime error
Update app.py
app.py
CHANGED
@@ -358,65 +358,35 @@ Sure! Here's the information you requested:
     """
 
 
-
-# if not history:
-#     return
-
-# # Select the model
-# # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
-# selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
-
-
-# response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
-# history[-1][1] = ""
-
-# for character in response:
-#     history[-1][1] += character
-#     yield history  # Stream each character as it is generated
-#     time.sleep(0.05)  # Add a slight delay to simulate streaming
-
-# yield history  # Final yield with the complete response
-
-# Function to generate bot response
-def bot_response(history, choice, retrieval_mode, model_choice):
+def generate_bot_response(history, choice, retrieval_mode, model_choice):
     if not history:
-        return
-
+        return
+
     # Select the model
+    # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
     selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
-
-
+
+
     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
-
-    # Clear current message in history
     history[-1][1] = ""
 
-
-
+    for character in response:
+        history[-1][1] += character
+        yield history  # Stream each character as it is generated
+        time.sleep(0.05)  # Add a slight delay to simulate streaming
 
+    yield history  # Final yield with the complete response
 
 
 
 
 
 
-# def generate_tts_response(response, tts_choice):
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         if tts_choice == "Alpha":
-#             audio_future = executor.submit(generate_audio_elevenlabs, response)
-#         elif tts_choice == "Beta":
-#             audio_future = executor.submit(generate_audio_parler_tts, response)
-#         # elif tts_choice == "Gamma":
-#         #     audio_future = executor.submit(generate_audio_mars5, response)
-
-#         audio_path = audio_future.result()
-#         return audio_path
 
 
-
-def tts_response(history, response, tts_choice):
+
+def generate_tts_response(response, tts_choice):
     with concurrent.futures.ThreadPoolExecutor() as executor:
-        # Based on the TTS choice, submit the corresponding task
         if tts_choice == "Alpha":
             audio_future = executor.submit(generate_audio_elevenlabs, response)
         elif tts_choice == "Beta":
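The new generate_tts_response selects an engine with an if/elif ladder. An equivalent, table-driven way to write that dispatch — shown here as a standalone sketch, with synth_alpha and synth_beta as hypothetical stand-ins for the app's generate_audio_elevenlabs and generate_audio_parler_tts — would make re-enabling the commented-out "Gamma" branch a one-line addition:

import concurrent.futures

def synth_alpha(text):
    return "/tmp/alpha.wav"  # hypothetical stand-in for generate_audio_elevenlabs

def synth_beta(text):
    return "/tmp/beta.wav"   # hypothetical stand-in for generate_audio_parler_tts

# Re-enabling another engine ("Gamma") would be one more entry here.
SYNTHESIZERS = {"Alpha": synth_alpha, "Beta": synth_beta}

def tts_to_path(response, tts_choice):
    synth = SYNTHESIZERS[tts_choice]  # unknown choices raise KeyError instead of leaving audio_future unbound
    with concurrent.futures.ThreadPoolExecutor() as executor:
        return executor.submit(synth, response).result()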
@@ -424,88 +394,49 @@ def tts_response(history, response, tts_choice):
         # elif tts_choice == "Gamma":
         #     audio_future = executor.submit(generate_audio_mars5, response)
 
-        # Simulate typing effect by adding characters with delay
-        for character in response:
-            history[-1][1] += character
-            time.sleep(0.05)
-            yield history, None
-
-        # Get the path of the generated audio
         audio_path = audio_future.result()
-
-        # Return the final history and audio path
-        yield history, audio_path
-
-
+        return audio_path
 
 
 
-# import concurrent.futures
-# # Existing bot function with concurrent futures for parallel processing
-# def bot(history, choice, tts_choice, retrieval_mode, model_choice):
-#     # Initialize an empty response
-#     response = ""
 
-#     # Create a thread pool to handle both text generation and TTS conversion in parallel
-#     with concurrent.futures.ThreadPoolExecutor() as executor:
-#         # Start the bot response generation in parallel
-#         bot_future = executor.submit(generate_bot_response, history, choice, retrieval_mode, model_choice)
-
-#         # Wait for the text generation to start
-#         for history_chunk in bot_future.result():
-#             response = history_chunk[-1][1]  # Update the response with the current state
-#             yield history_chunk, None  # Stream the text output as it's generated
 
-#         # Once text is fully generated, start the TTS conversion
-#         tts_future = executor.submit(generate_tts_response, response, tts_choice)
 
-#         # Get the audio output after TTS is done
-#         audio_path = tts_future.result()
 
-#         # Stream the final text and audio output
-#         yield history, audio_path
 
-
-#
+import concurrent.futures
+# Existing bot function with concurrent futures for parallel processing
 def bot(history, choice, tts_choice, retrieval_mode, model_choice):
-#
-
-
-    # Start TTS process if response exists
-    if response:
-        tts_gen = tts_response(history, response, tts_choice)
+    # Initialize an empty response
+    response = ""
 
-
-
-
-
-
+    # Create a thread pool to handle both text generation and TTS conversion in parallel
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        # Start the bot response generation in parallel
+        bot_future = executor.submit(generate_bot_response, history, choice, retrieval_mode, model_choice)
+
+        # Wait for the text generation to start
+        for history_chunk in bot_future.result():
+            response = history_chunk[-1][1]  # Update the response with the current state
+            yield history_chunk, None  # Stream the text output as it's generated
 
+        # Once text is fully generated, start the TTS conversion
+        tts_future = executor.submit(generate_tts_response, response, tts_choice)
 
+        # Get the audio output after TTS is done
+        audio_path = tts_future.result()
 
+        # Stream the final text and audio output
+        yield history, audio_path
 
 
 
-# # Modified bot function to separate chatbot response and TTS generation
 
-# def generate_bot_response(history, choice, retrieval_mode, model_choice):
-#     if not history:
-#         return
 
-#     # Select the model
-#     # # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
-#     selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
 
 
-#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
-#     history[-1][1] = ""
 
-#     for character in response:
-#         history[-1][1] += character
-#         yield history  # Stream each character as it is generated
-#         time.sleep(0.05)  # Add a slight delay to simulate streaming
 
-#     yield history  # Final yield with the complete response
 
 
 
@@ -1475,9 +1406,9 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
         retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
             .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
             # First, generate the bot response
-            .then(fn=
+            .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
             # Then, generate the TTS response based on the bot's response
-            .then(fn=
+            .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
             .then(fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details")
             .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
         )
@@ -1493,10 +1424,10 @@ with gr.Blocks(theme='Pijush2023/scikit-learn-pijush') as demo:
             fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
         ).then(
             # First, generate the bot response
-            fn=
+            fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
         ).then(
             # Then, generate the TTS response based on the bot's response
-            fn=
+            fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
         ).then(
             fn=show_map_if_details, inputs=[chatbot, choice], outputs=[location_output, location_output], api_name="api_show_map_details"
         ).then(
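The rewired event chains depend on two Gradio behaviors: a generator callback such as generate_bot_response streams every yield to its output components, and each .then() step starts only after the previous one completes. A minimal, hypothetical sketch of the same pattern (stream_reply and speak are illustrative, not from app.py; it assumes the list-of-pairs Chatbot format the app itself uses):

import time
import gradio as gr

def stream_reply(message, history):
    history = history + [[message, ""]]
    for ch in "echo: " + message:
        history[-1][1] += ch
        time.sleep(0.05)
        yield history  # each yield repaints the Chatbot

def speak(history):
    # Stand-in for a TTS step that consumes the finished reply;
    # note it pulls the latest reply string out of the history first.
    return f"(audio for: {history[-1][1]})"

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    tts_out = gr.Textbox(label="TTS result")
    box = gr.Textbox()
    # .then() waits for the streaming step to finish before running speak
    box.submit(stream_reply, inputs=[box, chatbot], outputs=[chatbot]).then(
        speak, inputs=[chatbot], outputs=[tts_out]
    )

demo.launch()

This also shows why the new wiring's inputs=[chatbot, tts_choice] hands the full history, not a plain string, to generate_tts_response's response parameter — an adapter like speak above would need to extract the latest reply before synthesizing.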