Pijush2023 committed
Commit dc929c0 · verified · 1 Parent(s): a30fcda

Update app.py

Files changed (1)
  1. app.py +288 -207
app.py CHANGED
@@ -367,11 +367,31 @@ QA_CHAIN_PROMPT_2 = PromptTemplate(input_variables=["context", "question"], temp
 # Sure! Here's the information:
 # """

+# phi_custom_template = """
+# <|system|>
+# Sei un esperto di madrelingua italiana. Il tuo compito è fornire risposte precise, accurate, concise, nitide e brevi basate sul documento fornito. Dovresti restituire le informazioni nel seguente formato:
+
+# - Nome del documento: (il nome del documento)
+# - Numero di pagina: (numero di pagina)
+# - Contenuto effettivo: (contenuto pertinente del documento)
+
+# Se non riesci a trovare la risposta nel documento, rispondi semplicemente con "Questa domanda va oltre la mia conoscenza".
+# <|end|>
+
+# <|user|>
+# {context}
+# Question: {question}<|end|>
+
+# <|assistant|>
+# Sure! The Responses are as follows:
+# """
+
+# Modify the Phi-3.5 template to include the selected file
 phi_custom_template = """
 <|system|>
 Sei un esperto di madrelingua italiana. Il tuo compito è fornire risposte precise, accurate, concise, nitide e brevi basate sul documento fornito. Dovresti restituire le informazioni nel seguente formato:

-- Nome del documento: (il nome del documento)
+- Nome del documento: {document_name}
 - Numero di pagina: (numero di pagina)
 - Contenuto effettivo: (contenuto pertinente del documento)

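Note: with the {document_name} placeholder added, phi_custom_template now takes three format arguments ({context} and {question} appear further down the template and are unchanged). Calling .format(context=..., question=...) without document_name, as the old code did, would raise a KeyError. A call consistent with the new template (sketch; variable names follow the new generate_answer later in this diff):

    prompt = phi_custom_template.format(
        context=context,              # retrieved page contents
        question=message,             # the user's question
        document_name=selected_file,  # PDF chosen in the UI dropdown
    )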
@@ -573,154 +593,155 @@ Detailed Answer:

 import traceback

-def generate_answer(message, choice, retrieval_mode, selected_model):
-    logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
-
-    # Logic for disabling options for Phi-3.5
-    if selected_model == "LM-2":
-        choice = None
-        retrieval_mode = None
-
-    try:
-        # Select the appropriate template based on the choice and model
-        if choice == "Details" and selected_model == chat_model1:  # GPT-4o-mini
-            prompt_template = PromptTemplate(input_variables=["context", "question"], template=gpt4o_mini_template_details)
-        elif choice == "Details":
-            prompt_template = QA_CHAIN_PROMPT_1
-        elif choice == "Conversational":
-            prompt_template = QA_CHAIN_PROMPT_2
-        else:
-            prompt_template = QA_CHAIN_PROMPT_1  # Fallback to template1
-
-        # # Handle hotel-related queries
-        # if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
-        #     logging.debug("Handling hotel-related query")
-        #     response = fetch_google_hotels()
-        #     logging.debug(f"Hotel response: {response}")
-        #     return response, extract_addresses(response)
-
-        # # Handle restaurant-related queries
-        # if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
-        #     logging.debug("Handling restaurant-related query")
-        #     response = fetch_yelp_restaurants()
-        #     logging.debug(f"Restaurant response: {response}")
-        #     return response, extract_addresses(response)
-
-        # # Handle flight-related queries
-        # if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
-        #     logging.debug("Handling flight-related query")
-        #     response = fetch_google_flights()
-        #     logging.debug(f"Flight response: {response}")
-        #     return response, extract_addresses(response)
-
-        # Retrieval-based response
-        if retrieval_mode == "VDB":
-            logging.debug("Using VDB retrieval mode")
-            if selected_model == chat_model:
-                logging.debug("Selected model: LM-1")
-                retriever = gpt_retriever
-                context = retriever.get_relevant_documents(message)
-                logging.debug(f"Retrieved context: {context}")
-
-                prompt = prompt_template.format(context=context, question=message)
-                logging.debug(f"Generated prompt: {prompt}")
-
-                qa_chain = RetrievalQA.from_chain_type(
-                    llm=chat_model,
-                    chain_type="stuff",
-                    retriever=retriever,
-                    chain_type_kwargs={"prompt": prompt_template}
-                )
-                response = qa_chain({"query": message})
-                logging.debug(f"LM-1 response: {response}")
-                return response['result'], extract_addresses(response['result'])
+# def generate_answer(message, choice, retrieval_mode, selected_model):
+# logging.debug(f"generate_answer called with choice: {choice}, retrieval_mode: {retrieval_mode}, and selected_model: {selected_model}")
+
+# # Logic for disabling options for Phi-3.5
+# if selected_model == "LM-2":
+# choice = None
+# retrieval_mode = None
+
+# try:
+# # Select the appropriate template based on the choice and model
+# if choice == "Details" and selected_model == chat_model1: # GPT-4o-mini
+# prompt_template = PromptTemplate(input_variables=["context", "question"], template=gpt4o_mini_template_details)
+# elif choice == "Details":
+# prompt_template = QA_CHAIN_PROMPT_1
+# elif choice == "Conversational":
+# prompt_template = QA_CHAIN_PROMPT_2
+# else:
+# prompt_template = QA_CHAIN_PROMPT_1 # Fallback to template1
+
+# # # Handle hotel-related queries
+# # if "hotel" in message.lower() or "hotels" in message.lower() and "birmingham" in message.lower():
+# # logging.debug("Handling hotel-related query")
+# # response = fetch_google_hotels()
+# # logging.debug(f"Hotel response: {response}")
+# # return response, extract_addresses(response)
+
+# # # Handle restaurant-related queries
+# # if "restaurant" in message.lower() or "restaurants" in message.lower() and "birmingham" in message.lower():
+# # logging.debug("Handling restaurant-related query")
+# # response = fetch_yelp_restaurants()
+# # logging.debug(f"Restaurant response: {response}")
+# # return response, extract_addresses(response)
+
+# # # Handle flight-related queries
+# # if "flight" in message.lower() or "flights" in message.lower() and "birmingham" in message.lower():
+# # logging.debug("Handling flight-related query")
+# # response = fetch_google_flights()
+# # logging.debug(f"Flight response: {response}")
+# # return response, extract_addresses(response)
+
+# # Retrieval-based response
+# if retrieval_mode == "VDB":
+# logging.debug("Using VDB retrieval mode")
+# if selected_model == chat_model:
+# logging.debug("Selected model: LM-1")
+# retriever = gpt_retriever
+# context = retriever.get_relevant_documents(message)
+# logging.debug(f"Retrieved context: {context}")
+
+# prompt = prompt_template.format(context=context, question=message)
+# logging.debug(f"Generated prompt: {prompt}")
+
+# qa_chain = RetrievalQA.from_chain_type(
+# llm=chat_model,
+# chain_type="stuff",
+# retriever=retriever,
+# chain_type_kwargs={"prompt": prompt_template}
+# )
+# response = qa_chain({"query": message})
+# logging.debug(f"LM-1 response: {response}")
+# return response['result'], extract_addresses(response['result'])

-            elif selected_model == chat_model1:
-                logging.debug("Selected model: LM-3")
-                retriever = gpt_retriever
-                context = retriever.get_relevant_documents(message)
-                logging.debug(f"Retrieved context: {context}")
-
-                prompt = prompt_template.format(context=context, question=message)
-                logging.debug(f"Generated prompt: {prompt}")
-
-                qa_chain = RetrievalQA.from_chain_type(
-                    llm=chat_model1,
-                    chain_type="stuff",
-                    retriever=retriever,
-                    chain_type_kwargs={"prompt": prompt_template}
-                )
-                response = qa_chain({"query": message})
-                logging.debug(f"LM-3 response: {response}")
-                return response['result'], extract_addresses(response['result'])
-
+# elif selected_model == chat_model1:
+# logging.debug("Selected model: LM-3")
+# retriever = gpt_retriever
+# context = retriever.get_relevant_documents(message)
+# logging.debug(f"Retrieved context: {context}")
+
+# prompt = prompt_template.format(context=context, question=message)
+# logging.debug(f"Generated prompt: {prompt}")
+
+# qa_chain = RetrievalQA.from_chain_type(
+# llm=chat_model1,
+# chain_type="stuff",
+# retriever=retriever,
+# chain_type_kwargs={"prompt": prompt_template}
+# )
+# response = qa_chain({"query": message})
+# logging.debug(f"LM-3 response: {response}")
+# return response['result'], extract_addresses(response['result'])


-            elif selected_model == phi_pipe:
-                logging.debug("Selected model: LM-2")
-                retriever = phi_retriever
-                context_documents = retriever.get_relevant_documents(message)
-                context = "\n".join([doc.page_content for doc in context_documents])
-                logging.debug(f"Retrieved context for LM-2: {context}")
-
-                # Use the correct template variable
-                prompt = phi_custom_template.format(context=context, question=message)
-                logging.debug(f"Generated LM-2 prompt: {prompt}")
-
-                response = selected_model(prompt, **{
-                    "max_new_tokens": 250,
-                    "return_full_text": True,
-                    "temperature": 0.0,
-                    "do_sample": False,
-                })
-
-                if response:
-                    generated_text = response[0]['generated_text']
-                    logging.debug(f"LM-2 Response: {generated_text}")
-                    cleaned_response = clean_response(generated_text)
-                    return cleaned_response, extract_addresses(cleaned_response)
-                else:
-                    logging.error("LM-2 did not return any response.")
-                    return "No response generated.", []
-
-        elif retrieval_mode == "KGF":
-            logging.debug("Using KGF retrieval mode")
-            response = chain_neo4j.invoke({"question": message})
-            logging.debug(f"KGF response: {response}")
-            return response, extract_addresses(response)
-        else:
-            logging.error("Invalid retrieval mode selected.")
-            return "Invalid retrieval mode selected.", []
-
-    except Exception as e:
-        logging.error(f"Error in generate_answer: {str(e)}")
-        logging.error(traceback.format_exc())
-        return "Sorry, I encountered an error while processing your request.", []
-
-# def generate_answer(message, choice, retrieval_mode, selected_model):
-# # Logic for Phi-3.5
-# if selected_model == phi_pipe: # LM-2 Phi-3.5 selected
-# retriever = phi_retriever
-# context_documents = retriever.get_relevant_documents(message)
-# context = "\n".join([doc.page_content for doc in context_documents])
-
-# # Use the correct template for Phi-3.5
-# prompt = phi_custom_template.format(context=context, question=message)
-
-# response = selected_model(prompt, **{
-# "max_new_tokens": 400,
-# "return_full_text": True,
-# "temperature": 0.7,
-# "do_sample": True,
-# })
-
-# if response:
-# generated_text = response[0]['generated_text']
-# cleaned_response = clean_response(generated_text)
-# # return cleaned_response, extract_addresses(cleaned_response)
-# return cleaned_response
+
+# elif selected_model == phi_pipe:
+# logging.debug("Selected model: LM-2")
+# retriever = phi_retriever
+# context_documents = retriever.get_relevant_documents(message)
+# context = "\n".join([doc.page_content for doc in context_documents])
+# logging.debug(f"Retrieved context for LM-2: {context}")
+
+# # Use the correct template variable
+# prompt = phi_custom_template.format(context=context, question=message)
+# logging.debug(f"Generated LM-2 prompt: {prompt}")
+
+# response = selected_model(prompt, **{
+# "max_new_tokens": 250,
+# "return_full_text": True,
+# "temperature": 0.0,
+# "do_sample": False,
+# })
+
+# if response:
+# generated_text = response[0]['generated_text']
+# logging.debug(f"LM-2 Response: {generated_text}")
+# cleaned_response = clean_response(generated_text)
+# return cleaned_response, extract_addresses(cleaned_response)
+# else:
+# logging.error("LM-2 did not return any response.")
+# return "No response generated.", []
+
+# elif retrieval_mode == "KGF":
+# logging.debug("Using KGF retrieval mode")
+# response = chain_neo4j.invoke({"question": message})
+# logging.debug(f"KGF response: {response}")
+# return response, extract_addresses(response)
 # else:
-# return "No response generated.", []
+# logging.error("Invalid retrieval mode selected.")
+# return "Invalid retrieval mode selected.", []
+
+# except Exception as e:
+# logging.error(f"Error in generate_answer: {str(e)}")
+# logging.error(traceback.format_exc())
+# return "Sorry, I encountered an error while processing your request.", []
+
+def generate_answer(message, choice, retrieval_mode, selected_model, selected_file):
+    # Ensure a file is selected
+    if not selected_file:
+        return "Please choose a file to proceed."
+
+    # Modify the Phi-3.5 prompt to include the selected file
+    if selected_model == phi_pipe:
+        retriever = phi_retriever
+        context_documents = retriever.get_relevant_documents(message)
+        context = "\n".join([doc.page_content for doc in context_documents])
+
+        prompt = phi_custom_template.format(context=context, question=message, document_name=selected_file)
+        response = selected_model(prompt, **{
+            "max_new_tokens": 250,
+            "return_full_text": True,
+            "temperature": 0.0,
+            "do_sample": False,
+        })
+
+        if response:
+            generated_text = response[0]['generated_text']
+            cleaned_response = clean_response(generated_text)
+            return cleaned_response
+        else:
+            return "No response generated.", []



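Note: the rewritten generate_answer handles only the phi_pipe (LM-2) path and returns None for any other model; its return shape is also mixed, a bare string on success and on the missing-file check, but a ("No response generated.", []) tuple on the empty-response branch, so callers should accept either. A minimal direct call might look like this sketch (hypothetical, not part of the commit; phi_pipe is the pipeline object defined elsewhere in app.py, and the file name is one entry of the files_list added in the next hunk):

    answer = generate_answer(
        message="Qual è il contenuto del documento?",
        choice=None,            # unused on the LM-2 / Phi-3.5 path
        retrieval_mode=None,    # unused on the LM-2 / Phi-3.5 path
        selected_model=phi_pipe,
        selected_file="176769_000219051794_V2.pdf",
    )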
@@ -1480,58 +1501,67 @@ def fetch_google_flights(departure_id="JFK", arrival_id="BHM", outbound_date=cur
 # # Function to insert the prompt into the textbox when clicked
 # def insert_prompt(current_text, prompt):
 #     return prompt[0] if prompt else current_text
+# List of files for dropdown

+files_list = [
+    "176769_000219051794_V2.pdf",
+    "177115_000230633979_V2.pdf",
+    "177273_000219963851_V2.pdf",
+    "177429_000219726050_V2.pdf",
+    "177495_000219962533_V2.pdf",
+    "178041_000197000852_V2.pdf"
+]





-with gr.Blocks(theme='gradio/soft') as demo:
+# with gr.Blocks(theme='gradio/soft') as demo:


-    with gr.Row():
-        with gr.Column():
-            state = gr.State()
+# with gr.Row():
+# with gr.Column():
+# state = gr.State()

-            chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
-            choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational",interactive=False,visible=False)
-            retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB",interactive=False,visible=False)
-            model_choice = gr.Dropdown(label="Choose Model", choices=["LM-2"], value="LM-2")
+# chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
+# choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational",interactive=False,visible=False)
+# retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB",interactive=False,visible=False)
+# model_choice = gr.Dropdown(label="Choose Model", choices=["LM-2"], value="LM-2")

-            # Link the dropdown change to handle_model_choice_change
-            model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])
+# # Link the dropdown change to handle_model_choice_change
+# model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])

-            # gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
+# # gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")

-            chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
-            tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")
+# chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
+# tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")

-            retriever_button = gr.Button("Retriever")
+# retriever_button = gr.Button("Retriever")

-            clear_button = gr.Button("Clear")
-            clear_button.click(lambda: [None, None], outputs=[chat_input, state])
+# clear_button = gr.Button("Clear")
+# clear_button.click(lambda: [None, None], outputs=[chat_input, state])

-            # gr.Markdown("<h1 style='color: red;'>Radar Map</h1>", elem_id="Map-Radar")
-            # location_output = gr.HTML()
-            audio_output = gr.Audio(interactive=False, autoplay=True)
+# # gr.Markdown("<h1 style='color: red;'>Radar Map</h1>", elem_id="Map-Radar")
+# # location_output = gr.HTML()
+# audio_output = gr.Audio(interactive=False, autoplay=True)

-            def stop_audio():
-                audio_output.stop()
-                return None
+# def stop_audio():
+# audio_output.stop()
+# return None





-            retriever_sequence = (
-                retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
-                .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
-                # First, generate the bot response
-                .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
-                # Then, generate the TTS response based on the bot's response
-                .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
-                .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
-            )
+# retriever_sequence = (
+# retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
+# .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
+# # First, generate the bot response
+# .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
+# # Then, generate the TTS response based on the bot's response
+# .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
+# .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
+# )



@@ -1540,17 +1570,17 @@ with gr.Blocks(theme='gradio/soft') as demo:



-            chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording").then(
-                fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
-            ).then(
-                # First, generate the bot response
-                fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
-            ).then(
-                # Then, generate the TTS response based on the bot's response
-                fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
-            ).then(
-                fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox"
-            )
+# chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording").then(
+# fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
+# ).then(
+# # First, generate the bot response
+# fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
+# ).then(
+# # Then, generate the TTS response based on the bot's response
+# fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
+# ).then(
+# fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox"
+# )



@@ -1558,31 +1588,31 @@ with gr.Blocks(theme='gradio/soft') as demo:



-            audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1)
-            audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="api_voice_to_text")
+# audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1)
+# audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="api_voice_to_text")

-            # gr.Markdown("<h1 style='color: red;'>Example Prompts</h1>", elem_id="Example-Prompts")
-            # gr.Examples(examples=examples, fn=insert_prompt,inputs=chat_input, outputs=chat_input)
+# # gr.Markdown("<h1 style='color: red;'>Example Prompts</h1>", elem_id="Example-Prompts")
+# # gr.Examples(examples=examples, fn=insert_prompt,inputs=chat_input, outputs=chat_input)

-        # with gr.Column():
-        #     weather_output = gr.HTML(value=fetch_local_weather())
-        #     news_output = gr.HTML(value=fetch_local_news())
-        #     events_output = gr.HTML(value=fetch_local_events())
+# # with gr.Column():
+# # weather_output = gr.HTML(value=fetch_local_weather())
+# # news_output = gr.HTML(value=fetch_local_news())
+# # events_output = gr.HTML(value=fetch_local_events())

-        # with gr.Column():
+# # with gr.Column():


-        #     # Call update_images during the initial load to display images when the interface appears
-        #     initial_images = update_images()
+# # # Call update_images during the initial load to display images when the interface appears
+# # initial_images = update_images()

-        #     # Displaying the images generated using Flux API directly
-        #     image_output_1 = gr.Image(value=initial_images[0], label="Image 1", elem_id="flux_image_1", width=400, height=400)
-        #     image_output_2 = gr.Image(value=initial_images[1], label="Image 2", elem_id="flux_image_2", width=400, height=400)
-        #     image_output_3 = gr.Image(value=initial_images[2], label="Image 3", elem_id="flux_image_3", width=400, height=400)
+# # # Displaying the images generated using Flux API directly
+# # image_output_1 = gr.Image(value=initial_images[0], label="Image 1", elem_id="flux_image_1", width=400, height=400)
+# # image_output_2 = gr.Image(value=initial_images[1], label="Image 2", elem_id="flux_image_2", width=400, height=400)
+# # image_output_3 = gr.Image(value=initial_images[2], label="Image 3", elem_id="flux_image_3", width=400, height=400)

-        #     # Refresh button to update images
-        #     refresh_button = gr.Button("Refresh Images")
-        #     refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
+# # # Refresh button to update images
+# # refresh_button = gr.Button("Refresh Images")
+# # refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])



@@ -1590,8 +1620,59 @@ with gr.Blocks(theme='gradio/soft') as demo:



-demo.queue()
-demo.launch(show_error=True)
+# demo.queue()
+# demo.launch(show_error=True)
+
+
+with gr.Blocks(theme='gradio/soft') as demo:
+    with gr.Row():
+        with gr.Column():
+            state = gr.State()
+
+            # File dropdown
+            file_dropdown = gr.Dropdown(label="Select Document", choices=files_list, value=None)
+
+            chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
+            choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational",interactive=False,visible=False)
+            retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB",interactive=False,visible=False)
+            model_choice = gr.Dropdown(label="Choose Model", choices=["LM-2"], value="LM-2")
+
+            # Link the dropdown change to handle_model_choice_change
+            model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])
+
+            chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
+            tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")
+            retriever_button = gr.Button("Retriever")
+            clear_button = gr.Button("Clear")
+            clear_button.click(lambda: [None, None], outputs=[chat_input, state])
+
+            audio_output = gr.Audio(interactive=False, autoplay=True)
+
+            def stop_audio():
+                audio_output.stop()
+                return None
+
+            # Modify retriever sequence to include file selection
+            retriever_sequence = (
+                retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
+                .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
+                # Pass the selected file to the bot response generation
+                .then(fn=generate_answer_with_file, inputs=[chatbot, choice, retrieval_mode, model_choice, file_dropdown], outputs=[chatbot], api_name="api_generate_bot_response_with_file")
+                .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
+                .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
+            )
+
+            chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording").then(
+                fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
+            ).then(
+                fn=generate_answer_with_file, inputs=[chatbot, choice, retrieval_mode, model_choice, file_dropdown], outputs=[chatbot], api_name="api_generate_bot_response_with_file"
+            ).then(
+                fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
+            ).then(
+                fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox"
+            )
+
+demo.launch()



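Note: the new event wiring registers generate_answer_with_file for both the Retriever button and chat_input.submit, but this commit does not define that function anywhere in the diff. A wrapper consistent with the wired inputs [chatbot, choice, retrieval_mode, model_choice, file_dropdown] might look like this sketch (hypothetical, not part of the commit):

    def generate_answer_with_file(history, choice, retrieval_mode, model_choice, selected_file):
        # Answer the latest user turn in the Chatbot history against the selected
        # document. model_choice is the string "LM-2" here, which corresponds to
        # the phi_pipe pipeline used by generate_answer.
        message = history[-1][0] if history else ""
        result = generate_answer(message, choice, retrieval_mode, phi_pipe, selected_file)
        # generate_answer returns a bare string on success but a (text, []) tuple
        # on the empty-response branch; normalize to text before displaying.
        text = result[0] if isinstance(result, tuple) else result
        history[-1] = (message, text)
        return history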
 