Pijush2023 committed
Commit df8ecfb · verified · 1 Parent(s): 34d5ece

Update app.py

Files changed (1):
  app.py +175 -71
app.py CHANGED
@@ -400,27 +400,39 @@ Sure! The Responses are as follows:
 
 
 
-def generate_bot_response(history, choice, retrieval_mode, model_choice):
-    if not history:
-        return
-
-    # Select the model
-    # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
-    selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
-
-    response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
-    history[-1][1] = ""
-
-    for character in response:
-        history[-1][1] += character
-        yield history  # Stream each character as it is generated
-        time.sleep(0.05)  # Add a slight delay to simulate streaming
-
-    yield history  # Final yield with the complete response
+# def generate_bot_response(history, choice, retrieval_mode, model_choice):
+#     if not history:
+#         return
+
+#     # Select the model
+#     # selected_model = chat_model if model_choice == "LM-1" else phi_pipe
+#     selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)
+
+#     response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)
+#     history[-1][1] = ""
+
+#     for character in response:
+#         history[-1][1] += character
+#         yield history  # Stream each character as it is generated
+#         time.sleep(0.05)  # Add a slight delay to simulate streaming
+
+#     yield history  # Final yield with the complete response
+
+# Modified chatbot response function to also return the confidence plot
+def generate_bot_response(chatbot, choice, retrieval_mode, model_choice):
+    # Your chatbot logic to generate the response
+    response = f"This is a sample response based on your query."
+
+    # Get the confidence level (you can customize this for your actual confidence calculation)
+    confidence_level = get_confidence_level(response)
+
+    # Plot the confidence as a bar graph
+    confidence_plot = plot_confidence_bar(confidence_level)
+
+    # Return both the chatbot response and the confidence plot
+    return chatbot + [[None, response]], confidence_plot
 
 
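Note: this hunk swaps the old streaming generator for a stub that returns the whole reply at once, so the character-by-character effect in the chat window is lost. Below is a minimal sketch of a variant that keeps the old streaming behaviour while also filling the new confidence-plot output. `generate_bot_response_streaming` is a hypothetical name; the sketch assumes the helpers already defined in app.py (`chat_model`, `chat_model1`, `phi_pipe`, `generate_answer`, `get_confidence_level`, `plot_confidence_bar`) are in scope, and relies on Gradio's convention that a generator bound to several outputs yields one value per output.

```python
import time

def generate_bot_response_streaming(history, choice, retrieval_mode, model_choice):
    if not history:
        return

    # Same model selection as the removed generator
    selected_model = chat_model if model_choice == "LM-1" else (chat_model1 if model_choice == "LM-3" else phi_pipe)

    response, addresses = generate_answer(history[-1][0], choice, retrieval_mode, selected_model)

    # Compute the confidence plot once, before streaming begins
    confidence_plot = plot_confidence_bar(get_confidence_level(response))

    history[-1][1] = ""
    for character in response:
        history[-1][1] += character
        yield history, confidence_plot  # one value each for [chatbot, confidence_plot_output]
        time.sleep(0.05)  # slight delay to simulate streaming

    yield history, confidence_plot  # final yield with the complete response
```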
 
@@ -1505,66 +1517,193 @@ def fetch_google_flights(departure_id="JFK", arrival_id="BHM", outbound_date=cur
     # return prompt[0] if prompt else current_text
 
 
+#confidence Level
+import numpy as np
+import matplotlib.pyplot as plt
+
+# Function to calculate the confidence level (you can modify this to use real confidence scores)
+def get_confidence_level(response):
+    # Simulate a confidence score between 0 and 1 for demonstration purposes
+    confidence_level = np.random.rand()
+    return confidence_level
+
+# Function to plot confidence score as a bar graph
+def plot_confidence_bar(confidence_level):
+    fig, ax = plt.subplots()
+
+    categories = ['Confidence Level']
+    values = [confidence_level]
+
+    # Create a bar chart
+    ax.bar(categories, values, color='blue')
+    ax.set_ylim(0, 1)  # Confidence level is between 0 and 1
+    ax.set_title('Chatbot Confidence Level')
+    ax.set_ylabel('Confidence Score')
+
+    # Save the plot as an image
+    temp_image_path = "confidence_plot.png"
+    plt.savefig(temp_image_path)
+    plt.close(fig)
+
+    return temp_image_path
+
 
+
-with gr.Blocks(theme='gradio/soft') as demo:
+# with gr.Blocks(theme='gradio/soft') as demo:
 
 
+#     with gr.Row():
+#         with gr.Column():
+#             state = gr.State()
+
+#             chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
+#             choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational",interactive=False,visible=False)
+#             retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB",interactive=False,visible=False)
+#             model_choice = gr.Dropdown(label="Choose Model", choices=["LM-2"], value="LM-2")
+
+#             # Link the dropdown change to handle_model_choice_change
+#             model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])
+
+#             # gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
+
+#             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
+#             tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")
+
+#             retriever_button = gr.Button("Retriever")
+
+#             clear_button = gr.Button("Clear")
+#             clear_button.click(lambda: [None, None], outputs=[chat_input, state])
+
+#             # gr.Markdown("<h1 style='color: red;'>Radar Map</h1>", elem_id="Map-Radar")
+#             # location_output = gr.HTML()
+#             audio_output = gr.Audio(interactive=False, autoplay=True)
+
+#             def stop_audio():
+#                 audio_output.stop()
+#                 return None
+
+
+
+
+
+#             retriever_sequence = (
+#                 retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
+#                 .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
+#                 # First, generate the bot response
+#                 .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
+#                 # Then, generate the TTS response based on the bot's response
+#                 .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
+#                 .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
+#             )
+
+
+
+
+
+
+
+
+#             chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording").then(
+#                 fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
+#             ).then(
+#                 # First, generate the bot response
+#                 fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
+#             ).then(
+#                 # Then, generate the TTS response based on the bot's response
+#                 fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
+#             ).then(
+#                 fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox"
+#             )
+
+
+
+
+
+
+
+#             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1)
+#             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="api_voice_to_text")
+
+#             # gr.Markdown("<h1 style='color: red;'>Example Prompts</h1>", elem_id="Example-Prompts")
+#             # gr.Examples(examples=examples, fn=insert_prompt,inputs=chat_input, outputs=chat_input)
+
+#         # with gr.Column():
+#         #     weather_output = gr.HTML(value=fetch_local_weather())
+#         #     news_output = gr.HTML(value=fetch_local_news())
+#         #     events_output = gr.HTML(value=fetch_local_events())
+
+#         # with gr.Column():
+
+
+#         #     # Call update_images during the initial load to display images when the interface appears
+#         #     initial_images = update_images()
+
+#         #     # Displaying the images generated using Flux API directly
+#         #     image_output_1 = gr.Image(value=initial_images[0], label="Image 1", elem_id="flux_image_1", width=400, height=400)
+#         #     image_output_2 = gr.Image(value=initial_images[1], label="Image 2", elem_id="flux_image_2", width=400, height=400)
+#         #     image_output_3 = gr.Image(value=initial_images[2], label="Image 3", elem_id="flux_image_3", width=400, height=400)
+
+#         #     # Refresh button to update images
+#         #     refresh_button = gr.Button("Refresh Images")
+#         #     refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
+
+
+
+
+
+
+
+# demo.queue()
+# demo.launch(show_error=True)
+
+
+
+# Rest of your Gradio code
+with gr.Blocks(theme='gradio/soft') as demo:
+
     with gr.Row():
         with gr.Column():
             state = gr.State()
 
             chatbot = gr.Chatbot([], elem_id="RADAR:Channel 94.1", bubble_full_width=False)
-            choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational",interactive=False,visible=False)
-            retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB",interactive=False,visible=False)
+            choice = gr.Radio(label="Select Style", choices=["Details", "Conversational"], value="Conversational", interactive=False, visible=False)
+            retrieval_mode = gr.Radio(label="Retrieval Mode", choices=["VDB", "KGF"], value="VDB", interactive=False, visible=False)
             model_choice = gr.Dropdown(label="Choose Model", choices=["LM-2"], value="LM-2")
 
             # Link the dropdown change to handle_model_choice_change
             model_choice.change(fn=handle_model_choice_change, inputs=model_choice, outputs=[retrieval_mode, choice, choice])
 
-            # gr.Markdown("<h1 style='color: red;'>Talk to RADAR</h1>", elem_id="voice-markdown")
-
             chat_input = gr.Textbox(show_copy_button=True, interactive=True, show_label=False, label="ASK Radar !!!")
             tts_choice = gr.Radio(label="Select TTS System", choices=["Alpha", "Beta"], value="Alpha")
-
+
             retriever_button = gr.Button("Retriever")
-
             clear_button = gr.Button("Clear")
             clear_button.click(lambda: [None, None], outputs=[chat_input, state])
 
-            # gr.Markdown("<h1 style='color: red;'>Radar Map</h1>", elem_id="Map-Radar")
-            # location_output = gr.HTML()
             audio_output = gr.Audio(interactive=False, autoplay=True)
 
             def stop_audio():
                 audio_output.stop()
                 return None
 
-
-
-
+            # Add new output for the confidence bar graph
+            confidence_plot_output = gr.Image(label="Confidence Level Bar Graph")
 
             retriever_sequence = (
                 retriever_button.click(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording")
                 .then(fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory")
-                # First, generate the bot response
-                .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response")
+                # First, generate the bot response and confidence score
+                .then(fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot, confidence_plot_output], api_name="api_generate_bot_response")
                 # Then, generate the TTS response based on the bot's response
                 .then(fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response")
                 .then(fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox")
             )
 
-
-
-
-
-
-
-
             chat_input.submit(fn=stop_audio, inputs=[], outputs=[audio_output], api_name="api_stop_audio_recording").then(
                 fn=add_message, inputs=[chatbot, chat_input], outputs=[chatbot, chat_input], api_name="api_addprompt_chathistory"
             ).then(
-                # First, generate the bot response
-                fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot], api_name="api_generate_bot_response"
+                # First, generate the bot response and confidence score
+                fn=generate_bot_response, inputs=[chatbot, choice, retrieval_mode, model_choice], outputs=[chatbot, confidence_plot_output], api_name="api_generate_bot_response"
             ).then(
                 # Then, generate the TTS response based on the bot's response
                 fn=generate_tts_response, inputs=[chatbot, tts_choice], outputs=[audio_output], api_name="api_generate_tts_response"
@@ -1572,43 +1711,8 @@ with gr.Blocks(theme='gradio/soft') as demo:
                 fn=clear_textbox, inputs=[], outputs=[chat_input], api_name="api_clear_textbox"
             )
 
-
-
-
-
-
-
             audio_input = gr.Audio(sources=["microphone"], streaming=True, type='numpy', every=0.1)
             audio_input.stream(transcribe_function, inputs=[state, audio_input], outputs=[state, chat_input], api_name="api_voice_to_text")
-
-            # gr.Markdown("<h1 style='color: red;'>Example Prompts</h1>", elem_id="Example-Prompts")
-            # gr.Examples(examples=examples, fn=insert_prompt,inputs=chat_input, outputs=chat_input)
-
-        # with gr.Column():
-        #     weather_output = gr.HTML(value=fetch_local_weather())
-        #     news_output = gr.HTML(value=fetch_local_news())
-        #     events_output = gr.HTML(value=fetch_local_events())
-
-        # with gr.Column():
-
-
-        #     # Call update_images during the initial load to display images when the interface appears
-        #     initial_images = update_images()
-
-        #     # Displaying the images generated using Flux API directly
-        #     image_output_1 = gr.Image(value=initial_images[0], label="Image 1", elem_id="flux_image_1", width=400, height=400)
-        #     image_output_2 = gr.Image(value=initial_images[1], label="Image 2", elem_id="flux_image_2", width=400, height=400)
-        #     image_output_3 = gr.Image(value=initial_images[2], label="Image 3", elem_id="flux_image_3", width=400, height=400)
-
-        #     # Refresh button to update images
-        #     refresh_button = gr.Button("Refresh Images")
-        #     refresh_button.click(fn=update_images, inputs=None, outputs=[image_output_1, image_output_2, image_output_3])
-
-
-
-
-
-
 
 demo.queue()
 demo.launch(show_error=True)
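Note on the new plotting helpers: `plt.savefig` runs inside request handlers on what is typically a headless server, and every call writes the same `confidence_plot.png`, so two concurrent users can overwrite each other's plot. A hardened sketch under those assumptions, not part of the commit (Agg is matplotlib's standard non-GUI backend; `tempfile` gives each call its own file):

```python
import tempfile

import matplotlib
matplotlib.use("Agg")  # select the non-interactive backend before pyplot is imported
import matplotlib.pyplot as plt

def plot_confidence_bar(confidence_level):
    fig, ax = plt.subplots()
    ax.bar(['Confidence Level'], [confidence_level], color='blue')
    ax.set_ylim(0, 1)  # confidence level is between 0 and 1
    ax.set_title('Chatbot Confidence Level')
    ax.set_ylabel('Confidence Score')

    # One file per call; delete=False keeps the file around for gr.Image to read
    tmp = tempfile.NamedTemporaryFile(suffix=".png", delete=False)
    fig.savefig(tmp.name)
    plt.close(fig)

    return tmp.name
```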
 
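Note: `get_confidence_level` currently returns `np.random.rand()`, so the bar graph shows a placeholder score rather than a real model confidence. A quick standalone check of the two committed helpers, assuming they are importable from app.py or pasted into a REPL:

```python
import os

level = get_confidence_level("sample response")
path = plot_confidence_bar(level)

assert 0.0 <= level < 1.0    # np.random.rand() draws uniformly from [0, 1)
assert os.path.exists(path)  # a PNG was written for gr.Image to display
print(f"confidence={level:.2f}, plot saved to {path}")
```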