shukdevdatta123 committed
Commit 450f755 · verified · 1 parent: 7a38f58

Update app.py

Files changed (1): app.py (+7 −20)
app.py CHANGED
@@ -1,6 +1,6 @@
 import streamlit as st
+import hmac
 import os
-import textwrap
 from helpers import text_to_speech, autoplay_audio, speech_to_text, get_api_key
 from generate_answer import base_model_chatbot, with_pdf_chatbot
 from audio_recorder_streamlit import audio_recorder
@@ -31,7 +31,6 @@ def main(answer_mode: str):
     with footer_container:
         audio_bytes = audio_recorder()
 
-    # Display previous messages
     for message in st.session_state.messages:
         with st.chat_message(message["role"]):
             st.write(message["content"])
@@ -57,27 +56,15 @@ def main(answer_mode: str):
                 final_response = base_model_chatbot(st.session_state.messages)
             elif answer_mode == 'pdf_chat':
                 final_response = with_pdf_chatbot(st.session_state.messages)
-
-            # Display response in chunks if it's too long
-            with st.spinner("Generating audio response..."):
-                audio_files = text_to_speech(final_response)
-                autoplay_audio(audio_files)
-
-            # Use the display_text_in_chunks function to avoid truncation
-            display_text_in_chunks(final_response)
-
-            st.session_state.messages.append({"role": "assistant", "content": final_response})
-        for audio_file in audio_files:
+            with st.spinner("Generating audio response..."):
+                audio_file = text_to_speech(final_response)
+                autoplay_audio(audio_file)
+            st.write(final_response)
+            st.session_state.messages.append({"role": "assistant", "content": final_response})
             os.remove(audio_file)
 
     # Float the footer container and provide CSS to target it with
     footer_container.float("bottom: 0rem;")
 
-def display_text_in_chunks(text, chunk_size=500):
-    """Display long text in manageable chunks."""
-    chunks = textwrap.wrap(text, chunk_size)
-    for chunk in chunks:
-        st.write(chunk)
-
 if __name__ == "__main__":
-    main(answer_mode='base_model')  # Or: answer_mode='pdf_chat'
+    main(answer_mode='base_model')  # Or: answer_mode='base_model'
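
Taken together, the second and third hunks drop the chunked display path (display_text_in_chunks plus a list of audio files) in favour of a single text_to_speech call and a plain st.write. A sketch of the assistant-response block as it reads after this commit; the enclosing main() structure and the exact indentation are assumptions, since they sit outside the visible hunks:

            # Inside main(answer_mode): generate the assistant reply.
            if answer_mode == 'base_model':
                final_response = base_model_chatbot(st.session_state.messages)
            elif answer_mode == 'pdf_chat':
                final_response = with_pdf_chatbot(st.session_state.messages)
            with st.spinner("Generating audio response..."):
                audio_file = text_to_speech(final_response)   # single file path now, not a list
                autoplay_audio(audio_file)
            st.write(final_response)                          # full response shown once; chunking removed
            st.session_state.messages.append({"role": "assistant", "content": final_response})
            os.remove(audio_file)                             # clean up the temporary audio file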
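
The first hunk also adds import hmac, yet none of the visible hunks use it. A common reason to import hmac in a Streamlit app is a simple password gate built on hmac.compare_digest and st.secrets; the sketch below is only a guess at that intent, not code from this commit (check_password and the "password" secret name are hypothetical):

import hmac
import streamlit as st

def check_password() -> bool:
    """Return True once the user has entered the password stored in st.secrets."""
    def password_entered():
        # Constant-time comparison of the entered and stored passwords.
        if hmac.compare_digest(st.session_state["password"], st.secrets["password"]):
            st.session_state["password_correct"] = True
            del st.session_state["password"]  # do not keep the raw password in session state
        else:
            st.session_state["password_correct"] = False

    if st.session_state.get("password_correct", False):
        return True

    st.text_input("Password", type="password", on_change=password_entered, key="password")
    if st.session_state.get("password_correct") is False:
        st.error("Password incorrect")
    return False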