Eniola Alese committed
Commit d272ebd · 1 Parent(s): 189641a

update app

Files changed (1)
app.py +4 -4
app.py CHANGED

@@ -121,7 +121,7 @@ with gr.Blocks(title="Voice chat with LLM") as demo:
     def add_text(chatbot_history, text):
         chatbot_history = [] if chatbot_history is None else chatbot_history
         chatbot_history = chatbot_history + [(text, None)]
-        return chatbot_history, gr.update(value="", interactive=False)
+        return chatbot_history, gr.Textbox(value="", interactive=False)
 
     # Will be triggered on voice submit (will transcribe and send to generate_speech)
     def add_audio(chatbot_history, audio):
@@ -131,7 +131,7 @@ with gr.Blocks(title="Voice chat with LLM") as demo:
         text = list(response)[0].text.strip()
         print("Transcribed text:", text)
         chatbot_history = chatbot_history + [(text, None)]
-        return chatbot_history, gr.update(value="", interactive=False)
+        return chatbot_history, gr.Textbox(value="", interactive=False)
 
     def generate_speech(chatbot_history, chatbot_voice, initial_greeting=False):
         # Start by yielding an initial empty audio to set up autoplay
@@ -159,12 +159,12 @@ with gr.Blocks(title="Voice chat with LLM") as demo:
     txt_msg = txt_box.submit(fn=add_text, inputs=[chatbot, txt_box], outputs=[chatbot, txt_box], queue=False
     ).then(fn=generate_speech, inputs=[chatbot, chatbot_voice], outputs=[sentence, chatbot, audio_playback])
 
-    txt_msg.then(fn=lambda: gr.update(interactive=True), inputs=None, outputs=[txt_box], queue=False)
+    txt_msg.then(fn=lambda: gr.Textbox(interactive=True), inputs=None, outputs=[txt_box], queue=False)
 
     audio_msg = audio_record.stop_recording(fn=add_audio, inputs=[chatbot, audio_record], outputs=[chatbot, txt_box], queue=False
     ).then(fn=generate_speech, inputs=[chatbot, chatbot_voice], outputs=[sentence, chatbot, audio_playback])
 
-    audio_msg.then(fn=lambda: (gr.update(interactive=True), gr.update(interactive=True, value=None)), inputs=None, outputs=[txt_box, audio_record], queue=False)
+    audio_msg.then(fn=lambda: (gr.Textbox(interactive=True), gr.Audio(interactive=True, value=None)), inputs=None, outputs=[txt_box, audio_record], queue=False)
 
     FOOTNOTE = """
     This Space demonstrates how to speak to an LLM chatbot, based solely on openly accessible models.
 
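For context, the commit follows the Gradio 4 convention of returning component instances (e.g. gr.Textbox(...)) from event handlers instead of gr.update(...). The sketch below is a stripped-down illustration of that pattern only, not the Space's actual app.py; the handler and component names here are assumptions.

# Minimal sketch of the gr.update -> component-instance pattern (Gradio 4).
# Names (demo, chat, txt_box, add_text, reenable) are illustrative, not from the Space.
import gradio as gr

def add_text(history, text):
    history = [] if history is None else history
    history = history + [(text, None)]
    # Returning gr.Textbox(...) updates the existing textbox in place:
    # clear its value and disable it while the bot is responding.
    return history, gr.Textbox(value="", interactive=False)

def reenable():
    # Re-enable the textbox once the response chain has finished.
    return gr.Textbox(interactive=True)

with gr.Blocks() as demo:
    chat = gr.Chatbot()
    txt_box = gr.Textbox(placeholder="Type a message")
    txt_box.submit(add_text, inputs=[chat, txt_box], outputs=[chat, txt_box]
                   ).then(reenable, inputs=None, outputs=[txt_box])

if __name__ == "__main__":
    demo.launch()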