ar08 committed
Commit 03c0141 · verified · 1 Parent(s): aac3f56

Update app.py

Files changed (1):
1. app.py (+7, -7)
app.py CHANGED

@@ -77,7 +77,6 @@ def transcribe_and_chat(audio):
     response, audio_path = asyncio.run(chat_with_ai(text))
     return response, audio_path
 
-# Define the Gradio interface
 def create_demo():
     with gr.Blocks() as demo:
         gr.Markdown("# AI Voice Assistant")
@@ -90,6 +89,10 @@ def create_demo():
         chat_output = gr.Textbox(label="AI Response")
         audio_output = gr.Audio(label="AI Voice Response", autoplay=True)
 
+        def process_audio(audio):
+            response, audio_path = transcribe_and_chat(audio)
+            return response, audio_path, None  # Return None to clear the audio input
+
         demo.load(None, js="""
         function() {
             document.querySelector("audio").addEventListener("stop", function() {
@@ -98,30 +101,27 @@ def create_demo():
                 }, 500);
             });
 
-            // Function to play the assistant's audio response
             function playAssistantAudio() {
                 var audioElements = document.querySelectorAll('audio');
                 if (audioElements.length > 1) {
-                    var assistantAudio = audioElements[1]; // The second audio element is the assistant's response
+                    var assistantAudio = audioElements[1];
                     if (assistantAudio) {
                         assistantAudio.play();
                     }
                 }
             }
 
-            // Play assistant's audio when it's loaded
             document.addEventListener('gradioAudioLoaded', function(event) {
                 playAssistantAudio();
             });
 
-            // Also try to play the audio when the interface updates
            document.addEventListener('gradioUpdated', function(event) {
-                setTimeout(playAssistantAudio, 100); // Small delay to ensure audio is ready
+                setTimeout(playAssistantAudio, 100);
            });
         }
         """)
 
-        audio_input.change(transcribe_and_chat, inputs=audio_input, outputs=[chat_output, audio_output])
+        audio_input.change(process_audio, inputs=[audio_input], outputs=[chat_output, audio_output, audio_input])
 
     return demo
 
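The net effect of the commit is that the microphone input is now routed through a small wrapper instead of transcribe_and_chat directly, and the wrapper returns an extra None for the audio_input component, which (per the commit's own comment) clears the recording after each turn. Below is a minimal, self-contained sketch of that wiring; the transcribe_and_chat stub, the None guard, and the launch() call are illustrative assumptions, not part of app.py:

import gradio as gr

# Hypothetical stand-in for the app's transcribe_and_chat(); the real function
# transcribes the recording and calls chat_with_ai(), returning (text, audio path).
def transcribe_and_chat(audio):
    return f"Heard: {audio}", audio

def create_demo():
    with gr.Blocks() as demo:
        audio_input = gr.Audio(label="Speak", type="filepath")
        chat_output = gr.Textbox(label="AI Response")
        audio_output = gr.Audio(label="AI Voice Response", autoplay=True)

        # Wrapper introduced by the commit: the extra None output is mapped to
        # audio_input, which (per the commit's comment) clears the recording.
        def process_audio(audio):
            if audio is None:
                # Ignore the follow-up change event fired by the clearing itself
                # (a precaution in this sketch, not part of the commit).
                return "", None, None
            response, audio_path = transcribe_and_chat(audio)
            return response, audio_path, None

        audio_input.change(
            process_audio,
            inputs=[audio_input],
            outputs=[chat_output, audio_output, audio_input],
        )
    return demo

if __name__ == "__main__":
    create_demo().launch()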