Update app.py
app.py
CHANGED
@@ -94,12 +94,24 @@ class GeminiHandler(AsyncAudioVideoStreamHandler):
             turn = self.session.receive()
             try:
                 async for response in turn:
+                    # Check if data exists before trying to process it as audio
                     if data := response.data:
                         audio = np.frombuffer(data, dtype=np.int16).reshape(1, -1)
-
+                        self.audio_queue.put_nowait(audio)  # Only put if audio was created
+                    # You might want to handle other parts of the response here
+                    # e.g., response.text, response.tool_code, etc.
+                    # For now, we just ensure we don't crash if data is None.
+
             except websockets.exceptions.ConnectionClosedOK:
                 print("connection closed")
                 break
+            except Exception as e:
+                # Catch other potential errors during response processing
+                print(f"Error processing response: {e}")
+                # Depending on the error, you might want to break or continue
+                # For now, let's break to prevent infinite loops on persistent errors
+                break
+

     async def video_receive(self, frame: np.ndarray):
         self.video_queue.put_nowait(frame)

@@ -124,8 +136,17 @@ class GeminiHandler(AsyncAudioVideoStreamHandler):
         _, array = frame
         array = array.squeeze()
         audio_message = encode_audio(array)
-
-
+        # Add a check to ensure the session is still active before sending
+        if self.session and not self.session._ws.closed:  # Check if session exists and websocket is not closed
+            try:
+                await self.session.send(input=audio_message)
+            except websockets.exceptions.ConnectionClosedOK:
+                print("Attempted to send on a closed connection.")
+            except Exception as e:
+                print(f"Error sending audio message: {e}")
+        else:
+            print("Session not active, cannot send audio message.")
+

     async def emit(self):
         array = await wait_for_item(self.audio_queue, 0.01)
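One caveat with the guard added above: `self.session._ws.closed` reads a private attribute of the live session object, so it may break between library releases. A minimal alternative sketch, assuming nothing beyond the `session.send(input=...)` call and the `ConnectionClosedOK` exception already used in app.py, is to track liveness with a flag the handler itself owns; the `GuardedSender` class and its method names here are hypothetical, not part of app.py:

import asyncio

import websockets.exceptions


class GuardedSender:
    """Hypothetical helper (not from app.py): tracks connection liveness
    with an asyncio.Event instead of reading the session's private _ws."""

    def __init__(self) -> None:
        self.session = None               # set once the live session is opened
        self.connected = asyncio.Event()  # set by the connect coroutine, cleared on close

    async def send_audio(self, audio_message) -> None:
        # Guard on state this object owns, not on session internals.
        if self.session is None or not self.connected.is_set():
            print("Session not active, cannot send audio message.")
            return
        try:
            await self.session.send(input=audio_message)
        except websockets.exceptions.ConnectionClosedOK:
            self.connected.clear()        # remember that the connection is gone
            print("Attempted to send on a closed connection.")
        except Exception as e:
            print(f"Error sending audio message: {e}")

The same flag can be cleared in the receive loop's `ConnectionClosedOK` handler, so the send and receive paths agree on whether the session is still usable.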