david
committed on
Commit
·
b2de29e
1
Parent(s):
02e7bde
update coroutine call
Browse files
transcribe/whisper_llm_serve.py
CHANGED
@@ -5,7 +5,7 @@ import threading
|
|
5 |
import time
|
6 |
from logging import getLogger
|
7 |
from typing import List, Optional, Iterator, Tuple, Any
|
8 |
-
|
9 |
import numpy as np
|
10 |
# import wordninja
|
11 |
from api_model import TransResult, Message
|
@@ -40,7 +40,7 @@ class WhisperTranscriptionService(ServeClientBase):
|
|
40 |
|
41 |
# 文本分隔符,根据语言设置
|
42 |
self.text_separator = self._get_text_separator(language)
|
43 |
-
|
44 |
# 发送就绪状态
|
45 |
self.send_ready_state()
|
46 |
self._transcrible_analysis = None
|
@@ -50,6 +50,8 @@ class WhisperTranscriptionService(ServeClientBase):
|
|
50 |
self.translate_thread = self._start_thread(self._transcription_processing_loop)
|
51 |
self.frame_processing_thread = self._start_thread(self._frame_processing_loop)
|
52 |
|
|
|
|
|
53 |
|
54 |
def _start_thread(self, target_function) -> threading.Thread:
|
55 |
"""启动守护线程执行指定函数"""
|
@@ -100,9 +102,13 @@ class WhisperTranscriptionService(ServeClientBase):
|
|
100 |
"""应用语音活动检测来优化音频缓冲区"""
|
101 |
with self.lock:
|
102 |
if self.frames_np is not None:
|
|
|
103 |
frame = self.frames_np.copy()
|
104 |
processed_audio = self._translate_pipe.voice_detect(frame.tobytes())
|
105 |
self.frames_np = np.frombuffer(processed_audio.audio, dtype=np.float32).copy()
|
|
|
|
|
|
|
106 |
|
107 |
def _update_audio_buffer(self, offset: int) -> None:
|
108 |
"""从音频缓冲区中移除已处理的部分"""
|
@@ -204,8 +210,7 @@ class WhisperTranscriptionService(ServeClientBase):
|
|
204 |
time.sleep(0.2)
|
205 |
continue
|
206 |
logger.debug(f"🥤 Buffer Length: {len(audio_buffer)/self.sample_rate:.2f} ")
|
207 |
-
|
208 |
-
# save_to_wave(f"dev-{c}.wav", audio_buffer)
|
209 |
|
210 |
# try:
|
211 |
segments = self._transcribe_audio(audio_buffer)
|
@@ -255,7 +260,7 @@ class WhisperTranscriptionService(ServeClientBase):
|
|
255 |
try:
|
256 |
message = Message(result=result, request_id=self.client_uid).model_dump_json(by_alias=True)
|
257 |
coro = self.websocket.send_text(message)
|
258 |
-
asyncio.
|
259 |
except RuntimeError:
|
260 |
self.stop()
|
261 |
except Exception as e:
|
|
|
5 |
import time
|
6 |
from logging import getLogger
|
7 |
from typing import List, Optional, Iterator, Tuple, Any
|
8 |
+
import asyncio
|
9 |
import numpy as np
|
10 |
# import wordninja
|
11 |
from api_model import TransResult, Message
|
|
|
40 |
|
41 |
# 文本分隔符,根据语言设置
|
42 |
self.text_separator = self._get_text_separator(language)
|
43 |
+
self.loop = asyncio.get_event_loop()
|
44 |
# 发送就绪状态
|
45 |
self.send_ready_state()
|
46 |
self._transcrible_analysis = None
|
|
|
50 |
self.translate_thread = self._start_thread(self._transcription_processing_loop)
|
51 |
self.frame_processing_thread = self._start_thread(self._frame_processing_loop)
|
52 |
|
53 |
+
# self._c = 0
|
54 |
+
|
55 |
|
56 |
def _start_thread(self, target_function) -> threading.Thread:
|
57 |
"""启动守护线程执行指定函数"""
|
|
|
102 |
"""应用语音活动检测来优化音频缓冲区"""
|
103 |
with self.lock:
|
104 |
if self.frames_np is not None:
|
105 |
+
self._c+= 1
|
106 |
frame = self.frames_np.copy()
|
107 |
processed_audio = self._translate_pipe.voice_detect(frame.tobytes())
|
108 |
self.frames_np = np.frombuffer(processed_audio.audio, dtype=np.float32).copy()
|
109 |
+
# if len(frame) > self.sample_rate:
|
110 |
+
# save_to_wave(f"{self._c}-org.wav", frame)
|
111 |
+
# save_to_wave(f"{self._c}-vad.wav", self.frames_np)
|
112 |
|
113 |
def _update_audio_buffer(self, offset: int) -> None:
|
114 |
"""从音频缓冲区中移除已处理的部分"""
|
|
|
210 |
time.sleep(0.2)
|
211 |
continue
|
212 |
logger.debug(f"🥤 Buffer Length: {len(audio_buffer)/self.sample_rate:.2f} ")
|
213 |
+
|
|
|
214 |
|
215 |
# try:
|
216 |
segments = self._transcribe_audio(audio_buffer)
|
|
|
260 |
try:
|
261 |
message = Message(result=result, request_id=self.client_uid).model_dump_json(by_alias=True)
|
262 |
coro = self.websocket.send_text(message)
|
263 |
+
asyncio.run_coroutine_threadsafe(coro, self.loop)
|
264 |
except RuntimeError:
|
265 |
self.stop()
|
266 |
except Exception as e:
|