jhj0517 committed · Commit db60455 · Parent: 564cf04
fix spaces bug
modules/whisper/faster_whisper_inference.py CHANGED
@@ -9,6 +9,7 @@ import ctranslate2
 import whisper
 import gradio as gr
 from argparse import Namespace
+import spaces
 
 from modules.whisper.whisper_parameter import *
 from modules.whisper.whisper_base import WhisperBase
@@ -30,6 +31,7 @@ class FasterWhisperInference(WhisperBase):
         self.available_models = self.model_paths.keys()
         self.available_compute_types = self.get_available_compute_type()
 
+    @spaces.GPU
     def transcribe(self,
                    audio: Union[str, BinaryIO, np.ndarray],
                    progress: gr.Progress,
@@ -87,6 +89,7 @@ class FasterWhisperInference(WhisperBase):
         elapsed_time = time.time() - start_time
         return segments_result, elapsed_time
 
+    @spaces.GPU
     def update_model(self,
                      model_size: str,
                      compute_type: str,
@@ -146,6 +149,7 @@ class FasterWhisperInference(WhisperBase):
         return ['int16', 'float32', 'int8', 'int8_float32']
 
     @staticmethod
+    @spaces.GPU
     def get_device():
         if torch.cuda.is_available():
             return "cuda"
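For context: the spaces package imported above is Hugging Face's ZeroGPU helper, and decorating a method with @spaces.GPU attaches a GPU only while that call runs, which is what this commit does for the GPU-bound methods of FasterWhisperInference. A minimal sketch of the same pattern, using an illustrative class that is not part of this repo:

import spaces                              # Hugging Face ZeroGPU helper, assumed installed in the Space
import torch

class DummyInference:                      # illustrative only, not from this repo
    @spaces.GPU                            # GPU is attached only while this call runs
    def transcribe(self, audio_path: str) -> str:
        device = "cuda" if torch.cuda.is_available() else "cpu"
        # placeholder for the real model call; just reports which device was used
        return f"transcribed {audio_path} on {device}"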
modules/whisper/whisper_base.py CHANGED
@@ -249,6 +249,7 @@ class WhisperBase(ABC):
         self.release_cuda_memory()
         self.remove_input_files([mic_audio])
 
+    @spaces.GPU
     def transcribe_youtube(self,
                            youtube_link: str,
                            file_format: str,
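As a side note, the ZeroGPU decorator can also take a per-call duration hint in seconds for functions that may hold the GPU longer than the default window; the snippet below is a hedged sketch, and both the function and the 120-second value are illustrative rather than taken from this commit:

import spaces

@spaces.GPU(duration=120)        # assumed ZeroGPU option: reserve the GPU for up to ~120 s per call
def long_running_transcription(audio_path: str):
    ...                          # hypothetical GPU-bound work would go here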