Upload 2 files
- handler.py +34 -0
- requirements.txt +1 -0
handler.py
ADDED
@@ -0,0 +1,34 @@
+from typing import Dict
+from faster_whisper import WhisperModel
+import io
+
+class EndpointHandler:
+    def __init__(self, model_dir=None):
+        # Use the provided model directory, or fall back to the "medium" model size
+        model_size = "medium" if model_dir is None else model_dir
+        # Run on the GPU with float16 for faster inference; switch device to "cpu" if no GPU is available
+        self.model = WhisperModel(model_size, device="cuda", compute_type="float16")
+
+    def __call__(self, data: Dict) -> Dict[str, object]:
+        # The request payload is expected to carry the audio file bytes under the "inputs" key
+        audio_bytes = data["inputs"]
+
+        # Wrap the raw bytes in a file-like object for the model
+        audio_file = io.BytesIO(audio_bytes)
+
+        # Perform transcription; faster-whisper returns a lazy generator of segments
+        segments, info = self.model.transcribe(audio_file)
+
+        # Join the segment texts and pull out the detected-language information
+        text = " ".join(segment.text for segment in segments)
+        language_code = info.language
+        language_prob = info.language_probability
+
+        # Compile the response dictionary
+        result = {
+            "text": text,
+            "language": language_code,
+            "language_probability": language_prob
+        }
+
+        return result
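
For a quick local check, the handler can be called directly with a dict that mimics the endpoint payload. The snippet below is a minimal sketch, assuming faster-whisper is installed, a CUDA GPU is available (the handler hard-codes device="cuda"), and a local audio file exists; the file name "sample.wav" is a placeholder.

from handler import EndpointHandler

# Load the default "medium" model; this downloads weights on first use
handler = EndpointHandler()

# Read raw audio bytes and wrap them in the payload shape the handler expects
with open("sample.wav", "rb") as f:
    payload = {"inputs": f.read()}

result = handler(payload)
print(result["text"])
print(result["language"], result["language_probability"])
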
requirements.txt
ADDED
@@ -0,0 +1 @@
+faster-whisper @ https://github.com/guillaumekln/faster-whisper/archive/a4f1cc8f11433e454c3934442b5e1a4ed5e865c3.tar.gz
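
Once deployed (for example as a Hugging Face Inference Endpoint), a client posts the raw audio bytes to the endpoint, which surfaces them to the handler as data["inputs"]. The sketch below is illustrative only: the URL and token are placeholders, and the exact request-to-payload mapping depends on the serving toolkit in front of the handler.

import requests

ENDPOINT_URL = "https://your-endpoint.example.com"  # placeholder URL
HF_TOKEN = "hf_..."  # placeholder token

# Send the raw audio bytes as the request body
with open("sample.wav", "rb") as f:
    audio_bytes = f.read()

response = requests.post(
    ENDPOINT_URL,
    headers={
        "Authorization": f"Bearer {HF_TOKEN}",
        "Content-Type": "audio/wav",
    },
    data=audio_bytes,
)
print(response.json())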