Update handler.py
Browse files- handler.py +2 -2
handler.py
CHANGED
@@ -8,7 +8,7 @@ SAMPLE_RATE=16000
 class EndpointHandler():
     def __init__(self, path=""):
         device = "cuda:0" if torch.cuda.is_available() else "cpu"
-        pipe = pipeline(
+        self.pipe = pipeline(
             "automatic-speech-recognition",
             model="openai/whisper-large",
             chunk_length_s=30,
@@ -22,7 +22,7 @@ class EndpointHandler():
         audio_nparray = ffmpeg_read(inputs, 16000)
         audio_tensor = torch.from_numpy(audio_nparray)

-        prediction = pipe(audio_nparray, return_timestamps=True)
+        prediction = self.pipe(audio_nparray, return_timestamps=True)
         return {"text": prediction[0]}

         # we can also return timestamps for the predictions