Update app.py
app.py CHANGED

@@ -3,7 +3,7 @@ from transformers import pipeline
 from transformers.pipelines.audio_utils import ffmpeg_read
 import gradio as gr

-MODEL_NAME = "riteshkr/whisper-large-v3"
+MODEL_NAME = "riteshkr/quantized-whisper-large-v3"
 BATCH_SIZE = 8

 device = 0 if torch.cuda.is_available() else "cpu"
@@ -14,9 +14,6 @@ pipe = pipeline(
     chunk_length_s=30,
     device=device,
 )
-
-
-# Copied from https://github.com/openai/whisper/blob/c09a7ae299c4c34c5839a76380ae407e7d785914/whisper/utils.py#L50
 def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = "."):
     if seconds is not None:
         milliseconds = round(seconds * 1000.0)
@@ -33,7 +30,6 @@ def format_timestamp(seconds: float, always_include_hours: bool = False, decimal
         hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
         return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
     else:
-        # we have a malformed timestamp so just return it as is
         return seconds

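For context, here is a minimal sketch of how the pieces touched by this diff typically fit together in the Space's app.py. The pipeline arguments and the middle of format_timestamp are not fully visible in the hunks above, so they are filled in with typical values and with the openai/whisper utils.py formatter the removed comment pointed to; the transcribe() helper and the gr.Interface wiring are illustrative assumptions, not code from this commit.

# Sketch only: transcribe() and the Gradio wiring below are assumptions for
# illustration; the constants, pipeline setup, and format_timestamp follow app.py.
import torch
import gradio as gr
from transformers import pipeline

MODEL_NAME = "riteshkr/quantized-whisper-large-v3"
BATCH_SIZE = 8

device = 0 if torch.cuda.is_available() else "cpu"

pipe = pipeline(
    task="automatic-speech-recognition",
    model=MODEL_NAME,
    chunk_length_s=30,
    device=device,
)

def format_timestamp(seconds: float, always_include_hours: bool = False, decimal_marker: str = "."):
    # Mirrors openai/whisper's utils.py formatter, wrapped in a None check as in app.py.
    if seconds is not None:
        milliseconds = round(seconds * 1000.0)

        hours = milliseconds // 3_600_000
        milliseconds -= hours * 3_600_000

        minutes = milliseconds // 60_000
        milliseconds -= minutes * 60_000

        seconds = milliseconds // 1_000
        milliseconds -= seconds * 1_000

        hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else ""
        return f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}"
    else:
        # malformed timestamp, return it as is
        return seconds

def transcribe(audio_path):
    # return_timestamps=True yields per-chunk (start, end) offsets in seconds.
    result = pipe(audio_path, batch_size=BATCH_SIZE, return_timestamps=True)
    lines = []
    for chunk in result["chunks"]:
        start, end = chunk["timestamp"]
        lines.append(f"[{format_timestamp(start)} -> {format_timestamp(end)}] {chunk['text'].strip()}")
    return "\n".join(lines)

demo = gr.Interface(fn=transcribe, inputs=gr.Audio(type="filepath"), outputs="text")

if __name__ == "__main__":
    demo.launch()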