Upload app.py
app.py CHANGED
@@ -28,8 +28,6 @@ for required_variable in required_variables:
 
 # Create the transcription pipeline.
 model_name = os.environ["MODEL_NAME"]
-model_name = "openai/whisper-tiny" # TODO: Remove this.
-logger.warning("Using hardcoded model name 'openai/whisper-tiny'.")
 device = "cuda" if torch.cuda.is_available() else "cpu"
 logger.info(f"Loading model {model_name} with device {device}...")
 transcriber = pipeline(
@@ -145,7 +143,7 @@ def transcribe_audio(audio: Tuple[int, np.ndarray], password: str = None) -> str
     # Calculate elapsed time
     elapsed_time = time.time() - start_time
     audio_time = len(y) / sr
-    status_string = f"Transcription took {elapsed_time:.2f}s for {audio_time:.2f}s of audio"
+    status_string = f"Transcription took {elapsed_time:.2f}s for {audio_time:.2f}s of audio with model {model_name}."
     return result["text"], status_string
 
 
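For context, the first hunk cuts off after the opening line of the pipeline() call. The following is a minimal sketch of how the setup around lines 28-33 presumably looks; the pipeline task and keyword arguments are assumptions and are not part of this diff.

# Hypothetical reconstruction of the setup around the first hunk; the
# pipeline task and keyword arguments are assumptions, not shown in the diff.
import logging
import os

import torch
from transformers import pipeline

logger = logging.getLogger(__name__)

# MODEL_NAME must now come from the environment (e.g. a Space variable);
# the hardcoded "openai/whisper-tiny" override and its warning were removed.
model_name = os.environ["MODEL_NAME"]
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Loading model {model_name} with device {device}...")
transcriber = pipeline(
    "automatic-speech-recognition",  # assumed task for a Whisper-style model
    model=model_name,
    device=device,
)

With the hardcoded override gone, running the app outside the Space presumably requires the variable to be set explicitly, e.g. MODEL_NAME=openai/whisper-tiny python app.py, since the required_variables check at the top of the file expects it.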