Update app.py
app.py CHANGED
@@ -62,6 +62,36 @@
 
 # if __name__ == "__main__":
 #     demo.launch()
+# import gradio as gr
+# from faster_whisper import WhisperModel
+
+# # Try to load the model on startup
+# try:
+#     model = WhisperModel("medium", device="cpu", compute_type="int8")
+# except Exception as e:
+#     # You could log the error or handle it more gracefully if needed
+#     model = None
+#     model_error = f"Failed to load model: {e}"
+
+# def transcribe(audio_file):
+#     if model is None:
+#         return model_error
+#     try:
+#         segments, info = model.transcribe(audio_file.name, beam_size=5)
+#         text = " ".join([seg.text for seg in segments])
+#         return text
+#     except Exception as e:
+#         return f"Transcription failed: {e}"
+
+# iface = gr.Interface(
+#     fn=transcribe,
+#     inputs=gr.Audio(sources=["upload"], type="filepath", label="Audio file"),
+#     outputs="text",
+#     title="Faster Whisper Transcription API",
+#     description="Upload audio and get transcription text."
+# )
+
+# iface.launch(server_name="0.0.0.0", server_port=7860)
 import gradio as gr
 from faster_whisper import WhisperModel
 
@@ -69,7 +99,6 @@ from faster_whisper import WhisperModel
 try:
     model = WhisperModel("medium", device="cpu", compute_type="int8")
 except Exception as e:
-    # You could log the error or handle it more gracefully if needed
     model = None
     model_error = f"Failed to load model: {e}"
 
@@ -77,7 +106,7 @@ def transcribe(audio_file):
     if model is None:
         return model_error
     try:
-        segments, info = model.transcribe(audio_file.name, beam_size=5)
+        segments, info = model.transcribe(audio_file, beam_size=5)
         text = " ".join([seg.text for seg in segments])
         return text
     except Exception as e:
@@ -91,4 +120,5 @@ iface = gr.Interface(
     description="Upload audio and get transcription text."
 )
 
-iface.launch(server_name="0.0.0.0", server_port=7860)
+iface.launch(server_name="0.0.0.0", server_port=7880)
+
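For reference, a minimal sketch of how the updated endpoint could be exercised once the app is running. It assumes the gradio_client package is installed, a local sample.wav file exists, and the default "/predict" route that a single gr.Interface exposes; none of these appear in the commit itself, and the port (7880) is taken from the new iface.launch() line.

# Hypothetical client call (assumptions: gradio_client is installed, the app is reachable
# at http://localhost:7880, and "sample.wav" is a placeholder audio file name).
from gradio_client import Client, handle_file

client = Client("http://localhost:7880/")       # base URL matching the updated launch() call
result = client.predict(
    handle_file("sample.wav"),                  # audio input for gr.Audio(type="filepath")
    api_name="/predict",                        # default endpoint name for a gr.Interface fn
)
print(result)                                   # transcription text returned by transcribe()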