Update app.py
Browse files
app.py
CHANGED
@@ -10,8 +10,8 @@ import numpy as np
|
|
# Initialize Whisper model once at startup.
# NOTE(review): original hunk had a bare `except` with no body and an
# unterminated string in `st.error("` — both syntax errors; repaired to a
# targeted handler that reports the load failure to the Streamlit UI.
try:
    # "base" is the smallest general-purpose checkpoint; ensure the model is
    # installed from the correct Whisper library (openai-whisper).
    whisper_model = whisper.load_model("base")
except Exception as e:
    # Surface the failure in the app instead of crashing with a raw traceback.
    st.error(f"Error loading Whisper model: {e}")
|
15 |
|
16 |
# Language options
|
17 |
LANGUAGES = {
|
@@ -50,12 +50,12 @@ if video_file:
|
|
50 |
# Function to transcribe audio in chunks
|
51 |
def transcribe_audio_in_chunks(audio_path, model, chunk_length=30):
|
52 |
audio_clip = whisper.load_audio(audio_path)
|
53 |
-
audio_duration = len(audio_clip) / whisper.SAMPLE_RATE #
|
54 |
segments = []
|
55 |
|
56 |
for start in np.arange(0, audio_duration, chunk_length):
|
57 |
end = min(start + chunk_length, audio_duration)
|
58 |
-
segment = audio_clip[int(start * whisper.SAMPLE_RATE):int(end * whisper.SAMPLE_RATE)] # Convert to the right format
|
59 |
result = model.transcribe(segment)
|
60 |
segments.append(result['text'])
|
61 |
|
|
|
# Initialize Whisper model once at startup.
try:
    # "base" is the smallest general-purpose checkpoint; ensure the model is
    # installed from the correct Whisper library (openai-whisper).
    whisper_model = whisper.load_model("base")
except Exception as e:
    # Report the failure in the UI. Without stopping here, `whisper_model`
    # would remain unbound and every later use of it would raise NameError;
    # st.stop() halts this Streamlit script run cleanly after the error.
    st.error(f"Error loading Whisper model: {e}")
    st.stop()
|
15 |
|
16 |
# Language options
|
17 |
LANGUAGES = {
|
|
|
50 |
# Function to transcribe audio in chunks
|
51 |
def transcribe_audio_in_chunks(audio_path, model, chunk_length=30):
|
52 |
audio_clip = whisper.load_audio(audio_path)
|
53 |
+
audio_duration = len(audio_clip) / whisper.audio.SAMPLE_RATE # Calculate duration in seconds
|
54 |
segments = []
|
55 |
|
56 |
for start in np.arange(0, audio_duration, chunk_length):
|
57 |
end = min(start + chunk_length, audio_duration)
|
58 |
+
segment = audio_clip[int(start * whisper.audio.SAMPLE_RATE):int(end * whisper.audio.SAMPLE_RATE)] # Convert to the right format
|
59 |
result = model.transcribe(segment)
|
60 |
segments.append(result['text'])
|
61 |
|