Update app.py
Browse files
app.py
CHANGED
@@ -50,12 +50,12 @@ if video_file:
 50       # Function to transcribe audio in chunks
 51       def transcribe_audio_in_chunks(audio_path, model, chunk_length=30):
 52           audio_clip = whisper.load_audio(audio_path)
 53   -       audio_duration =
 54           segments = []
 55
 56           for start in np.arange(0, audio_duration, chunk_length):
 57               end = min(start + chunk_length, audio_duration)
 58   -           segment = audio_clip[int(start *
 59               result = model.transcribe(segment)
 60               segments.append(result['text'])
 61
@@ -81,8 +81,10 @@ if video_file:
 81               st.audio(audio_output_path, format="audio/mp3")
 82           except Exception as e:
 83               st.error(f"Error during transcription/translation: {e}")
 84
 85       # Clean up temporary files
 86       os.remove(temp_video_path)
 87       os.remove(audio_path)
 88   -
 50       # Function to transcribe audio in chunks
 51       def transcribe_audio_in_chunks(audio_path, model, chunk_length=30):
 52           audio_clip = whisper.load_audio(audio_path)
 53   +       audio_duration = len(audio_clip) / whisper.SAMPLE_RATE  # Get audio duration in seconds
 54           segments = []
 55
 56           for start in np.arange(0, audio_duration, chunk_length):
 57               end = min(start + chunk_length, audio_duration)
 58   +           segment = audio_clip[int(start * whisper.SAMPLE_RATE):int(end * whisper.SAMPLE_RATE)]  # Convert to the right format
 59               result = model.transcribe(segment)
 60               segments.append(result['text'])
 61
 81               st.audio(audio_output_path, format="audio/mp3")
 82           except Exception as e:
 83               st.error(f"Error during transcription/translation: {e}")
 84   +           audio_output_path = None  # Ensure this variable is defined
 85
 86       # Clean up temporary files
 87       os.remove(temp_video_path)
 88       os.remove(audio_path)
 89   +   if audio_output_path:  # Only remove if it was created
 90   +       os.remove(audio_output_path)