Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -3,38 +3,43 @@ import librosa
|
|
3 |
import numpy as np
|
4 |
import soundfile as sf
|
5 |
|
6 |
-
def
|
7 |
-
# Load audio with
|
8 |
y, sr = librosa.load(audio_path, sr=None)
|
9 |
|
10 |
-
#
|
11 |
-
onset_env = librosa.onset.onset_strength(y=y, sr=sr
|
12 |
-
tempo,
|
13 |
|
14 |
-
#
|
15 |
-
|
|
|
|
|
|
|
|
|
|
|
16 |
|
17 |
# Reverse the entire audio first
|
18 |
y_reversed = y[::-1]
|
19 |
|
20 |
-
# Cut into
|
21 |
-
segments = [y_reversed[
|
22 |
|
23 |
# Reverse each segment back to forward
|
24 |
processed_audio = np.concatenate([segment[::-1] for segment in segments])
|
25 |
|
26 |
-
# Save the
|
27 |
output_path = "output.wav"
|
28 |
sf.write(output_path, processed_audio, sr)
|
29 |
|
30 |
return output_path
|
31 |
|
32 |
iface = gr.Interface(
|
33 |
-
fn=
|
34 |
inputs=gr.Audio(type="filepath"),
|
35 |
outputs=gr.Audio(),
|
36 |
-
title="
|
37 |
-
description="
|
38 |
)
|
39 |
|
40 |
iface.launch()
|
|
|
3 |
import numpy as np
|
4 |
import soundfile as sf
|
5 |
|
6 |
+
def reverse_segments_dynamic(audio_path):
    """Beat-slice an audio file so it plays "reversed" while each beat runs forward.

    The whole signal is reversed, cut at the detected beat positions, and each
    slice is flipped back — so the beats come out in reverse order, but every
    individual beat still plays forward.

    Parameters
    ----------
    audio_path : str
        Path to an input audio file in any format librosa can decode.

    Returns
    -------
    str
        Path of the written WAV file ("output.wav" in the working directory).
    """
    # Load audio at its original sampling rate (sr=None disables resampling).
    y, sr = librosa.load(audio_path, sr=None)

    # Track beats from the onset-strength envelope; trim=False keeps weak
    # leading/trailing beats so the segment grid covers more of the file.
    # (The original also computed a tempogram and a per-frame BPM array via the
    # deprecated librosa.beat.tempo, but never used them — dead code removed.)
    onset_env = librosa.onset.onset_strength(y=y, sr=sr)
    _tempo, beat_frames = librosa.beat.beat_track(
        y=y, sr=sr, onset_envelope=onset_env, trim=False
    )

    # Convert beat positions from frame indices to sample indices.
    beat_samples = librosa.frames_to_samples(beat_frames)

    # Reverse the entire audio first.
    y_reversed = y[::-1]

    # Cut the reversed audio into beat-length segments.
    # NOTE(review): beat_samples were measured on the forward signal but index
    # into the reversed one, so cut points are mirrored relative to the audio.
    # This matches the original behavior and is kept as-is.
    segments = [
        y_reversed[start:end]
        for start, end in zip(beat_samples[:-1], beat_samples[1:])
    ]

    if segments:
        # Flip each segment back to forward playback and stitch them together.
        processed_audio = np.concatenate([segment[::-1] for segment in segments])
    else:
        # Fewer than two beats detected: nothing to slice, fall back to the
        # plain reversed audio. (The original crashed here — np.concatenate
        # raises ValueError on an empty list.)
        processed_audio = y_reversed

    # Save the processed file.
    output_path = "output.wav"
    sf.write(output_path, processed_audio, sr)

    return output_path
|
36 |
|
37 |
# Gradio UI: upload an audio file, get back the beat-reversed rendering.
# `gr` is expected to be imported at the top of app.py (outside this hunk).
iface = gr.Interface(
    fn=reverse_segments_dynamic,
    inputs=gr.Audio(type="filepath"),  # hand the upload to fn as a file path
    outputs=gr.Audio(),                # fn returns a path; gradio plays the file
    title="Dynamic BPM Beat-Reversed Music",
    description="Detects BPM changes over time and reverses music accordingly, keeping beats aligned."
)

# Start the web server (blocks until the app is stopped).
iface.launch()
|