sahandkh1419 committed
Commit f42c616
1 Parent(s): 8b0f701

Update app.py

Files changed (1):
  1. app.py +103 -14
app.py CHANGED
@@ -5,6 +5,8 @@ from sklearn.metrics.pairwise import cosine_similarity
 import base64
 from pydub import AudioSegment
 from hezar.models import Model
+import librosa
+import soundfile as sf
 
 st.set_page_config(
     page_title="Sing It Forward App",
@@ -66,8 +68,6 @@ st.write('------')
 
 
 
-
-
 def cosine_sim(text1, text2):
     vectorizer = TfidfVectorizer().fit_transform([text1, text2])
     vectors = vectorizer.toarray()
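
For reference, cosine_sim scores how close the transcribed singing is to the typed lyrics by comparing TF-IDF vectors; the rest of the function is outside this hunk, so the return value below is an assumption. A minimal standalone sketch of the same idea, with placeholder strings rather than app data:

# Standalone sketch of a TF-IDF cosine-similarity check (placeholder inputs).
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def cosine_sim(text1, text2):
    # Build a shared TF-IDF vocabulary over both texts, then compare the two vectors.
    vectors = TfidfVectorizer().fit_transform([text1, text2]).toarray()
    return cosine_similarity([vectors[0]], [vectors[1]])[0][0]

print(cosine_sim("sing it forward", "sing it forward tonight"))  # prints a score in [0, 1]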
@@ -97,6 +97,45 @@ def take_challenge(music_file, typed_lyrics, key, language):
         st.error('Awful! Try harder next time', icon="🚨")
         st.markdown('<style>div.stAlert { background-color: rgba(241, 36, 36, 0.9); }</style>', unsafe_allow_html=True)
 
+def change_volume(input_file, output_file, volume_factor):
+    sound = AudioSegment.from_mp3(input_file)
+    volume_changed = sound + volume_factor
+    volume_changed.export(output_file, format="mp3")
+
+def change_speed(input_file, output_file, speed_factor):
+    sound, sr = librosa.load(input_file)
+    speed_changed = librosa.effects.time_stretch(sound, rate=speed_factor)
+    sf.write(output_file, speed_changed, sr)
+
+def change_pitch(input_file, output_file, pitch_factor):
+    sound, sr = librosa.load(input_file)
+    pitch_changed = librosa.effects.pitch_shift(sound, sr=sr, n_steps=pitch_factor)
+    sf.write(output_file, pitch_changed, sr)
+
+def low_pass_filter(input_file, output_file, cutoff_freq):
+    sound = AudioSegment.from_mp3(input_file)
+    low_filtered_sound = sound.low_pass_filter(cutoff_freq)
+    low_filtered_sound.export(output_file, format="mp3")
+
+def high_pass_filter(input_file, output_file, cutoff_freq):
+    sound = AudioSegment.from_mp3(input_file)
+    high_filtered_sound = sound.high_pass_filter(cutoff_freq)
+    high_filtered_sound.export(output_file, format="mp3")
+
+def pan_left_right(input_file, output_file, pan_factor):
+    sound = AudioSegment.from_mp3(input_file)
+    pan_sound = sound.pan(pan_factor)
+    pan_sound.export(output_file, format="mp3")
+
+def fade_in_ms(input_file, output_file, fade_factor):
+    sound = AudioSegment.from_mp3(input_file)
+    faded_sound = sound.fade_in(fade_factor)
+    faded_sound.export(output_file, format="mp3")
+
+def fade_out_ms(input_file, output_file, fade_factor):
+    sound = AudioSegment.from_mp3(input_file)
+    faded_sound = sound.fade_out(fade_factor)
+    faded_sound.export(output_file, format="mp3")
 
 
 
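The new helpers wrap two toolkits: pydub's AudioSegment for gain, filtering, panning, and fades, and librosa plus soundfile for time-stretching and pitch-shifting. A quick standalone sketch of the librosa-based pair, assuming a local sample.mp3 (the file name is an assumption, not something the app provides):

# Sketch of the librosa effects used by change_speed / change_pitch above.
import librosa
import soundfile as sf

y, sr = librosa.load("sample.mp3")                          # decode to mono float samples
faster = librosa.effects.time_stretch(y, rate=1.5)          # 1.5x speed, pitch unchanged
higher = librosa.effects.pitch_shift(y, sr=sr, n_steps=2)   # up two semitones, length unchanged
sf.write("sample_fast.mp3", faster, sr)                     # MP3 output needs a libsndfile build with MP3 support
sf.write("sample_up2.mp3", higher, sr)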
 
@@ -117,25 +156,75 @@ with tab2:
         with open("raw_music.mp3", "wb") as f:
             f.write(uploaded_file.getbuffer())
         st.audio("raw_music.mp3")
-        trimm_check = st.checkbox("Trim?")
+
+        current_input = "raw_music.mp3"
+        output_file = "processed_music.mp3"
+
+        trimm_check = st.checkbox("Trim")
         if trimm_check:
             st.write("Specify start and end times for trimming:")
-            audio = AudioSegment.from_file("raw_music.mp3")
+            audio = AudioSegment.from_mp3(current_input)
             duration = len(audio) // 1000
             start_time = st.number_input("Start Time (seconds)", min_value=0, max_value=duration, value=0)
             end_time = st.number_input("End Time (seconds)", min_value=0, max_value=duration, value=duration)
             if start_time < end_time:
                 trimmed_audio = audio[start_time * 1000:end_time * 1000]
-                trimmed_audio.export("trimmed_music.mp3", format="mp3")
-                st.write("Now type what user should sing:")
-                typed_lyrics = st.text_area("Lyrics to be singed:")
-                st.write('------')
-                take_challenge("trimmed_music.mp3", typed_lyrics, "unique_key_1", language)
+                trimmed_audio.export(output_file, format="mp3")
+                current_input = output_file
             else:
                 st.error('Start Time should be smaller than End Time!', icon="❌")
                 st.markdown('<style>div.stAlert { background-color: rgba(241, 36, 36, 0.9); }</style>', unsafe_allow_html=True)
-        else:
-            st.write("Now type what user should sing:")
-            typed_lyrics = st.text_area("Lyrics to be singed:")
-            st.write('------')
-            take_challenge("raw_music.mp3", typed_lyrics, "unique_key_2", language)
+
+        volume_checkbox = st.checkbox("Change Volume")
+        if volume_checkbox:
+            volume_factor = st.slider("Volume Factor (dB)", -30, 30, 0)
+            change_volume(current_input, output_file, volume_factor)
+            current_input = output_file
+
+        speed_checkbox = st.checkbox("Change Speed")
+        if speed_checkbox:
+            speed_factor = st.slider("Speed Factor", 0.25, 2.0, 1.0)
+            change_speed(current_input, output_file, speed_factor)
+            current_input = output_file
+
+        pitch_checkbox = st.checkbox("Change Pitch")
+        if pitch_checkbox:
+            pitch_factor = st.slider("Pitch Shift (fractional steps)", -12, 12, 0)
+            change_pitch(current_input, output_file, pitch_factor)
+            current_input = output_file
+
+        low_pass_checkbox = st.checkbox("Low Pass Filter")
+        if low_pass_checkbox:
+            cutoff_freq = st.slider("Low Pass Filter Cutoff Frequency", min_value=20, max_value=20000, value=2000)
+            low_pass_filter(current_input, output_file, cutoff_freq)
+            current_input = output_file
+
+        high_pass_checkbox = st.checkbox("High Pass Filter")
+        if high_pass_checkbox:
+            cutoff_freq = st.slider("High Pass Filter Cutoff Frequency", min_value=20, max_value=20000, value=2000)
+            high_pass_filter(current_input, output_file, cutoff_freq)
+            current_input = output_file
+
+        pan_checkbox = st.checkbox("Pan Left/Right")
+        if pan_checkbox:
+            pan_factor = st.slider("Pan Factor (-1 for Left, 1 for Right)", -1.0, 1.0, 0.0)
+            pan_left_right(current_input, output_file, pan_factor)
+            current_input = output_file
+
+        fade_in_checkbox = st.checkbox("Fade In")
+        if fade_in_checkbox:
+            fade_in_time = st.slider("Fade In Duration (ms)", min_value=0, max_value=10000, value=1000)
+            fade_in_ms(current_input, output_file, fade_in_time)
+            current_input = output_file
+
+        fade_out_checkbox = st.checkbox("Fade Out")
+        if fade_out_checkbox:
+            fade_out_time = st.slider("Fade Out Duration (ms)", min_value=0, max_value=10000, value=1000)
+            fade_out_ms(current_input, output_file, fade_out_time)
+            current_input = output_file
+
+
+        st.write("Now type what user should sing:")
+        typed_lyrics = st.text_area("Lyrics to be singed:")
+        st.write('------')
+        take_challenge(current_input, typed_lyrics, "unique_key_1", language)
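The rewritten tab2 flow threads one pair of paths through every optional effect: each enabled step reads current_input, writes processed_music.mp3, and then repoints current_input at that output, so effects stack in checkbox order and take_challenge always receives the most recently processed file. A rough sketch of the same chaining pattern outside Streamlit, assuming the helper functions above are in scope and using placeholder file names and settings:

# Sketch of the sequential effect chain; file names and chosen effects are illustrative.
current_input = "sample.mp3"
output_file = "processed.mp3"

steps = [
    (True,  lambda src, dst: change_volume(src, dst, 6)),    # boost by 6 dB
    (False, lambda src, dst: change_speed(src, dst, 1.25)),  # disabled, skipped
    (True,  lambda src, dst: fade_in_ms(src, dst, 2000)),    # 2-second fade-in
]

for enabled, apply_effect in steps:
    if enabled:
        apply_effect(current_input, output_file)
        current_input = output_file   # the next enabled effect reads the processed file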