# MusicChallenge / app.py
import streamlit as st
import whisper
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import base64
from pydub import AudioSegment
from hezar.models import Model
import librosa
import soundfile as sf
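# Dependencies: whisper transcribes English recordings, hezar's whisper-small-fa
# transcribes Persian, scikit-learn scores lyric similarity, base64 embeds the
# background image, and pydub/librosa/soundfile implement the audio effects in
# the "Make Challenge" tab.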
st.set_page_config(
page_title="Sing It Forward App",
page_icon="🎵")
st.markdown(
"""
<style>
body {
background: linear-gradient(to bottom, #0E5AAB, #00ffff);
padding: 20px;
border-radius: 10px;
}
a {
color: #EDA67C !important
}
</style>
""",
unsafe_allow_html=True
)
def load_image(image_file):
with open(image_file, "rb") as f:
return f.read()
image_data = load_image("bcg.jpg")
image_base64 = base64.b64encode(image_data).decode()
st.markdown(
f"""
<style>
.stApp {{
background-image: url(data:image/jpeg;base64,{image_base64});
background-size: cover;
background-position: center;
background-repeat: no-repeat;
}}
</style>
""",
unsafe_allow_html=True
)
st.markdown("<h1 style='text-align: center; margin-bottom: 5px;'>Sing It Forward App🎵</h1>", unsafe_allow_html=True)
description = """
<h5>Welcome to Sing It Forward App!</h5>
<p style="text-align: justify;">
Get ready to test your singing skills and memory! First, listen carefully to the first part of the song, then it’s your turn to shine.
Record yourself singing the next 15 seconds on your own, matching the lyrics and rhythm perfectly. Think you’ve got what it takes to keep the music going?
Let’s see if you can hit the right notes and showcase your talent! Unleash your inner star and take the challenge!
</p>
📌 For any questions, contact:
**Name:** <span style="color: #EDA67C;">Sahand Khorsandi</span>
**Email:** <a href="mailto:[email protected]" style="color: #EDA67C;">[email protected]</a>"""
st.markdown(description, unsafe_allow_html=True)
st.write('------')
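# Score how closely the user's transcribed singing matches the expected lyrics:
# both texts are embedded as TF-IDF vectors and compared with cosine similarity
# (1.0 = identical wording, 0.0 = no overlap).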
def cosine_sim(text1, text2):
vectorizer = TfidfVectorizer().fit_transform([text1, text2])
vectors = vectorizer.toarray()
return cosine_similarity(vectors)[0, 1]
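# One challenge round: play the reference clip, record the user's continuation,
# transcribe it with Whisper (English) or hezar's whisper-small-fa (Persian),
# and pass/fail on a cosine-similarity threshold of 0.85 against the expected lyrics.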
def take_challenge(music_file, typed_lyrics, key, language):
st.write("Listen to music since you have to record 15seconds after that")
st.audio(music_file)
    audio_value = st.experimental_audio_input("Sing the rest of the song: 🎙️", key=key)
if audio_value:
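        # Save the browser recording to disk so the speech-to-text models can read it from a file path.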
with open("user_sing.mp3", "wb") as f:
f.write(audio_value.getbuffer())
if language == "en":
english_model = whisper.load_model("base.en")
user_lyrics = english_model.transcribe("user_sing.mp3", language=language)["text"]
else:
persian_model = Model.load("hezarai/whisper-small-fa")
user_lyrics = persian_model.predict("user_sing.mp3")[0]["text"]
st.write(user_lyrics)
similarity_score = cosine_sim(typed_lyrics, user_lyrics)
if similarity_score > 0.85:
            st.success('Awesome! You are doing great', icon="✅")
st.markdown('<style>div.stAlert { background-color: rgba(3, 67, 24, 0.9); }</style>', unsafe_allow_html=True)
else:
st.error('Awful! Try harder next time', icon="🚨")
st.markdown('<style>div.stAlert { background-color: rgba(241, 36, 36, 0.9); }</style>', unsafe_allow_html=True)
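# Audio-effect helpers for the "Make Challenge" tab. Each one reads the current
# file, applies a single transformation (pydub for gain/filters/pan/fades,
# librosa for time-stretch and pitch-shift), and writes the result back to an MP3 file.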
def change_volume(input_file, output_file, volume_factor):
sound = AudioSegment.from_mp3(input_file)
volume_changed = sound + volume_factor
volume_changed.export(output_file, format="mp3")
def change_speed(input_file, output_file, speed_factor):
sound, sr = librosa.load(input_file)
speed_changed = librosa.effects.time_stretch(sound, rate=speed_factor)
sf.write(output_file, speed_changed, sr)
def change_pitch(input_file, output_file, pitch_factor):
sound, sr = librosa.load(input_file)
pitch_changed = librosa.effects.pitch_shift(sound, sr=sr, n_steps=pitch_factor)
sf.write(output_file, pitch_changed, sr)
def low_pass_filter(input_file, output_file, cutoff_freq):
sound = AudioSegment.from_mp3(input_file)
low_filtered_sound = sound.low_pass_filter(cutoff_freq)
low_filtered_sound.export(output_file, format="mp3")
def high_pass_filter(input_file, output_file, cutoff_freq):
sound = AudioSegment.from_mp3(input_file)
high_filtered_sound = sound.high_pass_filter(cutoff_freq)
high_filtered_sound.export(output_file, format="mp3")
def pan_left_right(input_file, output_file, pan_factor):
sound = AudioSegment.from_mp3(input_file)
pan_sound = sound.pan(pan_factor)
pan_sound.export(output_file, format="mp3")
def fade_in_ms(input_file, output_file, fade_factor):
sound = AudioSegment.from_mp3(input_file)
faded_sound = sound.fade_in(fade_factor)
faded_sound.export(output_file, format="mp3")
def fade_out_ms(input_file, output_file, fade_factor):
sound = AudioSegment.from_mp3(input_file)
faded_sound = sound.fade_out(fade_factor)
faded_sound.export(output_file, format="mp3")
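# Two tabs: "Take Challenge" runs the built-in English challenge (Titanic),
# while "Make Challenge" lets the user upload a song, edit it, and define the
# lyrics a challenger must sing.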
tab1, tab2 = st.tabs(["Take Challenge", "Make Challenge"])
with tab1:
lyrics = "Far across the distance And spaces between us You have come to show you go on"
take_challenge("titanic.mp3", lyrics, key="en_challenge", language="en")
with tab2:
st.write("Upload music to make challenge:")
uploaded_file = st.file_uploader("Choose a music file", type=["mp3", "wav"])
language_mapping = {"English": "en", "Persian": "fa"}
selected_language = st.radio("Select Language", language_mapping.keys(), horizontal=True)
language = language_mapping[selected_language]
if uploaded_file is not None:
with open("raw_music.mp3", "wb") as f:
f.write(uploaded_file.getbuffer())
st.audio("raw_music.mp3")
current_input = "raw_music.mp3"
output_file = "processed_music.mp3"
        trim_check = st.checkbox("Trim")
        if trim_check:
st.write("Specify start and end times for trimming:")
audio = AudioSegment.from_mp3(current_input)
duration = len(audio) // 1000
start_time = st.number_input("Start Time (seconds)", min_value=0, max_value=duration, value=0)
end_time = st.number_input("End Time (seconds)", min_value=0, max_value=duration, value=duration)
if start_time < end_time:
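                # pydub indexes AudioSegment objects in milliseconds, so the second-based inputs are scaled by 1000.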
trimmed_audio = audio[start_time * 1000:end_time * 1000]
trimmed_audio.export(output_file, format="mp3")
current_input = output_file
else:
                st.error('Start Time must be less than End Time!', icon="❌")
st.markdown('<style>div.stAlert { background-color: rgba(241, 36, 36, 0.9); }</style>', unsafe_allow_html=True)
volume_checkbox = st.checkbox("Change Volume")
if volume_checkbox:
            volume_factor = st.slider("Volume Change (dB)", -30, 30, 0)
change_volume(current_input, output_file, volume_factor)
current_input = output_file
speed_checkbox = st.checkbox("Change Speed")
if speed_checkbox:
speed_factor = st.slider("Speed Factor", 0.25, 2.0, 1.0)
change_speed(current_input, output_file, speed_factor)
current_input = output_file
pitch_checkbox = st.checkbox("Change Pitch")
if pitch_checkbox:
            pitch_factor = st.slider("Pitch Shift (semitones)", -12, 12, 0)
change_pitch(current_input, output_file, pitch_factor)
current_input = output_file
low_pass_checkbox = st.checkbox("Low Pass Filter")
if low_pass_checkbox:
cutoff_freq = st.slider("Low Pass Filter Cutoff Frequency", min_value=20, max_value=20000, value=2000)
low_pass_filter(current_input, output_file, cutoff_freq)
current_input = output_file
high_pass_checkbox = st.checkbox("High Pass Filter")
if high_pass_checkbox:
cutoff_freq = st.slider("High Pass Filter Cutoff Frequency", min_value=20, max_value=20000, value=2000)
high_pass_filter(current_input, output_file, cutoff_freq)
current_input = output_file
pan_checkbox = st.checkbox("Pan Left/Right")
if pan_checkbox:
pan_factor = st.slider("Pan Factor (-1 for Left, 1 for Right)", -1.0, 1.0, 0.0)
pan_left_right(current_input, output_file, pan_factor)
current_input = output_file
fade_in_checkbox = st.checkbox("Fade In")
if fade_in_checkbox:
fade_in_time = st.slider("Fade In Duration (ms)", min_value=0, max_value=10000, value=1000)
fade_in_ms(current_input, output_file, fade_in_time)
current_input = output_file
fade_out_checkbox = st.checkbox("Fade Out")
if fade_out_checkbox:
fade_out_time = st.slider("Fade Out Duration (ms)", min_value=0, max_value=10000, value=1000)
fade_out_ms(current_input, output_file, fade_out_time)
current_input = output_file
st.write("Now type what user should sing:")
typed_lyrics = st.text_area("Lyrics to be singed:")
st.write('------')
take_challenge(current_input, typed_lyrics, "unique_key_1", language)