import os
import io
import gradio as gr
import torch
import numpy as np
import re
import pronouncing
import functools
from transformers import (
    AutoModelForAudioClassification,
    AutoFeatureExtractor,
    AutoTokenizer,
    pipeline,
    AutoModelForCausalLM,
    BitsAndBytesConfig
)
from huggingface_hub import login
from utils import (
    load_audio,
    extract_audio_duration,
    extract_mfcc_features,
    format_genre_results,
    ensure_cuda_availability
)
from emotionanalysis import MusicAnalyzer
import librosa
from beat_analysis import BeatAnalyzer

# Beat/phrase analyzer used to build per-phrase lyric templates
beat_analyzer = BeatAnalyzer()

# Log in to the Hugging Face Hub when an access token is provided
if "HF_TOKEN" in os.environ:
    login(token=os.environ["HF_TOKEN"])

# Model names and audio constants
GENRE_MODEL_NAME = "dima806/music_genres_classification"
MUSIC_DETECTION_MODEL = "MIT/ast-finetuned-audioset-10-10-0.4593"
LLM_MODEL_NAME = "Qwen/Qwen3-32B"
SAMPLE_RATE = 22050  # sample rate used when loading audio for analysis

# Check CUDA availability (controls device placement for the genre model)
CUDA_AVAILABLE = ensure_cuda_availability()

print("Loading genre classification model...")
try:
    genre_feature_extractor = AutoFeatureExtractor.from_pretrained(GENRE_MODEL_NAME)
    genre_model = AutoModelForAudioClassification.from_pretrained(
        GENRE_MODEL_NAME,
        device_map="auto" if CUDA_AVAILABLE else None
    )

    def get_genre_model():
        return genre_model, genre_feature_extractor
except Exception as e:
    print(f"Error loading genre model: {str(e)}")
    genre_model = None
    genre_feature_extractor = None

print("Loading Qwen LLM model with 4-bit quantization...")
try:
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True
    )

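    # NF4 4-bit weights with double quantization and fp16 compute keep the 32B-parameter
    # model's weight memory at roughly a quarter of its fp16 size (assumption: a CUDA
    # device with enough memory is available, since device_map="auto" is used below).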
    llm_tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_NAME)
    llm_model = AutoModelForCausalLM.from_pretrained(
        LLM_MODEL_NAME,
        quantization_config=quantization_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.float16,
        use_cache=True
    )
except Exception as e:
    print(f"Error loading LLM model: {str(e)}")
    llm_tokenizer = None
    llm_model = None

music_analyzer = MusicAnalyzer()


def process_audio(audio_file):
    if audio_file is None:
        return "No audio file provided", None, None, None, None, None, None, None

    try:
        # Load the audio and measure its duration
        y, sr = load_audio(audio_file, sr=SAMPLE_RATE)
        duration = extract_audio_duration(y, sr)

        # Run the full musical analysis (rhythm, tonality, emotion, theme)
        music_analysis = music_analyzer.analyze_music(audio_file)

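        # The analysis result is expected to be a nested dict; the fields relied on in
        # this file (judging by how they are read below) are:
        #   rhythm_analysis:  tempo, estimated_time_signature
        #   tonal_analysis:   key, mode
        #   emotion_analysis: primary_emotion
        #   theme_analysis:   primary_theme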
        time_signature = music_analysis["rhythm_analysis"]["estimated_time_signature"]

        # Only 4/4, 3/4, 2/4 and 6/8 are handled; fall back to 4/4 otherwise
        if time_signature not in ["4/4", "3/4", "2/4", "6/8"]:
            time_signature = "4/4"
            music_analysis["rhythm_analysis"]["estimated_time_signature"] = time_signature

        # Analyze the beat structure and build one lyric template per musical phrase
        beat_analysis = beat_analyzer.analyze_beat_pattern(audio_file, time_signature=time_signature)
        lyric_templates = beat_analyzer.create_lyric_template(beat_analysis)

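        # Each lyric template is assumed to describe one musical phrase and to provide at
        # least 'num_beats' and a 'stress_pattern' string using 'S' and 'M' for strong and
        # medium beats (anything else is treated as weak), since those are the fields
        # consumed in the summary below and in analyze_lyrics_rhythm_match.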
        music_analysis["beat_analysis"] = beat_analysis
        music_analysis["lyric_templates"] = lyric_templates

        tempo = music_analysis["rhythm_analysis"]["tempo"]
        emotion = music_analysis["emotion_analysis"]["primary_emotion"]
        theme = music_analysis["theme_analysis"]["primary_theme"]

        # Genre classification (the audio is resampled to the 16 kHz rate the classifier expects)
        if genre_model is not None and genre_feature_extractor is not None:
            y_16k = librosa.resample(y, orig_sr=sr, target_sr=16000)

            inputs = genre_feature_extractor(
                y_16k,
                sampling_rate=16000,
                return_tensors="pt"
            ).to(genre_model.device)

            with torch.no_grad():
                outputs = genre_model(**inputs)
                logits = outputs.logits
                probs = torch.nn.functional.softmax(logits, dim=-1)

            # Top-5 genres as (label, probability) pairs
            values, indices = torch.topk(probs[0], k=5)
            top_genres = [(genre_model.config.id2label[idx.item()], val.item()) for val, idx in zip(values, indices)]
        else:
            top_genres = [("Unknown", 1.0)]

        genre_results_text = format_genre_results(top_genres)
        primary_genre = top_genres[0][0]

        # Generate lyrics matched to the analysis, then score how well they fit the beat
        lyrics = generate_lyrics(music_analysis, primary_genre, duration)
        beat_match_analysis = analyze_lyrics_rhythm_match(lyrics, lyric_templates, primary_genre)

        analysis_summary = f"""
### Music Analysis Results

**Duration:** {duration:.2f} seconds
**Tempo:** {tempo:.1f} BPM
**Time Signature:** {time_signature}
**Key:** {music_analysis["tonal_analysis"]["key"]} {music_analysis["tonal_analysis"]["mode"]}
**Primary Emotion:** {emotion}
**Primary Theme:** {theme}
**Top Genre:** {primary_genre}

{genre_results_text}
"""

        if lyric_templates:
            analysis_summary += f"""
### Beat Analysis

**Total Phrases:** {len(lyric_templates)}
**Average Beats Per Phrase:** {np.mean([t['num_beats'] for t in lyric_templates]):.1f}
**Beat Pattern Examples:**
- Phrase 1: {lyric_templates[0]['stress_pattern'] if lyric_templates else 'N/A'}
- Phrase 2: {lyric_templates[1]['stress_pattern'] if len(lyric_templates) > 1 else 'N/A'}
"""

        return analysis_summary, lyrics, tempo, time_signature, emotion, theme, primary_genre, beat_match_analysis

    except Exception as e:
        error_msg = f"Error processing audio: {str(e)}"
        print(error_msg)
        return error_msg, None, None, None, None, None, None, None


def generate_lyrics(music_analysis, genre, duration):
    try:
        tempo = music_analysis["rhythm_analysis"]["tempo"]
        key = music_analysis["tonal_analysis"]["key"]
        mode = music_analysis["tonal_analysis"]["mode"]
        emotion = music_analysis["emotion_analysis"]["primary_emotion"]
        theme = music_analysis["theme_analysis"]["primary_theme"]

        lyric_templates = music_analysis.get("lyric_templates", [])

        if llm_model is None or llm_tokenizer is None:
            return "Error: LLM model not properly loaded"

        # Without beat templates, fall back to a simple free-form prompt
        if not lyric_templates:
            prompt = f"""Write song lyrics for a {genre} song in {key} {mode} with tempo {tempo} BPM. The emotion is {emotion} and theme is {theme}.

ONLY WRITE THE ACTUAL LYRICS. NO EXPLANATIONS OR META-TEXT.
"""
        else:
            num_phrases = len(lyric_templates)

            prompt = f"""Write song lyrics for a {genre} song in {key} {mode} with tempo {tempo} BPM. The emotion is {emotion} and theme is {theme}.

I need EXACTLY {num_phrases} lines of lyrics - one line for each musical phrase. Not one more, not one less.

FORMAT:
- Just write {num_phrases} plain text lines
- Each line should be simple song lyrics (no annotations, no numbers, no labeling)
- Don't include any explanations, thinking tags, or meta-commentary
- Don't use any <think> or [thinking] tags
- Don't include [Verse], [Chorus] or section markers
- Don't include line numbers

EXAMPLE OF WHAT I WANT (for a {num_phrases}-line song):
Lost in the shadows of yesterday
Dreams fade away like morning dew
Time slips through fingers like desert sand
Memories echo in empty rooms
(... and so on for exactly {num_phrases} lines)

JUST THE PLAIN LYRICS, EXACTLY {num_phrases} LINES.
"""

        messages = [
            {"role": "user", "content": prompt}
        ]

        text = llm_tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        model_inputs = llm_tokenizer([text], return_tensors="pt").to(llm_model.device)

        generated_ids = llm_model.generate(
            **model_inputs,
            max_new_tokens=1024,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            repetition_penalty=1.2,
            pad_token_id=llm_tokenizer.eos_token_id
        )

        output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
        lyrics = llm_tokenizer.decode(output_ids, skip_special_tokens=True).strip()

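        # At this point `lyrics` holds only the newly generated completion (the prompt
        # tokens were sliced off above). Qwen3 models may emit <think>...</think> reasoning
        # blocks, which the cleanup below strips along with other non-lyric text.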
        # Phrases and dividers that models often place before the actual lyrics;
        # everything up to and including such a marker is discarded.
        divider_patterns = [
            r'Here are the lyrics:',
            r'Here is my song:',
            r'The lyrics:',
            r'My lyrics:',
            r'Song lyrics:',
            r'\*\*\*+',
            r'===+',
            r'---+',
            r'```',
            r'Lyrics:'
        ]

        for pattern in divider_patterns:
            match = re.search(pattern, lyrics, re.IGNORECASE)
            while match:
                lyrics = lyrics[match.end():].strip()
                match = re.search(pattern, lyrics, re.IGNORECASE)

        # Remove paired thinking blocks, then any stray unmatched tags
        lyrics = re.sub(r'<think>.*?</think>', '', lyrics, flags=re.DOTALL)
        lyrics = re.sub(r'\[thinking\].*?\[/thinking\]', '', lyrics, flags=re.DOTALL)
        lyrics = re.sub(r'</?think>|\[/?thinking\]', '', lyrics)

        lines = lyrics.strip().split('\n')
        clean_lines = []

        # Heuristics for lines that are meta-commentary rather than lyrics
        non_lyric_patterns = [
            # Self-narration and meta openers
            r'^(note|thinking|thoughts|let me|i will|i am going|i would|i can|i need to|i have to|i should|let\'s|here|now)',
            r'^(first|second|third|next|finally|importantly|remember|so|ok|okay|as requested|as asked|considering)',
            # References to the prompt, song structure, or rhythm instructions
            r'syllable[s]?|phrase|rhythm|beats?|tempo|bpm|instruction|follow|alignment|match|corresponding',
            r'verses?|chorus|bridge|section|stanza|part|template|format|pattern|example',
            r'requirements?|guidelines?|song structure|stressed|unstressed',
            r'generated|output|result|provide|create|write|draft|version',
            # Numbered, bulleted, or labelled lines
            r'^line \d+|^\d+[\.\):]|^\[\w+\]|^[\*\-\+] ',
            # Questions and analysis language
            r'\?$|analysis|evaluate|review|check|ensure',
            r'make sure|please note|important|notice|pay attention'
        ]

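        # Note: these patterns match anywhere in a line, so a genuine lyric containing a
        # word such as "write", "pattern", or "check" is dropped as well; the padding step
        # further down compensates for any lines lost this way.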
        for line in lines:
            line = line.strip()

            # Skip empty lines
            if not line or line.isspace():
                continue

            # Skip anything matching the meta-commentary heuristics
            should_skip = False
            for pattern in non_lyric_patterns:
                if re.search(pattern, line.lower()):
                    should_skip = True
                    break

            if should_skip:
                continue

            # Skip bracketed section markers and short parentheticals
            if (line.startswith('[') and ']' in line) or (line.startswith('(') and ')' in line and len(line) < 20):
                continue

            # Skip short "Label: value" lines unless the colon looks like part of a lyric
            if ':' in line and not any(word in line.lower() for word in ['like', 'when', 'where', 'how', 'why', 'what']):
                if len(line.split(':')[0]) < 15:
                    continue

            # Skip fragments too short to be a lyric line
            if len(line) < 3:
                continue

            # Skip numbered lines
            if re.match(r'^\d+\.|\(#\d+\)|\d+\)', line):
                continue

            # Skip markdown headers and emphasis markers
            if re.match(r'^#{1,6} |^\*\*|^__', line):
                continue

            # Skip any line that still carries a thinking tag
            if '<think>' in line.lower() or '</think>' in line.lower() or '[thinking]' in line.lower() or '[/thinking]' in line.lower():
                continue

            clean_lines.append(line)

        # Drop a leading "here are the lyrics"-style intro line
        if clean_lines and any(clean_lines[0].lower().startswith(prefix) for prefix in
                               ['here are', 'these are', 'below are', 'following are']):
            clean_lines = clean_lines[1:]

        if len(clean_lines) > 3:
            # Trim introductory self-narration at the start
            first_three = ' '.join(clean_lines[:3]).lower()
            if any(term in first_three for term in ['i will', 'i have created', 'i\'ll provide', 'i\'ll write']):
                start_idx = 0
                for i, line in enumerate(clean_lines):
                    if i >= 3 and not any(term in line.lower() for term in ['i will', 'created', 'write', 'provide']):
                        start_idx = i
                        break
                clean_lines = clean_lines[start_idx:]

            # Trim closing commentary at the end
            last_three = ' '.join(clean_lines[-3:]).lower()
            if any(term in last_three for term in ['hope this', 'these lyrics', 'as you can see', 'this song', 'i have']):
                end_idx = len(clean_lines)
                for i in range(len(clean_lines) - 1, max(0, len(clean_lines) - 4), -1):
                    if i < len(clean_lines) and not any(term in clean_lines[i].lower() for term in
                                                        ['hope', 'these lyrics', 'as you can see', 'this song']):
                        end_idx = i + 1
                        break
                clean_lines = clean_lines[:end_idx]

        for i in range(len(clean_lines)):
            # Strip trailing comments and parenthetical asides
            clean_lines[i] = re.sub(r'\s+//.*$', '', clean_lines[i])
            clean_lines[i] = re.sub(r'\s+\(.*?\)$', '', clean_lines[i])

            # Strip any thinking tags that survived the line filter
            clean_lines[i] = re.sub(r'<think>.*?</think>', '', clean_lines[i], flags=re.DOTALL)
            clean_lines[i] = re.sub(r'\[thinking\].*?\[/thinking\]', '', clean_lines[i], flags=re.DOTALL)
            clean_lines[i] = re.sub(r'</?think>|\[/?thinking\]', '', clean_lines[i])

        # Remove lines that became empty after cleanup
        clean_lines = [line for line in clean_lines if line.strip() and not line.isspace()]

        # Force the lyric count to match the number of musical phrases exactly
        if lyric_templates:
            num_required = len(lyric_templates)

            if len(clean_lines) > num_required:
                clean_lines = clean_lines[:num_required]

            # Pad with placeholder lines when the model produced too few
            while len(clean_lines) < num_required:
                placeholder = f"Echoes of {emotion} fill the {genre} night"
                if len(clean_lines) > 0:
                    # Reuse a word from the previous line for a bit of continuity
                    last_words = [word for line in clean_lines[-1:] for word in line.split() if len(word) > 3]
                    if last_words:
                        import random
                        word = random.choice(last_words)
                        placeholder = f"{word.capitalize()} whispers through the {emotion} silence"

                clean_lines.append(placeholder)

        final_lyrics = '\n'.join(clean_lines)

        if not final_lyrics or len(final_lyrics) < 10:
            return "The model generated only thinking content but no actual lyrics. Please try again."

        return final_lyrics

    except Exception as e:
        error_msg = f"Error generating lyrics: {str(e)}"
        print(error_msg)
        return error_msg


def analyze_lyrics_rhythm_match(lyrics, lyric_templates, genre="pop"):
    """Analyze how well the generated lyrics match the beat patterns and syllable requirements."""
    if not lyric_templates or not lyrics:
        return "No beat templates or lyrics available for analysis."

    lines = lyrics.strip().split('\n')
    lines = [line for line in lines if line.strip()]

    result = "### Beat & Syllable Match Analysis\n\n"
    result += "| Line | Syllables | Target Range | Match | Stress Pattern |\n"
    result += "| ---- | --------- | ------------ | ----- | -------------- |\n"

    line_count = min(len(lines), len(lyric_templates))

    total_matches = 0
    total_range_matches = 0
    total_stress_matches = 0
    total_stress_percentage = 0
    total_ideal_matches = 0

    for i in range(line_count):
        line = lines[i]
        template = lyric_templates[i]

        check_result = beat_analyzer.check_syllable_stress_match(line, template, genre)
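        # check_syllable_stress_match is expected to return a dict with at least:
        #   syllable_count, ideal_syllable_count, min_expected, max_expected,
        #   matches_beat_count, within_range, stress_matches, stress_match_percentage
        # (inferred from the fields read below).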
        syllable_match = "✓" if check_result["matches_beat_count"] else ("✓*" if check_result["within_range"] else "✗")
        stress_match = "✓" if check_result["stress_matches"] else f"{int(check_result['stress_match_percentage']*100)}%"

        if check_result["matches_beat_count"]:
            total_matches += 1
        if check_result["within_range"]:
            total_range_matches += 1
        if check_result["stress_matches"]:
            total_stress_matches += 1
        total_stress_percentage += check_result["stress_match_percentage"]

        # Count lines within one syllable of the genre's ideal count
        if abs(check_result["syllable_count"] - check_result["ideal_syllable_count"]) <= 1:
            total_ideal_matches += 1

        # Visualize the stress pattern: X = strong beat, x = medium, . = weak
        stress_visual = ""
        for char in template['stress_pattern']:
            if char == "S":
                stress_visual += "X"
            elif char == "M":
                stress_visual += "x"
            else:
                stress_visual += "."

        result += f"| {i+1} | {check_result['syllable_count']} | {check_result['min_expected']}-{check_result['max_expected']} | {syllable_match} | {stress_visual} |\n"

    if line_count > 0:
        exact_match_rate = (total_matches / line_count) * 100
        range_match_rate = (total_range_matches / line_count) * 100
        ideal_match_rate = (total_ideal_matches / line_count) * 100
        stress_match_rate = (total_stress_matches / line_count) * 100
        avg_stress_percentage = (total_stress_percentage / line_count) * 100

        result += "\n**Summary:**\n"
        result += f"- Exact syllable match rate: {exact_match_rate:.1f}%\n"
        result += f"- Genre-appropriate syllable range match rate: {range_match_rate:.1f}%\n"
        result += f"- Ideal genre syllable count match rate: {ideal_match_rate:.1f}%\n"
        result += f"- Perfect stress pattern match rate: {stress_match_rate:.1f}%\n"
        result += f"- Average stress pattern accuracy: {avg_stress_percentage:.1f}%\n"
        result += f"- Overall rhythmic accuracy: {((range_match_rate + avg_stress_percentage) / 2):.1f}%\n"

        result += f"\n**Genre Notes ({genre}):**\n"

        if genre.lower() == "pop":
            result += "- Pop music typically allows 1-3 syllables per beat using melisma and syncopation\n"
            result += "- Strong downbeats often align with stressed syllables of important words\n"
        elif genre.lower() == "rock":
            result += "- Rock music often uses 1-2 syllables per beat with some variation\n"
            result += "- Emphasis on strong beats for impact and rhythmic drive\n"
        elif genre.lower() in ["hiphop", "rap"]:
            result += "- Hip-hop/rap often features 2-5 syllables per beat through rapid delivery\n"
            result += "- Complex rhyme patterns and fast delivery create higher syllable density\n"
        elif genre.lower() in ["folk", "country"]:
            result += "- Folk/country music often stays closer to 1:1 syllable-to-beat ratio\n"
            result += "- Narrative focus leads to clearer enunciation of syllables\n"
        else:
            result += "- This genre typically allows for flexible syllable-to-beat relationships\n"
            result += "- Syllable count can vary based on vocal style and song section\n"

    return result


def create_interface():
    with gr.Blocks(title="Music Analysis & Lyrics Generator") as demo:
        gr.Markdown("# Music Analysis & Lyrics Generator")
        gr.Markdown("Upload a music file or record audio to analyze it and generate matching lyrics")

        with gr.Row():
            with gr.Column(scale=1):
                audio_input = gr.Audio(
                    label="Upload or Record Audio",
                    type="filepath",
                    sources=["upload", "microphone"]
                )
                analyze_btn = gr.Button("Analyze and Generate Lyrics", variant="primary")

            with gr.Column(scale=2):
                with gr.Tab("Analysis"):
                    analysis_output = gr.Textbox(label="Music Analysis Results", lines=10)

                    with gr.Row():
                        tempo_output = gr.Number(label="Tempo (BPM)")
                        time_sig_output = gr.Textbox(label="Time Signature")
                        emotion_output = gr.Textbox(label="Primary Emotion")
                        theme_output = gr.Textbox(label="Primary Theme")
                        genre_output = gr.Textbox(label="Primary Genre")

                with gr.Tab("Generated Lyrics"):
                    lyrics_output = gr.Textbox(label="Generated Lyrics", lines=20)

                with gr.Tab("Beat Matching"):
                    beat_match_output = gr.Markdown(label="Beat & Syllable Matching Analysis")

        analyze_btn.click(
            fn=process_audio,
            inputs=[audio_input],
            outputs=[analysis_output, lyrics_output, tempo_output, time_sig_output,
                     emotion_output, theme_output, genre_output, beat_match_output]
        )

        gr.Markdown("""
## How it works
1. Upload or record a music file
2. The system analyzes tempo, beats, time signature and other musical features
3. It detects emotion, theme, and music genre
4. Using beat patterns and syllable stress analysis, it generates lyrics aligned with the music
5. Each line of the lyrics is matched to the beat pattern of the corresponding musical phrase
""")

    return demo


demo = create_interface()

if __name__ == "__main__":
    demo.launch()
else:
    # When imported as a module, expose the interface as `app`
    app = demo