"""Gradio app: analyze an uploaded music file and generate matching lyrics.

Pipeline: load audio -> MusicAnalyzer (tempo/key/emotion/theme) -> AST-based
genre classifier -> Qwen3 LLM (4-bit quantized) writes lyrics constrained by
the analysis. Heavy models are loaded once at import time.
"""

import os
import io
import gradio as gr
import torch
import numpy as np
import re
import pronouncing  # Add this to requirements.txt for syllable counting
import functools  # Add this for lru_cache functionality
from transformers import (
    AutoModelForAudioClassification,
    AutoFeatureExtractor,
    AutoTokenizer,
    pipeline,
    AutoModelForCausalLM,
    BitsAndBytesConfig
)
from huggingface_hub import login
from utils import (
    load_audio,
    extract_audio_duration,
    extract_mfcc_features,
    format_genre_results,
    ensure_cuda_availability
)
from emotionanalysis import MusicAnalyzer
import librosa

# Login to Hugging Face Hub if token is provided
if "HF_TOKEN" in os.environ:
    login(token=os.environ["HF_TOKEN"])

# Constants
GENRE_MODEL_NAME = "dima806/music_genres_classification"
MUSIC_DETECTION_MODEL = "MIT/ast-finetuned-audioset-10-10-0.4593"
LLM_MODEL_NAME = "Qwen/Qwen3-32B"
SAMPLE_RATE = 22050  # Standard sample rate for audio processing

# Check CUDA availability (for informational purposes)
CUDA_AVAILABLE = ensure_cuda_availability()

# Load models at initialization time.  On failure the globals are set to None
# and the request handlers degrade gracefully instead of crashing at import.
print("Loading genre classification model...")
try:
    genre_feature_extractor = AutoFeatureExtractor.from_pretrained(GENRE_MODEL_NAME)
    genre_model = AutoModelForAudioClassification.from_pretrained(
        GENRE_MODEL_NAME,
        device_map="auto" if CUDA_AVAILABLE else None
    )

    # Create a convenience wrapper function with the same interface as before
    def get_genre_model():
        """Return the (model, feature_extractor) pair for genre classification."""
        return genre_model, genre_feature_extractor
except Exception as e:
    print(f"Error loading genre model: {str(e)}")
    genre_model = None
    genre_feature_extractor = None

# Load LLM and tokenizer at initialization time
print("Loading Qwen LLM model with 4-bit quantization...")
try:
    # Configure 4-bit quantization for better performance
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True
    )
    llm_tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_NAME)
    llm_model = AutoModelForCausalLM.from_pretrained(
        LLM_MODEL_NAME,
        quantization_config=quantization_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.float16,
        use_cache=True
    )
except Exception as e:
    print(f"Error loading LLM model: {str(e)}")
    llm_tokenizer = None
    llm_model = None

# Create music analyzer instance
music_analyzer = MusicAnalyzer()


# Process uploaded audio file
def process_audio(audio_file):
    """Analyze an audio file and generate lyrics for it.

    Args:
        audio_file: Filesystem path supplied by the Gradio Audio component,
            or None when nothing was uploaded.

    Returns:
        A 7-tuple (analysis_summary, lyrics, tempo, time_signature, emotion,
        theme, primary_genre).  On error the first element carries the error
        message and the remaining six are None, matching the UI outputs.
    """
    if audio_file is None:
        return "No audio file provided", None, None, None, None, None, None

    try:
        # Load and analyze audio
        y, sr = load_audio(audio_file, sr=SAMPLE_RATE)

        # Basic audio information
        duration = extract_audio_duration(y, sr)

        # Analyze music with MusicAnalyzer
        music_analysis = music_analyzer.analyze_music(audio_file)

        # Extract key information
        tempo = music_analysis["rhythm_analysis"]["tempo"]
        time_signature = music_analysis["rhythm_analysis"]["estimated_time_signature"]
        emotion = music_analysis["emotion_analysis"]["primary_emotion"]
        theme = music_analysis["theme_analysis"]["primary_theme"]

        # Use genre classification directly instead of pipeline
        if genre_model is not None and genre_feature_extractor is not None:
            # Resample audio to 16000 Hz for the genre model (AST expects 16 kHz)
            y_16k = librosa.resample(y, orig_sr=sr, target_sr=16000)

            # Extract features
            inputs = genre_feature_extractor(
                y_16k,
                sampling_rate=16000,
                return_tensors="pt"
            ).to(genre_model.device)

            # Classify genre
            with torch.no_grad():
                outputs = genre_model(**inputs)
                logits = outputs.logits
                probs = torch.nn.functional.softmax(logits, dim=-1)

            # Get top genres
            values, indices = torch.topk(probs[0], k=5)
            top_genres = [
                (genre_model.config.id2label[idx.item()], val.item())
                for val, idx in zip(values, indices)
            ]
        else:
            # Fallback if model loading failed
            top_genres = [("Unknown", 1.0)]

        # Format genre results for display
        genre_results_text = format_genre_results(top_genres)
        primary_genre = top_genres[0][0]

        # Generate lyrics using LLM
        lyrics = generate_lyrics(music_analysis, primary_genre, duration)

        # Prepare analysis summary
        analysis_summary = f"""
### Music Analysis Results

**Duration:** {duration:.2f} seconds
**Tempo:** {tempo:.1f} BPM
**Time Signature:** {time_signature}
**Key:** {music_analysis["tonal_analysis"]["key"]} {music_analysis["tonal_analysis"]["mode"]}
**Primary Emotion:** {emotion}
**Primary Theme:** {theme}
**Top Genre:** {primary_genre}

{genre_results_text}
"""

        return analysis_summary, lyrics, tempo, time_signature, emotion, theme, primary_genre
    except Exception as e:
        error_msg = f"Error processing audio: {str(e)}"
        print(error_msg)
        return error_msg, None, None, None, None, None, None


def generate_lyrics(music_analysis, genre, duration):
    """Generate raw lyrics matching the musical analysis using the Qwen LLM.

    Args:
        music_analysis: Dict produced by MusicAnalyzer.analyze_music with
            "rhythm_analysis", "tonal_analysis", "emotion_analysis" and
            "theme_analysis" sections.
        genre: Primary genre label string.
        duration: Track length in seconds, used to bound the lyric length.

    Returns:
        Cleaned lyric text, or an error-message string on failure.
    """
    try:
        # Extract meaningful information for context
        tempo = music_analysis["rhythm_analysis"]["tempo"]
        key = music_analysis["tonal_analysis"]["key"]
        mode = music_analysis["tonal_analysis"]["mode"]
        emotion = music_analysis["emotion_analysis"]["primary_emotion"]
        theme = music_analysis["theme_analysis"]["primary_theme"]

        # Verify LLM is loaded
        if llm_model is None or llm_tokenizer is None:
            return "Error: LLM model not properly loaded"

        # Construct prompt for the LLM with stronger instruction to avoid thinking
        prompt = f"""Write lyrics for a {genre} song with these specifications:
- Key: {key} {mode}
- Tempo: {tempo} BPM
- Emotion: {emotion}
- Theme: {theme}
- Duration: {duration:.1f} seconds
- Time signature: {music_analysis["rhythm_analysis"]["estimated_time_signature"]}

CRITICAL INSTRUCTIONS:
- The lyrics should be in English
- Write ONLY the raw lyrics with no structural labels
- DO NOT include any thinking, reasoning, or explanations
- DO NOT include tags or thinking processes
- DO NOT include [verse], [chorus], [bridge], or any other section markers
- DO NOT number the verses or lines
- DO NOT use bullet points
- Format as simple line-by-line lyrics only
- Make sure the lyrics match the specified duration and tempo
- Keep lyrics concise enough to fit the duration when sung at the given tempo
"""

        # Generate lyrics using the LLM model directly
        messages = [
            {"role": "user", "content": prompt}
        ]

        # Apply chat template.  Qwen3 supports enable_thinking=False, which
        # suppresses <think>...</think> reasoning blocks at the source; fall
        # back to the plain call for tokenizers without that parameter.
        try:
            text = llm_tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=False
            )
        except TypeError:
            text = llm_tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )

        # Tokenize and move to model device
        model_inputs = llm_tokenizer([text], return_tensors="pt").to(llm_model.device)

        # Generate with optimized parameters
        generated_ids = llm_model.generate(
            **model_inputs,
            max_new_tokens=1024,
            do_sample=True,
            temperature=0.6,  # Lower temperature for more focused responses
            top_p=0.9,
            repetition_penalty=1.2,
            pad_token_id=llm_tokenizer.eos_token_id
        )

        # Decode only the newly generated tokens (skip the echoed prompt)
        output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
        lyrics = llm_tokenizer.decode(output_ids, skip_special_tokens=True).strip()

        # ENHANCED post-processing to remove ALL thinking elements
        # Remove thinking tags and their content.
        # BUGFIX: the previous pattern had lost its tags (it was just r'.*?',
        # which matches the empty string and removed nothing); the intended
        # target is Qwen-style <think>...</think> blocks.
        lyrics = re.sub(r'<think>.*?</think>', '', lyrics, flags=re.DOTALL)
        lyrics = re.sub(r'\[thinking\].*?\[/thinking\]', '', lyrics, flags=re.DOTALL)

        # Remove any lines with section labels
        lyrics = re.sub(r'^\[.*?\].*$', '', lyrics, flags=re.MULTILINE)

        # Remove common prefixes and thinking text (expanded list)
        thinking_prefixes = [
            r'^(Here are|Here is|These are|This is|Let me|I will|I\'ll).*?:\s*',
            r'^Okay, let\'s.*$',
            r'^First, I need to.*$',
            r'^Let me brainstorm.*$',
            r'^I\'ll write.*$',
            r'^Let\'s create.*$',
            r'^For a.*song.*$',
            r'^Now I\'ll.*$',
            r'^Taking into account.*$',
            r'^Given the specifications.*$',
            r'^Based on the.*$',
            r'^Considering the.*$'
        ]
        for pattern in thinking_prefixes:
            lyrics = re.sub(pattern, '', lyrics, flags=re.MULTILINE | re.IGNORECASE)
        lyrics = re.sub(r'^Title:.*?$', '', lyrics, flags=re.MULTILINE).strip()

        # Remove all section markers in any format
        lyrics = re.sub(
            r'^\s*(Verse|Chorus|Bridge|Pre.?Chorus|Intro|Outro|Refrain|Hook|Breakdown)(\s*\d*|\s*[A-Z])?:?\s*$',
            '', lyrics, flags=re.MULTILINE | re.IGNORECASE
        )
        lyrics = re.sub(
            r'\[(Verse|Chorus|Bridge|Pre.?Chorus|Intro|Outro|Refrain|Hook|Breakdown)(\s*\d*|\s*[A-Z])?\]',
            '', lyrics, flags=re.IGNORECASE
        )

        # Remove lines with obvious thinking content
        lyrics = re.sub(
            r'^.*?(think|brainstorm|consider|syllable|count|rhyme|scheme|tempo|calculate|bpm).*$',
            '', lyrics, flags=re.MULTILINE | re.IGNORECASE
        )

        # Remove any empty lines at beginning, collapse multiple blank lines, and trim
        lyrics = re.sub(r'^\s*\n', '', lyrics)
        lyrics = re.sub(r'\n\s*\n\s*\n+', '\n\n', lyrics)
        lyrics = lyrics.strip()

        # One final check - if lyrics still starts with obvious thinking, try to find the actual lyrics
        if re.match(r'.*?(I need to|Let me|Okay|Hmm|I will|I\'ll|First|Let\'s|Now).*', lyrics[:100], re.IGNORECASE):
            # Look for a double line break which often separates thinking from lyrics
            parts = lyrics.split('\n\n')
            if len(parts) > 1:
                # Take everything after the first paragraph as the actual lyrics
                lyrics = '\n\n'.join(parts[1:])

        return lyrics
    except Exception as e:
        error_msg = f"Error generating lyrics: {str(e)}"
        print(error_msg)
        return error_msg


# Create Gradio interface
def create_interface():
    """Build and return the Gradio Blocks UI wired to process_audio."""
    with gr.Blocks(title="Music Analysis & Lyrics Generator") as demo:
        gr.Markdown("# Music Analysis & Lyrics Generator")
        gr.Markdown("Upload a music file or record audio to analyze it and generate matching lyrics")

        with gr.Row():
            with gr.Column(scale=1):
                audio_input = gr.Audio(
                    label="Upload or Record Audio",
                    type="filepath",
                    sources=["upload", "microphone"]
                )
                analyze_btn = gr.Button("Analyze and Generate Lyrics", variant="primary")

            with gr.Column(scale=2):
                with gr.Tab("Analysis"):
                    analysis_output = gr.Textbox(label="Music Analysis Results", lines=10)

                    with gr.Row():
                        tempo_output = gr.Number(label="Tempo (BPM)")
                        time_sig_output = gr.Textbox(label="Time Signature")
                        emotion_output = gr.Textbox(label="Primary Emotion")
                        theme_output = gr.Textbox(label="Primary Theme")
                        genre_output = gr.Textbox(label="Primary Genre")

                with gr.Tab("Generated Lyrics"):
                    lyrics_output = gr.Textbox(label="Generated Lyrics", lines=20)

        # Set up event handlers; output order must match process_audio's return tuple
        analyze_btn.click(
            fn=process_audio,
            inputs=[audio_input],
            outputs=[analysis_output, lyrics_output, tempo_output, time_sig_output,
                     emotion_output, theme_output, genre_output]
        )

        gr.Markdown("""
## How it works
1. Upload or record a music file
2. The system analyzes tempo, beats, time signature and other musical features
3. It detects emotion, theme, and music genre
4. Using this information, it generates lyrics that match the style and length of your music
""")

    return demo


# Launch the app
demo = create_interface()

if __name__ == "__main__":
    demo.launch()
else:
    # For Hugging Face Spaces
    app = demo