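"""Gradio app: analyze an uploaded or recorded music clip (tempo, time signature,
key, emotion, theme, genre and beat patterns) and generate lyrics matched to the
detected musical phrases using a 4-bit quantized Qwen LLM."""
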
import os
import io
import gradio as gr
import torch
import numpy as np
import random
import re
import pronouncing
import functools
from transformers import (
    AutoModelForAudioClassification,
    AutoFeatureExtractor,
    AutoTokenizer,
    pipeline,
    AutoModelForCausalLM,
    BitsAndBytesConfig
)
from huggingface_hub import login
from utils import (
    load_audio,
    extract_audio_duration,
    extract_mfcc_features,
    format_genre_results,
    ensure_cuda_availability
)
from emotionanalysis import MusicAnalyzer 
import librosa
from beat_analysis import BeatAnalyzer  # Import the BeatAnalyzer class

# Initialize beat analyzer
beat_analyzer = BeatAnalyzer()

# Login to Hugging Face Hub if token is provided
if "HF_TOKEN" in os.environ:
    login(token=os.environ["HF_TOKEN"])

# Constants
GENRE_MODEL_NAME = "dima806/music_genres_classification"
MUSIC_DETECTION_MODEL = "MIT/ast-finetuned-audioset-10-10-0.4593"
LLM_MODEL_NAME = "Qwen/Qwen3-32B"
SAMPLE_RATE = 22050  # Standard sample rate for audio processing

# Check CUDA availability (for informational purposes)
CUDA_AVAILABLE = ensure_cuda_availability()

# Load models at initialization time
print("Loading genre classification model...")
try:
    genre_feature_extractor = AutoFeatureExtractor.from_pretrained(GENRE_MODEL_NAME)
    genre_model = AutoModelForAudioClassification.from_pretrained(
        GENRE_MODEL_NAME,
        device_map="auto" if CUDA_AVAILABLE else None
    )
    # Convenience wrapper returning the loaded genre model and feature extractor
    def get_genre_model():
        return genre_model, genre_feature_extractor
except Exception as e:
    print(f"Error loading genre model: {str(e)}")
    genre_model = None
    genre_feature_extractor = None

# Load LLM and tokenizer at initialization time
print("Loading Qwen LLM model with 4-bit quantization...")
try:
    # Configure 4-bit quantization for better performance
    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True
    )
    
    llm_tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL_NAME)
    llm_model = AutoModelForCausalLM.from_pretrained(
        LLM_MODEL_NAME,
        quantization_config=quantization_config,
        device_map="auto",
        trust_remote_code=True,
        torch_dtype=torch.float16,
        use_cache=True
    )
except Exception as e:
    print(f"Error loading LLM model: {str(e)}")
    llm_tokenizer = None
    llm_model = None

# Create music analyzer instance
music_analyzer = MusicAnalyzer()

# Process uploaded audio file
def process_audio(audio_file):
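    """Run the full analysis pipeline on an uploaded audio file.

    Returns a tuple of (analysis_summary, lyrics, tempo, time_signature,
    emotion, theme, primary_genre, beat_match_analysis) matching the Gradio outputs.
    """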
    if audio_file is None:
        return "No audio file provided", None, None, None, None, None, None, None
    
    try:
        # Load and analyze audio
        y, sr = load_audio(audio_file, sr=SAMPLE_RATE)
        
        # Basic audio information
        duration = extract_audio_duration(y, sr)
        
        # Analyze music with MusicAnalyzer
        music_analysis = music_analyzer.analyze_music(audio_file)
        
        # Extract time signature from MusicAnalyzer result
        time_signature = music_analysis["rhythm_analysis"]["estimated_time_signature"]
        
        # Ensure time signature is one of the supported ones (4/4, 3/4, 2/4, 6/8)
        if time_signature not in ["4/4", "3/4", "2/4", "6/8"]:
            time_signature = "4/4"  # Default to 4/4 if unsupported
            music_analysis["rhythm_analysis"]["estimated_time_signature"] = time_signature
        
        # Analyze beat patterns and create lyrics template using MusicAnalyzer's time signature
        beat_analysis = beat_analyzer.analyze_beat_pattern(audio_file, time_signature=time_signature)
        lyric_templates = beat_analyzer.create_lyric_template(beat_analysis)
        
        # Store these in the music_analysis dict for use in lyrics generation
        music_analysis["beat_analysis"] = beat_analysis
        music_analysis["lyric_templates"] = lyric_templates
        
        # Extract key information
        tempo = music_analysis["rhythm_analysis"]["tempo"]
        emotion = music_analysis["emotion_analysis"]["primary_emotion"]
        theme = music_analysis["theme_analysis"]["primary_theme"]
        
        # Use genre classification directly instead of pipeline
        if genre_model is not None and genre_feature_extractor is not None:
            # Resample audio to 16000 Hz for the genre model
            y_16k = librosa.resample(y, orig_sr=sr, target_sr=16000)
            
            # Extract features
            inputs = genre_feature_extractor(
                y_16k, 
                sampling_rate=16000, 
                return_tensors="pt"
            ).to(genre_model.device)
            
            # Classify genre
            with torch.no_grad():
                outputs = genre_model(**inputs)
                logits = outputs.logits
                probs = torch.nn.functional.softmax(logits, dim=-1)
                
            # Get top genres
            values, indices = torch.topk(probs[0], k=5)
            top_genres = [(genre_model.config.id2label[idx.item()], val.item()) for val, idx in zip(values, indices)]
        else:
            # Fallback if model loading failed
            top_genres = [("Unknown", 1.0)]
        
        # Format genre results for display
        genre_results_text = format_genre_results(top_genres)
        primary_genre = top_genres[0][0]
        
        # Generate lyrics using LLM
        lyrics = generate_lyrics(music_analysis, primary_genre, duration)
        
        # Create beat/stress/syllable matching analysis
        beat_match_analysis = analyze_lyrics_rhythm_match(lyrics, lyric_templates, primary_genre)
        
        # Prepare analysis summary
        analysis_summary = f"""
### Music Analysis Results

**Duration:** {duration:.2f} seconds
**Tempo:** {tempo:.1f} BPM
**Time Signature:** {time_signature}
**Key:** {music_analysis["tonal_analysis"]["key"]} {music_analysis["tonal_analysis"]["mode"]}
**Primary Emotion:** {emotion}
**Primary Theme:** {theme}
**Top Genre:** {primary_genre}

{genre_results_text}
"""

        # Add beat analysis summary
        if lyric_templates:
            analysis_summary += f"""
### Beat Analysis

**Total Phrases:** {len(lyric_templates)}
**Average Beats Per Phrase:** {np.mean([t['num_beats'] for t in lyric_templates]):.1f}
**Beat Pattern Examples:** 
- Phrase 1: {lyric_templates[0]['stress_pattern'] if lyric_templates else 'N/A'}
- Phrase 2: {lyric_templates[1]['stress_pattern'] if len(lyric_templates) > 1 else 'N/A'}
"""
        
        return analysis_summary, lyrics, tempo, time_signature, emotion, theme, primary_genre, beat_match_analysis
    
    except Exception as e:
        error_msg = f"Error processing audio: {str(e)}"
        print(error_msg)
        return error_msg, None, None, None, None, None, None, None

def generate_lyrics(music_analysis, genre, duration):
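    """Generate song lyrics with the loaded LLM based on the music analysis and genre,
    then post-process the output so only plain lyric lines remain (one line per
    detected musical phrase when beat templates are available).
    """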
    try:
        # Extract meaningful information for context
        tempo = music_analysis["rhythm_analysis"]["tempo"]
        key = music_analysis["tonal_analysis"]["key"]
        mode = music_analysis["tonal_analysis"]["mode"]
        emotion = music_analysis["emotion_analysis"]["primary_emotion"]
        theme = music_analysis["theme_analysis"]["primary_theme"]
        
        # Get beat analysis and templates
        lyric_templates = music_analysis.get("lyric_templates", [])
        
        # Verify LLM is loaded
        if llm_model is None or llm_tokenizer is None:
            return "Error: LLM model not properly loaded"

        # If no beat templates are available, fall back to a generic prompt
        if not lyric_templates:
            # Simple prompt without per-phrase line constraints
            prompt = f"""Write song lyrics for a {genre} song in {key} {mode} with tempo {tempo} BPM. The emotion is {emotion} and theme is {theme}.

ONLY WRITE THE ACTUAL LYRICS. NO EXPLANATIONS OR META-TEXT.
"""
        else:
            # Create phrase examples
            num_phrases = len(lyric_templates)
            
            # Create a more direct prompt with examples
            prompt = f"""Write song lyrics for a {genre} song in {key} {mode} with tempo {tempo} BPM. The emotion is {emotion} and theme is {theme}.

I need EXACTLY {num_phrases} lines of lyrics - one line for each musical phrase. Not one more, not one less.

FORMAT:
- Just write {num_phrases} plain text lines
- Each line should be simple song lyrics (no annotations, no numbers, no labeling)
- Don't include any explanations, thinking tags, or meta-commentary
- Don't use any <think> or [thinking] tags
- Don't include [Verse], [Chorus] or section markers
- Don't include line numbers

EXAMPLE OF WHAT I WANT (for a {num_phrases}-line song):
Lost in the shadows of yesterday
Dreams fade away like morning dew
Time slips through fingers like desert sand
Memories echo in empty rooms
(... and so on for exactly {num_phrases} lines)

JUST THE PLAIN LYRICS, EXACTLY {num_phrases} LINES.
"""

        # Generate lyrics using the LLM model
        messages = [
            {"role": "user", "content": prompt}
        ]
        
        # Apply chat template
        text = llm_tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        
        # Tokenize and move to model device
        model_inputs = llm_tokenizer([text], return_tensors="pt").to(llm_model.device)
        
        # Generate with optimized parameters
        generated_ids = llm_model.generate(
            **model_inputs,
            max_new_tokens=1024,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            repetition_penalty=1.2,
            pad_token_id=llm_tokenizer.eos_token_id
        )
        
        # Decode only the newly generated tokens (strip the prompt portion)
        output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
        lyrics = llm_tokenizer.decode(output_ids, skip_special_tokens=True).strip()
        
        # Aggressive post-processing: strip thinking tags, meta-commentary and
        # other non-lyric content from the raw LLM output
        # ------------------------------------------------
        
        # 1. First, look for any standard dividers that might separate thinking from lyrics
        divider_patterns = [
            r'Here are the lyrics:',
            r'Here is my song:',
            r'The lyrics:',
            r'My lyrics:',
            r'Song lyrics:',
            r'\*\*\*+',
            r'===+',
            r'---+',
            r'```',
            r'Lyrics:'
        ]
        
        for pattern in divider_patterns:
            matches = list(re.finditer(pattern, lyrics, re.IGNORECASE))
            if matches:
                # Keep only content after the last occurrence of the divider
                lyrics = lyrics[matches[-1].end():].strip()
        
        # 2. Remove thinking tags completely before splitting into lines
        lyrics = re.sub(r'<think>.*?</think>', '', lyrics, flags=re.DOTALL)
        lyrics = re.sub(r'\[thinking\].*?\[/thinking\]', '', lyrics, flags=re.DOTALL)
        lyrics = re.sub(r'<think>', '', lyrics, flags=re.DOTALL)
        lyrics = re.sub(r'</think>', '', lyrics, flags=re.DOTALL)
        lyrics = re.sub(r'\[thinking\]', '', lyrics, flags=re.DOTALL)
        lyrics = re.sub(r'\[/thinking\]', '', lyrics, flags=re.DOTALL)
        
        # 3. Split text into lines for aggressive line-by-line filtering
        lines = lyrics.strip().split('\n')
        clean_lines = []
        
        # 4. Define comprehensive patterns for non-lyrical content
        non_lyric_patterns = [
            # Meta-commentary
            r'^(note|thinking|thoughts|let me|i will|i am going|i would|i can|i need to|i have to|i should|let\'s|here|now)',
            r'^(first|second|third|next|finally|importantly|remember|so|ok|okay|as requested|as asked|considering)',
            # Explanations
            r'syllable[s]?|phrase|rhythm|beats?|tempo|bpm|instruction|follow|alignment|match|corresponding',
            r'verses?|chorus|bridge|section|stanza|part|template|format|pattern|example',
            r'requirements?|guidelines?|song structure|stressed|unstressed',
            # Technical language
            r'generated|output|result|provide|create|write|draft|version',
            # Annotations and numbering
            r'^line \d+|^\d+[\.\):]|^\[\w+\]|^[\*\-\+] ',
            # Questions or analytical statements
            r'\?$|analysis|evaluate|review|check|ensure',
            # Instruction-like statements
            r'make sure|please note|important|notice|pay attention'
        ]
        
        # 5. Identify which lines are likely actual lyrics vs non-lyrics
        for line in lines:
            line = line.strip()
            
            # Skip empty lines or lines with just spaces/tabs
            if not line or line.isspace():
                continue
            
            # Skip lines that match any non-lyric pattern
            should_skip = False
            for pattern in non_lyric_patterns:
                if re.search(pattern, line.lower()):
                    should_skip = True
                    break
            
            if should_skip:
                continue
            
            # Skip section headers
            if (line.startswith('[') and ']' in line) or (line.startswith('(') and ')' in line and len(line) < 20):
                continue
            
            # Skip lines that look like annotations (not prose-like)
            if ':' in line and not any(word in line.lower() for word in ['like', 'when', 'where', 'how', 'why', 'what']):
                if len(line.split(':')[0]) < 15:  # Short prefixes followed by colon are likely annotations
                    continue
            
            # Skip extremely short lines (under 3 characters), which are unlikely to be lyrics
            if len(line) < 3:
                continue
            
            # Skip lines that are numbered or bulleted
            if re.match(r'^\d+\.|\(#\d+\)|\d+\)', line):
                continue
            
            # Skip markdown-style emphasis or headers
            if re.match(r'^#{1,6} |^\*\*|^__', line):
                continue
            
            # Skip lines with think tags
            if '<think>' in line.lower() or '</think>' in line.lower() or '[thinking]' in line.lower() or '[/thinking]' in line.lower():
                continue
                
            # Add this line as it passed all filters
            clean_lines.append(line)
        
        # 6. Additional block-level filters for common patterns
        # Check beginning of lyrics for common prefixes
        if clean_lines and any(clean_lines[0].lower().startswith(prefix) for prefix in 
                            ['here are', 'these are', 'below are', 'following are']):
            clean_lines = clean_lines[1:]  # Skip the first line
        
        # 7. Process blocks of lines to detect explanation blocks
        if len(clean_lines) > 3:
            # Check for explanation blocks at the beginning
            first_three = ' '.join(clean_lines[:3]).lower()
            if any(term in first_three for term in ['i will', 'i have created', 'i\'ll provide', 'i\'ll write']):
                # This looks like an explanation, skip the first few lines
                start_idx = 0
                for i, line in enumerate(clean_lines):
                    if i >= 3 and not any(term in line.lower() for term in ['i will', 'created', 'write', 'provide']):
                        start_idx = i
                        break
                clean_lines = clean_lines[start_idx:]
            
            # Check for explanation blocks at the end
            last_three = ' '.join(clean_lines[-3:]).lower()
            if any(term in last_three for term in ['hope this', 'these lyrics', 'as you can see', 'this song', 'i have']):
                # This looks like an explanation at the end, truncate
                end_idx = len(clean_lines)
                for i in range(len(clean_lines) - 1, max(0, len(clean_lines) - 4), -1):
                    if i < len(clean_lines) and not any(term in clean_lines[i].lower() for term in 
                                                    ['hope', 'these lyrics', 'as you can see', 'this song']):
                        end_idx = i + 1
                        break
                clean_lines = clean_lines[:end_idx]
        
        # 8. Cleanup - Remove remaining annotations or thinking
        for i in range(len(clean_lines)):
            # Remove trailing thoughts/annotations
            clean_lines[i] = re.sub(r'\s+//.*$', '', clean_lines[i])
            clean_lines[i] = re.sub(r'\s+\(.*?\)$', '', clean_lines[i])
            
            # Remove thinking tags completely
            clean_lines[i] = re.sub(r'<think>.*?</think>', '', clean_lines[i], flags=re.DOTALL)
            clean_lines[i] = re.sub(r'\[thinking\].*?\[/thinking\]', '', clean_lines[i], flags=re.DOTALL)
            clean_lines[i] = re.sub(r'<think>', '', clean_lines[i])
            clean_lines[i] = re.sub(r'</think>', '', clean_lines[i])
            clean_lines[i] = re.sub(r'\[thinking\]', '', clean_lines[i])
            clean_lines[i] = re.sub(r'\[/thinking\]', '', clean_lines[i])
        
        # 9. Filter out any remaining empty lines after tag removal
        clean_lines = [line for line in clean_lines if line.strip() and not line.isspace()]
        
        # 10. If we have lyric templates, ensure we have the correct number of lines
        if lyric_templates:
            num_required = len(lyric_templates)
            
            # If we have too many lines, keep just the best ones
            if len(clean_lines) > num_required:
                # Keep the first num_required lines
                clean_lines = clean_lines[:num_required]
            
            # If we don't have enough lines, generate placeholders
            while len(clean_lines) < num_required:
                placeholder = f"Echoes of {emotion} fill the {genre} night"
                if len(clean_lines) > 0:
                    # Try to make the placeholder somewhat related to previous lines
                    last_words = [word for line in clean_lines[-1:] for word in line.split() if len(word) > 3]
                    if last_words:
                        word = random.choice(last_words)
                        placeholder = f"{word.capitalize()} whispers through the {emotion} silence"
                
                clean_lines.append(placeholder)
        
        # Assemble final lyrics
        final_lyrics = '\n'.join(clean_lines)
        
        # 11. Final sanity check - if we have nothing or garbage, return an error
        if not final_lyrics or len(final_lyrics) < 10:
            return "The model generated only thinking content but no actual lyrics. Please try again."
        
        return final_lyrics
    
    except Exception as e:
        error_msg = f"Error generating lyrics: {str(e)}"
        print(error_msg)
        return error_msg

def analyze_lyrics_rhythm_match(lyrics, lyric_templates, genre="pop"):
    """Analyze how well the generated lyrics match the beat patterns and syllable requirements"""
    if not lyric_templates or not lyrics:
        return "No beat templates or lyrics available for analysis."
    
    # Split lyrics into lines
    lines = lyrics.strip().split('\n')
    lines = [line for line in lines if line.strip()]  # Remove empty lines
    
    # Prepare analysis result
    result = "### Beat & Syllable Match Analysis\n\n"
    result += "| Line | Syllables | Target Range | Match | Stress Pattern |\n"
    result += "| ---- | --------- | ------------ | ----- | -------------- |\n"
    
    # Maximum number of lines to analyze (either all lines or all templates)
    line_count = min(len(lines), len(lyric_templates))
    
    # Track overall match statistics
    total_matches = 0
    total_range_matches = 0
    total_stress_matches = 0
    total_stress_percentage = 0
    total_ideal_matches = 0
    
    for i in range(line_count):
        line = lines[i]
        template = lyric_templates[i]
        
        # Check match between line and template with genre awareness
        check_result = beat_analyzer.check_syllable_stress_match(line, template, genre)
        
        # Get match symbols
        syllable_match = "✓" if check_result["matches_beat_count"] else ("✓*" if check_result["within_range"] else "✗")
        stress_match = "✓" if check_result["stress_matches"] else f"{int(check_result['stress_match_percentage']*100)}%"
        
        # Update stats
        if check_result["matches_beat_count"]:
            total_matches += 1
        if check_result["within_range"]:
            total_range_matches += 1
        if check_result["stress_matches"]:
            total_stress_matches += 1
        total_stress_percentage += check_result["stress_match_percentage"]
        
        # Track how close we are to ideal count for this genre
        if abs(check_result["syllable_count"] - check_result["ideal_syllable_count"]) <= 1:
            total_ideal_matches += 1
        
        # Create visual representation of the stress pattern
        stress_visual = ""
        for char in template['stress_pattern']:
            if char == "S":
                stress_visual += "X"  # Strong
            elif char == "M":
                stress_visual += "x"  # Medium
            else:
                stress_visual += "."  # Weak
        
        # Add line to results table
        result += f"| {i+1} | {check_result['syllable_count']} | {check_result['min_expected']}-{check_result['max_expected']} | {syllable_match} | {stress_visual} |\n"
    
    # Add summary statistics
    if line_count > 0:
        exact_match_rate = (total_matches / line_count) * 100
        range_match_rate = (total_range_matches / line_count) * 100
        ideal_match_rate = (total_ideal_matches / line_count) * 100
        stress_match_rate = (total_stress_matches / line_count) * 100
        avg_stress_percentage = (total_stress_percentage / line_count) * 100
        
        result += f"\n**Summary:**\n"
        result += f"- Exact syllable match rate: {exact_match_rate:.1f}%\n"
        result += f"- Genre-appropriate syllable range match rate: {range_match_rate:.1f}%\n"
        result += f"- Ideal genre syllable count match rate: {ideal_match_rate:.1f}%\n"
        result += f"- Perfect stress pattern match rate: {stress_match_rate:.1f}%\n"
        result += f"- Average stress pattern accuracy: {avg_stress_percentage:.1f}%\n"
        result += f"- Overall rhythmic accuracy: {((range_match_rate + avg_stress_percentage) / 2):.1f}%\n"
        
        # Add genre-specific notes
        result += f"\n**Genre Notes ({genre}):**\n"
        
        # Add appropriate genre notes based on genre
        if genre.lower() == "pop":
            result += "- Pop music typically allows 1-3 syllables per beat using melisma and syncopation\n"
            result += "- Strong downbeats often align with stressed syllables of important words\n"
        elif genre.lower() == "rock":
            result += "- Rock music often uses 1-2 syllables per beat with some variation\n"
            result += "- Emphasis on strong beats for impact and rhythmic drive\n"
        elif genre.lower() in ["hiphop", "rap"]:
            result += "- Hip-hop/rap often features 2-5 syllables per beat through rapid delivery\n"
            result += "- Complex rhyme patterns and fast delivery create higher syllable density\n"
        elif genre.lower() in ["folk", "country"]:
            result += "- Folk/country music often stays closer to 1:1 syllable-to-beat ratio\n"
            result += "- Narrative focus leads to clearer enunciation of syllables\n"
        else:
            result += "- This genre typically allows for flexible syllable-to-beat relationships\n"
            result += "- Syllable count can vary based on vocal style and song section\n"
    
    return result

# Create Gradio interface
def create_interface():
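    """Build the Gradio Blocks UI and wire the analyze button to process_audio."""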
    with gr.Blocks(title="Music Analysis & Lyrics Generator") as demo:
        gr.Markdown("# Music Analysis & Lyrics Generator")
        gr.Markdown("Upload a music file or record audio to analyze it and generate matching lyrics")
        
        with gr.Row():
            with gr.Column(scale=1):
                audio_input = gr.Audio(
                    label="Upload or Record Audio", 
                    type="filepath",
                    sources=["upload", "microphone"]
                )
                analyze_btn = gr.Button("Analyze and Generate Lyrics", variant="primary")
            
            with gr.Column(scale=2):
                with gr.Tab("Analysis"):
                    analysis_output = gr.Textbox(label="Music Analysis Results", lines=10)
                    
                    with gr.Row():
                        tempo_output = gr.Number(label="Tempo (BPM)")
                        time_sig_output = gr.Textbox(label="Time Signature")
                        emotion_output = gr.Textbox(label="Primary Emotion")
                        theme_output = gr.Textbox(label="Primary Theme")
                        genre_output = gr.Textbox(label="Primary Genre")
                
                with gr.Tab("Generated Lyrics"):
                    lyrics_output = gr.Textbox(label="Generated Lyrics", lines=20)
                
                with gr.Tab("Beat Matching"):
                    beat_match_output = gr.Markdown(label="Beat & Syllable Matching Analysis")
        
        # Set up event handlers
        analyze_btn.click(
            fn=process_audio,
            inputs=[audio_input],
            outputs=[analysis_output, lyrics_output, tempo_output, time_sig_output, 
                    emotion_output, theme_output, genre_output, beat_match_output]
        )
        
        gr.Markdown("""
        ## How it works
        1. Upload or record a music file
        2. The system analyzes tempo, beats, time signature and other musical features
        3. It detects emotion, theme, and music genre
        4. Using beat patterns and syllable stress analysis, it generates lyrics that follow the detected rhythm
        5. Each line of the lyrics is matched to the beat pattern of the corresponding musical phrase
        """)
    
    return demo

# Launch the app
demo = create_interface()

if __name__ == "__main__":
    demo.launch()
else:
    # For Hugging Face Spaces
    app = demo