import os
import re
import functools

import gradio as gr
import torch
import librosa

from transformers import pipeline, BitsAndBytesConfig
from huggingface_hub import login

from utils import (
    load_audio,
    extract_audio_duration,
    format_genre_results,
    ensure_cuda_availability,
)
from emotionanalysis import MusicAnalyzer
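
# `utils` and `emotionanalysis` are local modules: utils supplies the audio
# loading/formatting helpers imported above, and MusicAnalyzer performs the
# rhythm/key/emotion/theme analysis used throughout this app.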

# Authenticate with the Hugging Face Hub when a token is provided (needed for
# gated or private models).
if "HF_TOKEN" in os.environ:
    login(token=os.environ["HF_TOKEN"])

# Model identifiers and audio settings.
GENRE_MODEL_NAME = "dima806/music_genres_classification"
MUSIC_DETECTION_MODEL = "MIT/ast-finetuned-audioset-10-10-0.4593"
LLM_MODEL_NAME = "Qwen/Qwen3-32B"
SAMPLE_RATE = 22050

# Detect CUDA once at startup; the result drives device placement for the
# pipelines below.
CUDA_AVAILABLE = ensure_cuda_availability()


@functools.lru_cache(maxsize=1)
def load_genre_model():
    """Load and cache the audio genre classification pipeline."""
    print("Loading genre classification model...")
    return pipeline(
        "audio-classification",
        model=GENRE_MODEL_NAME,
        device=0 if CUDA_AVAILABLE else -1,
    )
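
# Because of lru_cache(maxsize=1), repeated calls return the same pipeline
# object, so the model weights are loaded from disk exactly once:
#   clf = load_genre_model()
#   assert clf is load_genre_model()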


@functools.lru_cache(maxsize=1)
def load_llm_pipeline():
    """Load and cache the 4-bit quantized Qwen text-generation pipeline."""
    print("Loading Qwen LLM model with 4-bit quantization...")

    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True,
    )

    return pipeline(
        "text-generation",
        model=LLM_MODEL_NAME,
        device_map="auto",
        trust_remote_code=True,
        model_kwargs={
            "torch_dtype": torch.float16,
            "quantization_config": quantization_config,
            "use_cache": True,
        },
    )
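
# Design note: NF4 4-bit quantization with double quantization stores the model
# weights in roughly a quarter of their fp16 footprint, which is what makes a
# 32B-parameter model loadable on a single high-memory GPU; activations are
# still computed in float16 via bnb_4bit_compute_dtype.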


music_analyzer = MusicAnalyzer()
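# The analyzer instance above is shared by all requests; this assumes
# MusicAnalyzer.analyze_music() keeps no per-call state.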


def process_audio(audio_file):
    """Analyze an uploaded audio file and generate matching lyrics."""
    if audio_file is None:
        return "No audio file provided", None, None, None, None, None, None

    try:
        # Load the audio and measure its duration.
        y, sr = load_audio(audio_file, sr=SAMPLE_RATE)
        duration = extract_audio_duration(y, sr)

        # Run the full musical analysis (rhythm, tonality, emotion, theme).
        music_analysis = music_analyzer.analyze_music(audio_file)

        tempo = music_analysis["rhythm_analysis"]["tempo"]
        time_signature = music_analysis["rhythm_analysis"]["estimated_time_signature"]
        emotion = music_analysis["emotion_analysis"]["primary_emotion"]
        theme = music_analysis["theme_analysis"]["primary_theme"]

        # Classify genre; the classifier expects 16 kHz input, so resample first.
        genre_classifier = load_genre_model()
        y_16k = librosa.resample(y, orig_sr=sr, target_sr=16000)
        genre_results = genre_classifier({"raw": y_16k, "sampling_rate": 16000})

        top_genres = [(genre["label"], genre["score"]) for genre in genre_results]
        genre_results_text = format_genre_results(top_genres)
        primary_genre = top_genres[0][0]

        # Generate lyrics that match the analysis.
        lyrics = generate_lyrics(music_analysis, primary_genre, duration)

        analysis_summary = f"""
### Music Analysis Results

**Duration:** {duration:.2f} seconds
**Tempo:** {tempo:.1f} BPM
**Time Signature:** {time_signature}
**Key:** {music_analysis["tonal_analysis"]["key"]} {music_analysis["tonal_analysis"]["mode"]}
**Primary Emotion:** {emotion}
**Primary Theme:** {theme}
**Top Genre:** {primary_genre}

{genre_results_text}
"""

        return analysis_summary, lyrics, tempo, time_signature, emotion, theme, primary_genre

    except Exception as e:
        error_msg = f"Error processing audio: {str(e)}"
        print(error_msg)
        return error_msg, None, None, None, None, None, None
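
# The seven return values map one-to-one onto the Gradio outputs wired up in
# create_interface(): analysis summary, lyrics, tempo, time signature, emotion,
# theme, and primary genre.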


def generate_lyrics(music_analysis, genre, duration):
    """Prompt the LLM for lyrics matching the analysis, then clean its output."""
    try:
        tempo = music_analysis["rhythm_analysis"]["tempo"]
        key = music_analysis["tonal_analysis"]["key"]
        mode = music_analysis["tonal_analysis"]["mode"]
        emotion = music_analysis["emotion_analysis"]["primary_emotion"]
        theme = music_analysis["theme_analysis"]["primary_theme"]

        text_generator = load_llm_pipeline()

        prompt = f"""Write lyrics for a {genre} song with these specifications:
- Key: {key} {mode}
- Tempo: {tempo} BPM
- Emotion: {emotion}
- Theme: {theme}
- Duration: {duration:.1f} seconds
- Time signature: {music_analysis["rhythm_analysis"]["estimated_time_signature"]}

IMPORTANT INSTRUCTIONS:
- The lyrics should be in English
- Write ONLY the raw lyrics with no structural labels
- DO NOT include [verse], [chorus], [bridge], or any other section markers
- DO NOT include any explanations or thinking about the lyrics
- DO NOT number the verses or lines
- DO NOT use bullet points
- Format as simple line-by-line lyrics only
- Make sure the lyrics match the specified duration and tempo
- Keep lyrics concise enough to fit the duration when sung at the given tempo
"""

        generation_result = text_generator(
            prompt,
            max_new_tokens=1024,
            do_sample=True,
            temperature=0.7,
            top_p=0.9,
            return_full_text=False,
        )

        lyrics = generation_result[0]["generated_text"]

        # Drop any lines that begin with bracketed section markers.
        lyrics = re.sub(r'^\[.*?\].*$', '', lyrics, flags=re.MULTILINE)

        # Strip common conversational preambles and title lines.
        lyrics = re.sub(r"^(Here are|Here is|These are|This is|Let me|I will|I'll).*?:\s*", '', lyrics, flags=re.IGNORECASE)
        lyrics = re.sub(r'^Title:.*?$', '', lyrics, flags=re.MULTILINE).strip()

        # Remove all section markers in any format.
        lyrics = re.sub(r'^\s*(Verse|Chorus|Bridge|Pre.?Chorus|Intro|Outro|Refrain|Hook|Breakdown)(\s*\d*|\s*[A-Z])?:?\s*$', '', lyrics, flags=re.MULTILINE | re.IGNORECASE)
        lyrics = re.sub(r'\[(Verse|Chorus|Bridge|Pre.?Chorus|Intro|Outro|Refrain|Hook|Breakdown)(\s*\d*|\s*[A-Z])?\]', '', lyrics, flags=re.IGNORECASE)

        # Remove any "thinking" or explanatory parts that might be at the beginning.
        lyrics = re.sub(r'^.*?(Let\'s|Here\'s|I need|I want|I\'ll|First|The|This).*?:\s*', '', lyrics, flags=re.IGNORECASE)

        # Trim leading blank lines and collapse runs of blank lines.
        lyrics = re.sub(r'^\s*\n', '', lyrics)
        lyrics = re.sub(r'\n\s*\n\s*\n+', '\n\n', lyrics)
        lyrics = lyrics.strip()

        return lyrics

    except Exception as e:
        error_msg = f"Error generating lyrics: {str(e)}"
        print(error_msg)
        return error_msg
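
# Chat-tuned models such as Qwen3 often prepend commentary or section labels
# even when instructed not to, so the regex passes above are a best-effort
# cleanup rather than a guarantee of label-free output.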


def create_interface():
    """Build the Gradio Blocks UI."""
    with gr.Blocks(title="Music Analysis & Lyrics Generator") as demo:
        gr.Markdown("# Music Analysis & Lyrics Generator")
        gr.Markdown("Upload a music file or record audio to analyze it and generate matching lyrics")

        with gr.Row():
            with gr.Column(scale=1):
                audio_input = gr.Audio(
                    label="Upload or Record Audio",
                    type="filepath",
                    sources=["upload", "microphone"],
                )
                analyze_btn = gr.Button("Analyze and Generate Lyrics", variant="primary")

            with gr.Column(scale=2):
                with gr.Tab("Analysis"):
                    analysis_output = gr.Textbox(label="Music Analysis Results", lines=10)

                    with gr.Row():
                        tempo_output = gr.Number(label="Tempo (BPM)")
                        time_sig_output = gr.Textbox(label="Time Signature")
                        emotion_output = gr.Textbox(label="Primary Emotion")
                        theme_output = gr.Textbox(label="Primary Theme")
                        genre_output = gr.Textbox(label="Primary Genre")

                with gr.Tab("Generated Lyrics"):
                    lyrics_output = gr.Textbox(label="Generated Lyrics", lines=20)

        analyze_btn.click(
            fn=process_audio,
            inputs=[audio_input],
            outputs=[analysis_output, lyrics_output, tempo_output, time_sig_output,
                     emotion_output, theme_output, genre_output],
        )

        gr.Markdown("""
## How it works
1. Upload or record a music file
2. The system analyzes tempo, beats, time signature, and other musical features
3. It detects emotion, theme, and music genre
4. Using this information, it generates lyrics that match the style and length of your music
        """)

    return demo


# Build the interface at import time so both entry points below can use it.
demo = create_interface()

if __name__ == "__main__":
    # Run directly: launch the Gradio server.
    demo.launch()
else:
    # Imported by a hosting platform: expose the Blocks app as `app`.
    app = demo