import requests
import gradio as gr
import os
import time

# API URLs and headers
AUDIO_API_URL = "https://api-inference.huggingface.co/models/MIT/ast-finetuned-audioset-10-10-0.4593"
JANUS_API_URL = "https://api-inference.huggingface.co/models/deepseek-ai/Janus-1.3B"
headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN')}"}

def format_error(message):
    """Helper function to format error messages as JSON"""
    return {"error": message}

def create_lyrics_prompt(classification_results):
    """Create a prompt for lyrics generation based on classification results"""
    # Get the top genre from the classification results
    top_result = classification_results[0]
    genre = top_result['label']

    # Create a detailed prompt
    prompt = f"""Write song lyrics in the style of {genre} music. The song should capture the essence of this genre.
    Additional musical elements detected: {', '.join(r['label'] for r in classification_results[1:3])}
    
    Please write creative and original lyrics that:
    1. Match the {genre} style
    2. Have a clear structure (verse, chorus)
    3. Reflect the mood and themes common in this genre
    
    Generate the lyrics:
    """
    return prompt

def generate_lyrics_with_retry(prompt, max_retries=5, initial_wait=2):
    """Generate lyrics using the Janus model with retry logic"""
    wait_time = initial_wait
    
    for attempt in range(max_retries):
        try:
            response = requests.post(
                JANUS_API_URL,
                headers=headers,
                json={
                    "inputs": prompt,
                    "parameters": {
                        "max_new_tokens": 200,
                        "temperature": 0.7,
                        "top_p": 0.9,
                        "return_full_text": False
                    }
                }
            )
            
            if response.status_code == 200:
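                # A successful call typically returns a list of generation dicts,
                # e.g. [{"generated_text": "..."}] (assumed Inference API shape).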
                return response.json()[0]["generated_text"]
            elif response.status_code == 503:
                print(f"Model loading, attempt {attempt + 1}/{max_retries}. Waiting {wait_time} seconds...")
                time.sleep(wait_time)
                wait_time *= 1.5  # Increase wait time for next attempt
                continue
            else:
                return f"Error generating lyrics: {response.text}"
                
        except Exception as e:
            if attempt == max_retries - 1:  # Last attempt
                return f"Error after {max_retries} attempts: {str(e)}"
            time.sleep(wait_time)
            wait_time *= 1.5
    
    return "Failed to generate lyrics after multiple attempts. Please try again."

def format_results(classification_results, lyrics, prompt):
    """Format the results for display"""
    # Format classification results
    classification_text = "Classification Results:\n"
    for i, result in enumerate(classification_results):
        classification_text += f"{i+1}. {result['label']}: {result['score']}\n"
    
    # Format final output
    output = f"""
{classification_text}
\n---Generated Lyrics---\n
{lyrics}
"""
    return output

def classify_and_generate(audio_file):
    """
    Classify the audio and generate matching lyrics
    """
    if audio_file is None:
        return "Please upload an audio file."
    
    try:
        token = os.environ.get('HF_TOKEN')
        if not token:
            return "Error: HF_TOKEN environment variable is not set. Please set your Hugging Face API token."
        
        # First, classify the audio
        with open(audio_file, "rb") as f:
            data = f.read()
        
        print("Sending request to Audio Classification API...")
        response = requests.post(AUDIO_API_URL, headers=headers, data=data)
        
        if response.status_code == 200:
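            # The classifier typically responds with a ranked list of label/score
            # pairs, e.g. [{"label": "Rock music", "score": 0.87}, ...] (assumed
            # Inference API shape).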
            classification_results = response.json()
            # Format classification results
            formatted_results = []
            for result in classification_results:
                formatted_results.append({
                    'label': result['label'],
                    'score': f"{result['score']*100:.2f}%"
                })
            
            # Generate lyrics based on classification with retry logic
            print("Generating lyrics based on classification...")
            prompt = create_lyrics_prompt(formatted_results)
            lyrics = generate_lyrics_with_retry(prompt)
            
            # Format and return results
            return format_results(formatted_results, lyrics, prompt)
            
        elif response.status_code == 401:
            return "Error: Invalid or missing API token. Please check your Hugging Face API token."
        elif response.status_code == 503:
            return "Error: Model is loading. Please try again in a few seconds."
        else:
            return f"Error: API returned status code {response.status_code}\nResponse: {response.text}"
            
    except Exception as e:
        import traceback
        error_details = traceback.format_exc()
        return f"Error processing request: {str(e)}\nDetails:\n{error_details}"

# Create Gradio interface
iface = gr.Interface(
    fn=classify_and_generate,
    inputs=gr.Audio(type="filepath", label="Upload Audio File"),
    outputs=gr.Textbox(
        label="Results",
        lines=15,
        placeholder="Upload an audio file to see classification results and generated lyrics..."
    ),
    title="Music Genre Classifier + Lyric Generator",
    description="Upload an audio file to classify its genre and generate matching lyrics using AI.",
    examples=[],
)

# Launch the interface
if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)
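
# Usage sketch (assumes this file is saved as app.py and that HF_TOKEN holds a
# valid Hugging Face API token; both are assumptions, not part of the code above):
#   export HF_TOKEN=hf_xxx
#   python app.py
# Then open http://localhost:7860 and upload an audio clip to classify its genre
# and generate matching lyrics.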