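"""Gradio app for a Hugging Face Space: classify an uploaded audio clip with the
MIT AST AudioSet model via the Inference API, then generate genre-matched lyrics
with the deepseek-ai/Janus-1.3B text-generation endpoint."""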
import requests
import gradio as gr
import os
import torch
import json
import time
from transformers import AutoTokenizer, AutoModelForCausalLM

# Check if CUDA is available and set the device accordingly
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# API URLs and headers
AUDIO_API_URL = "https://api-inference.huggingface.co/models/MIT/ast-finetuned-audioset-10-10-0.4593"
JANUS_API_URL = "https://api-inference.huggingface.co/models/deepseek-ai/Janus-1.3B"
headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN')}"}
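
# Note: the parsing below assumes the usual Inference API response shapes (an
# assumption based on how the responses are read here, not re-verified against the docs):
#   audio classification -> a JSON list of {"label": str, "score": float}
#   text generation      -> a JSON list like [{"generated_text": str}]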

def format_error(message):
    """Helper function to format error messages as JSON"""
    return {"error": message}

def create_lyrics_prompt(classification_results):
    """Create a prompt for lyrics generation based on classification results"""
    # Get the top genre and its characteristics
    top_result = classification_results[0]
    genre = top_result['label']
    confidence = float(top_result['score'].strip('%')) / 100

    # Create a detailed prompt
    prompt = f"""Write song lyrics in the style of {genre} music. The song should capture the essence of this genre.
Additional musical elements detected: {', '.join(r['label'] for r in classification_results[1:3])}
Please write creative and original lyrics that:
1. Match the {genre} style
2. Have a clear structure (verse, chorus)
3. Reflect the mood and themes common in this genre
Generate the lyrics:
"""
    return prompt

def generate_lyrics_with_retry(prompt, max_retries=5, initial_wait=2):
    """Generate lyrics using the Janus model with retry logic"""
    wait_time = initial_wait

    for attempt in range(max_retries):
        try:
            response = requests.post(
                JANUS_API_URL,
                headers=headers,
                json={
                    "inputs": prompt,
                    "parameters": {
                        "max_new_tokens": 200,
                        "temperature": 0.7,
                        "top_p": 0.9,
                        "return_full_text": False
                    }
                }
            )

            if response.status_code == 200:
                return response.json()[0]["generated_text"]
            elif response.status_code == 503:
                print(f"Model loading, attempt {attempt + 1}/{max_retries}. Waiting {wait_time} seconds...")
                time.sleep(wait_time)
                wait_time *= 1.5  # Increase wait time for next attempt
                continue
            else:
                return f"Error generating lyrics: {response.text}"
        except Exception as e:
            if attempt == max_retries - 1:  # Last attempt
                return f"Error after {max_retries} attempts: {str(e)}"
            time.sleep(wait_time)
            wait_time *= 1.5

    return "Failed to generate lyrics after multiple attempts. Please try again."

def format_results(classification_results, lyrics, prompt):
    """Format the results for display"""
    # Format classification results
    classification_text = "Classification Results:\n"
    for i, result in enumerate(classification_results):
        classification_text += f"{i+1}. {result['label']}: {result['score']}\n"

    # Format final output
    output = f"""
{classification_text}
\n---Generated Lyrics---\n
{lyrics}
"""
    return output

def classify_and_generate(audio_file):
    """
    Classify the audio and generate matching lyrics
    """
    if audio_file is None:
        return "Please upload an audio file."

    try:
        token = os.environ.get('HF_TOKEN')
        if not token:
            return "Error: HF_TOKEN environment variable is not set. Please set your Hugging Face API token."

        # First, classify the audio
        with open(audio_file, "rb") as f:
            data = f.read()

        print("Sending request to Audio Classification API...")
        response = requests.post(AUDIO_API_URL, headers=headers, data=data)

        if response.status_code == 200:
            classification_results = response.json()

            # Format classification results
            formatted_results = []
            for result in classification_results:
                formatted_results.append({
                    'label': result['label'],
                    'score': f"{result['score']*100:.2f}%"
                })

            # Generate lyrics based on classification with retry logic
            print("Generating lyrics based on classification...")
            prompt = create_lyrics_prompt(formatted_results)
            lyrics = generate_lyrics_with_retry(prompt)

            # Format and return results
            return format_results(formatted_results, lyrics, prompt)
        elif response.status_code == 401:
            return "Error: Invalid or missing API token. Please check your Hugging Face API token."
        elif response.status_code == 503:
            return "Error: Model is loading. Please try again in a few seconds."
        else:
            return f"Error: API returned status code {response.status_code}\nResponse: {response.text}"
    except Exception as e:
        import traceback
        error_details = traceback.format_exc()
        return f"Error processing request: {str(e)}\nDetails:\n{error_details}"

# Create Gradio interface
iface = gr.Interface(
    fn=classify_and_generate,
    inputs=gr.Audio(type="filepath", label="Upload Audio File"),
    outputs=gr.Textbox(
        label="Results",
        lines=15,
        placeholder="Upload an audio file to see classification results and generated lyrics..."
    ),
    title="Music Genre Classifier + Lyric Generator",
    description="Upload an audio file to classify its genre and generate matching lyrics using AI.",
    examples=[],
)

# Launch the interface
if __name__ == "__main__":
    iface.launch(server_name="0.0.0.0", server_port=7860)