app.py CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
 import os
 import torch
 import json
+import time
 from transformers import AutoTokenizer, AutoModelForCausalLM

 # Check if CUDA is available and set the device accordingly
@@ -37,31 +38,43 @@ def create_lyrics_prompt(classification_results):
     """
     return prompt

-def …
-    """Generate lyrics using the Janus model"""
-    …
+def generate_lyrics_with_retry(prompt, max_retries=5, initial_wait=2):
+    """Generate lyrics using the Janus model with retry logic"""
+    wait_time = initial_wait
+
+    for attempt in range(max_retries):
+        try:
+            response = requests.post(
+                JANUS_API_URL,
+                headers=headers,
+                json={
+                    "inputs": prompt,
+                    "parameters": {
+                        "max_new_tokens": 200,
+                        "temperature": 0.7,
+                        "top_p": 0.9,
+                        "return_full_text": False
+                    }
                 }
-    …
+            )
+
+            if response.status_code == 200:
+                return response.json()[0]["generated_text"]
+            elif response.status_code == 503:
+                print(f"Model loading, attempt {attempt + 1}/{max_retries}. Waiting {wait_time} seconds...")
+                time.sleep(wait_time)
+                wait_time *= 1.5  # Increase wait time for next attempt
+                continue
+            else:
+                return f"Error generating lyrics: {response.text}"
+
+        except Exception as e:
+            if attempt == max_retries - 1:  # Last attempt
+                return f"Error after {max_retries} attempts: {str(e)}"
+            time.sleep(wait_time)
+            wait_time *= 1.5
+
+    return "Failed to generate lyrics after multiple attempts. Please try again."

 def format_results(classification_results, lyrics, prompt):
     """Format the results for display"""
@@ -107,10 +120,10 @@ def classify_and_generate(audio_file):
             'score': f"{result['score']*100:.2f}%"
         })

-    # Generate lyrics based on classification
+    # Generate lyrics based on classification with retry logic
     print("Generating lyrics based on classification...")
     prompt = create_lyrics_prompt(formatted_results)
-    lyrics = …
+    lyrics = generate_lyrics_with_retry(prompt)

     # Format and return results
     return format_results(formatted_results, lyrics, prompt)
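The retry helper added above relies on module-level names that are not defined in the hunks shown: requests, JANUS_API_URL, and headers. A minimal sketch of what that assumed setup might look like, targeting the Hugging Face Inference API with a token read from an HF_TOKEN environment variable (the model id, secret name, and values below are illustrative assumptions, not taken from this diff):

import os
import requests

# Assumed module-level configuration; the real app.py defines these
# outside the hunks shown. The model id is a placeholder.
JANUS_API_URL = "https://api-inference.huggingface.co/models/<model-id>"
headers = {"Authorization": f"Bearer {os.environ.get('HF_TOKEN', '')}"}

With the defaults in the diff (max_retries=5, initial_wait=2, and a 1.5x backoff factor), a model that never leaves the 503 "loading" state makes the helper wait 2, 3, 4.5, 6.75, and 10.125 seconds (about 26 seconds in total) before returning its failure message.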