Update app.py
app.py CHANGED
@@ -33,6 +33,9 @@ unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cpu"))
 pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float16, variant="fp16").to("cpu")
 pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
 
+os.environ['HF_API_KEY']
+api_key = os.getenv('HF_API_KEY')
+API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
 
 # Function to transcribe, translate, and generate an image
 def process_audio(audio_path, generate_image):
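
Note on the new configuration lines: the bare os.environ['HF_API_KEY'] statement has no effect beyond raising a KeyError if the secret is missing; the token itself is read by the os.getenv('HF_API_KEY') call on the next line. The headers dict that the new query() helper in the second hunk passes to requests.post() is not shown in this diff; a minimal sketch of how it would typically be built for the Hugging Face Inference API, assuming the standard bearer-token scheme and that the Space exposes the token as the HF_API_KEY secret:

# Sketch only -- the actual definition sits outside the hunks shown here.
api_key = os.getenv('HF_API_KEY')
headers = {"Authorization": f"Bearer {api_key}"}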
@@ -71,11 +74,31 @@ def process_audio(audio_path, generate_image):
     return tamil_text, translation, None
 
 
+def query(payload, max_retries=5):
+    for attempt in range(max_retries):
+        response = requests.post(API_URL, headers=headers, json=payload)
+
+        if response.status_code == 503:
+            print(f"Model is still loading, retrying... Attempt {attempt + 1}/{max_retries}")
+            estimated_time = min(response.json().get("estimated_time", 60), 60)
+            time.sleep(estimated_time)
+            continue
+
+        if response.status_code != 200:
+            print(f"Error: Received status code {response.status_code}")
+            print(f"Response: {response.text}")
+            return None
+
+        return response.content
+
+    print(f"Failed to generate image after {max_retries} attempts.")
+    return None
+
 # Function for direct prompt to image generation
 def generate_image_from_prompt(prompt):
     try:
-
-        return
+        image_bytes = query({"inputs": prompt})
+        return image_bytes
     except Exception as e:
         return f"An error occurred during image generation: {str(e)}"
 
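
The new query() helper polls the hosted Inference API: on a 503 it sleeps for the reported estimated_time (capped at 60 seconds) and retries, up to max_retries attempts; on any other non-200 status it logs the response and returns None; otherwise it returns the raw response bytes. This assumes requests and time are imported at the top of app.py, outside the hunks shown. Because generate_image_from_prompt() now returns raw bytes, a UI image component would typically need them decoded first; a hedged sketch under that assumption (the helper name is hypothetical and not part of this change):

import io
from PIL import Image

def bytes_to_pil(image_bytes):
    # Decode the raw image bytes returned by query() into a PIL image
    # (e.g. for a Gradio image output); pass None through unchanged.
    if image_bytes is None:
        return None
    return Image.open(io.BytesIO(image_bytes))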