disable spaces.GPU release
app.py CHANGED
@@ -14,6 +14,7 @@ import gc
 from io import BytesIO
 import base64
 import functools
+import asyncio
 
 app = FastAPI()
 
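The only change in this hunk is the new asyncio import, presumably there to drive or coordinate the async code paths touched further down (the Gradio-side wrapper becomes a coroutine that awaits generate_image). One common bridge this import enables, shown purely as a sketch and assuming generate_image's remaining parameters have defaults (the helper generate_sync is hypothetical, not part of the app):

import asyncio

# Hypothetical helper: run the async generate_image() coroutine from
# synchronous code; only model_name and prompt are visible in this diff.
def generate_sync(model_name: str, prompt: str):
    return asyncio.run(generate_image(model_name=model_name, prompt=prompt))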
@@ -30,50 +31,7 @@ MODELS = {
             "height": {"min": 256, "max": 1024, "default": 512, "step": 64}
         }
     },
-    "SDXL-Turbo": {
-        "model_id": "stabilityai/sdxl-turbo",
-        "pipeline": AutoPipelineForText2Image,
-        "supports_img2img": True,
-        "parameters": {
-            "num_inference_steps": {"min": 1, "max": 50, "default": 1},
-            "guidance_scale": {"min": 0.0, "max": 20.0, "default": 7.5},
-            "width": {"min": 256, "max": 1024, "default": 512, "step": 64},
-            "height": {"min": 256, "max": 1024, "default": 512, "step": 64}
-        }
-    },
-    "SD-1.5": {
-        "model_id": "runwayml/stable-diffusion-v1-5",
-        "pipeline": StableDiffusionPipeline,
-        "supports_img2img": True,
-        "parameters": {
-            "num_inference_steps": {"min": 1, "max": 50, "default": 30},
-            "guidance_scale": {"min": 1, "max": 20, "default": 7.5},
-            "width": {"min": 256, "max": 1024, "default": 512, "step": 64},
-            "height": {"min": 256, "max": 1024, "default": 512, "step": 64}
-        }
-    },
-    "Waifu-Diffusion": {
-        "model_id": "hakurei/waifu-diffusion",
-        "pipeline": StableDiffusionPipeline,
-        "supports_img2img": True,
-        "parameters": {
-            "num_inference_steps": {"min": 1, "max": 100, "default": 50},
-            "guidance_scale": {"min": 1, "max": 15, "default": 7.5},
-            "width": {"min": 256, "max": 1024, "default": 512, "step": 64},
-            "height": {"min": 256, "max": 1024, "default": 512, "step": 64}
-        }
-    },
-    "Flux": {
-        "model_id": "black-forest-labs/flux-1-1-dev",
-        "pipeline": AutoPipelineForText2Image,
-        "supports_img2img": True,
-        "parameters": {
-            "num_inference_steps": {"min": 1, "max": 50, "default": 25},
-            "guidance_scale": {"min": 1, "max": 15, "default": 7.5},
-            "width": {"min": 256, "max": 1024, "default": 512, "step": 64},
-            "height": {"min": 256, "max": 1024, "default": 512, "step": 64}
-        }
-    }
+    # Add other models here...
 }
 
 class ModelManager:
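The commit collapses the MODELS registry from five entries down to one plus a placeholder comment. Every removed entry followed the same schema: a Hugging Face model_id, a diffusers pipeline class, an img2img flag, and min/max/default ranges per parameter. Re-enabling a model means restoring a block of that shape, for example (a sketch that simply reuses the deleted SD-1.5 entry; StableDiffusionPipeline comes from diffusers, as elsewhere in the file):

MODELS["SD-1.5"] = {
    "model_id": "runwayml/stable-diffusion-v1-5",
    "pipeline": StableDiffusionPipeline,
    "supports_img2img": True,
    "parameters": {
        "num_inference_steps": {"min": 1, "max": 50, "default": 30},
        "guidance_scale": {"min": 1, "max": 20, "default": 7.5},
        "width": {"min": 256, "max": 1024, "default": 512, "step": 64},
        "height": {"min": 256, "max": 1024, "default": 512, "step": 64}
    }
}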
@@ -162,6 +120,7 @@ class ModelContext:
 
 model_manager = ModelManager()
 
+# @spaces.GPU
 async def generate_image(
     model_name: str,
     prompt: str,
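The commented-out decorator lines up with the commit title: on ZeroGPU Spaces, the spaces.GPU decorator attaches a GPU for the duration of each call and releases it afterwards, and leaving it commented out means no per-call attach/release happens. For reference, the usual pattern looks like this (a sketch only; the model, duration value, and function are illustrative, not taken from this repo):

import spaces
import torch
from diffusers import AutoPipelineForText2Image

pipe = AutoPipelineForText2Image.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.float16
)

@spaces.GPU(duration=120)  # hold a ZeroGPU slice for up to ~120 s per call
def run_pipeline(prompt: str):
    pipe.to("cuda")  # the GPU is only attached while the decorated call runs
    return pipe(prompt, num_inference_steps=1).images[0]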
@@ -212,6 +171,7 @@ async def generate_image(
         model_manager.unload_current_model()
         raise HTTPException(status_code=500, detail=str(e))
 
+
 @app.post("/generate")
 async def generate_image_endpoint(
     model_name: str,
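This hunk leaves the POST /generate route itself untouched. For orientation, FastAPI exposes the route's bare simple-typed parameters (model_name, prompt, ...) as query parameters, so a client call looks roughly like the following (hypothetical: host, port, and any parameters beyond the two visible in the diff are assumptions):

import requests

# Hypothetical client call; model_name must match a key in MODELS.
resp = requests.post(
    "http://localhost:8000/generate",
    params={"model_name": "SD-1.5", "prompt": "a lighthouse at dusk"},
)
print(resp.status_code, resp.headers.get("content-type"))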
@@ -246,7 +206,8 @@ async def unload_model():
     model_manager.unload_current_model()
     return {"status": "success", "message": "Model unloaded"}
 
-def create_gradio_interface():
+
+def create_gradio_interface() -> gr.Blocks:
     with gr.Blocks() as interface:
         gr.Markdown("# Text-to-Image Generation Interface")
 
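The factory's new return annotation only documents that it hands back the gr.Blocks it builds. A typical call site builds the UI once and then launches it (or mounts it on the FastAPI app, as sketched at the end of this diff); for example, assuming nothing beyond the function shown:

demo = create_gradio_interface()   # build the Blocks UI once
demo.queue()                       # optional: serialize GPU-bound requests
demo.launch(server_name="0.0.0.0", server_port=7860)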
@@ -312,7 +273,7 @@ def create_gradio_interface():
             value=model_manager.get_memory_status()
         )
 
-        def update_params(model_name):
+        def update_params(model_name: str) -> list:
             model_config = MODELS[model_name]["parameters"]
             return [
                 gr.update(
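update_params returns one gr.update(...) per parameter control so the sliders can be re-ranged when the model changes; the new signature only adds annotations. The way such a function is typically wired up (a sketch; the component names here are placeholders since the real ones sit outside the hunks shown, and MODELS is the registry defined above):

import gradio as gr

with gr.Blocks() as demo:
    model_dd = gr.Dropdown(choices=list(MODELS.keys()), label="Model")
    steps = gr.Slider(1, 100, value=30, step=1, label="Steps")
    guidance = gr.Slider(0.0, 20.0, value=7.5, label="Guidance scale")

    def update_params(model_name: str) -> list:
        p = MODELS[model_name]["parameters"]
        return [
            gr.update(minimum=p["num_inference_steps"]["min"],
                      maximum=p["num_inference_steps"]["max"],
                      value=p["num_inference_steps"]["default"]),
            gr.update(minimum=p["guidance_scale"]["min"],
                      maximum=p["guidance_scale"]["max"],
                      value=p["guidance_scale"]["default"]),
        ]

    # re-range the sliders whenever a different model is selected
    model_dd.change(update_params, inputs=model_dd, outputs=[steps, guidance])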
@@ -339,9 +300,8 @@ def create_gradio_interface():
                 )
             ]
 
-
-
-            response = generate_image(
+        async def generate(model_name: str, prompt_text: str, h: int, w: int, steps: int, guide_scale: float, ref_img: Optional[Image.Image]) -> Image.Image:
+            response = await generate_image(
                 model_name=model_name,
                 prompt=prompt_text,
                 height=h,
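Gradio accepts async functions as event handlers and awaits them itself, so the rewritten wrapper can be attached to a button directly instead of being called like a regular function. A sketch of that wiring, with the same caveat that these component names are placeholders:

# Hypothetical wiring; the components stand in for the real ones.
generate_btn.click(
    fn=generate,   # the async wrapper added in this hunk
    inputs=[model_dd, prompt_box, height_sl, width_sl, steps, guidance, ref_image],
    outputs=output_image,
)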
@@ -378,7 +338,7 @@ def create_gradio_interface():
         )
 
     return interface
-
+
 if __name__ == "__main__":
     import uvicorn
     from threading import Thread
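The file's tail (only its first lines appear in the last hunk) starts uvicorn and, judging by the Thread import, runs one of the two servers in a background thread. A single-process alternative that Gradio supports is mounting the Blocks onto the existing FastAPI app; the following is a sketch, not taken from this repo, using 7860 because that is the port Spaces expects:

import gradio as gr
import uvicorn

# Serve the FastAPI routes and the Gradio UI from one process by mounting
# the Blocks app onto the existing FastAPI instance.
app = gr.mount_gradio_app(app, create_gradio_interface(), path="/ui")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)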