rawc0der committed
Commit 8dd70aa · 1 Parent(s): aca2598

update sync calling generation

Files changed (1)
app.py +5 -8
app.py CHANGED
@@ -14,7 +14,6 @@ import gc
 from io import BytesIO
 import base64
 import functools
-import asyncio
 
 app = FastAPI()
 
@@ -120,8 +119,8 @@ class ModelContext:
 
 model_manager = ModelManager()
 
-# @spaces.GPU
-async def generate_image(
+@spaces.GPU
+def generate_image(
     model_name: str,
     prompt: str,
     height: int = 512,
@@ -171,7 +170,6 @@ async def generate_image(
         model_manager.unload_current_model()
         raise HTTPException(status_code=500, detail=str(e))
 
-
 @app.post("/generate")
 async def generate_image_endpoint(
     model_name: str,
@@ -187,7 +185,7 @@ async def generate_image_endpoint(
         content = await reference_image.read()
         ref_img = Image.open(BytesIO(content))
 
-    return await generate_image(
+    return generate_image(
         model_name=model_name,
         prompt=prompt,
         height=height,
@@ -206,7 +204,6 @@ async def unload_model():
     model_manager.unload_current_model()
     return {"status": "success", "message": "Model unloaded"}
 
-
 def create_gradio_interface() -> gr.Blocks:
     with gr.Blocks() as interface:
         gr.Markdown("# Text-to-Image Generation Interface")
@@ -300,8 +297,8 @@ def create_gradio_interface() -> gr.Blocks:
             )
         ]
 
-        async def generate(model_name: str, prompt_text: str, h: int, w: int, steps: int, guide_scale: float, ref_img: Optional[Image.Image]) -> Image.Image:
-            response = await generate_image(
+        def generate(model_name: str, prompt_text: str, h: int, w: int, steps: int, guide_scale: float, ref_img: Optional[Image.Image]) -> Image.Image:
+            response = generate_image(
                 model_name=model_name,
                 prompt=prompt_text,
                 height=h,
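
For context, a minimal sketch of the calling pattern this commit moves to, assuming the Hugging Face `spaces` ZeroGPU decorator: the GPU-bound worker becomes a plain synchronous function wrapped in `@spaces.GPU`, and the async FastAPI endpoint (and likewise the Gradio callback) calls it directly instead of awaiting it. The function bodies below are placeholders, not the actual app.py implementation.

import spaces                      # Hugging Face ZeroGPU helper
from PIL import Image
from fastapi import FastAPI

app = FastAPI()

@spaces.GPU
def generate_image(model_name: str, prompt: str,
                   height: int = 512, width: int = 512) -> Image.Image:
    # Placeholder body: the real app.py resolves the model through
    # ModelManager and runs the diffusion pipeline here.
    return Image.new("RGB", (width, height))

@app.post("/generate")
async def generate_image_endpoint(model_name: str, prompt: str):
    # The endpoint can stay async; it now calls the sync, GPU-decorated
    # worker directly rather than awaiting it.
    image = generate_image(model_name=model_name, prompt=prompt)
    return {"status": "success", "size": image.size}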