Bobby committed
Commit 0abe9ec · 1 Parent(s): 352cc63
Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -19,7 +19,7 @@ from diffusers import (
     ControlNetModel,
     DPMSolverMultistepScheduler,
     StableDiffusionControlNetPipeline,
-    AutoencoderKL,
+    # AutoencoderKL,
 )
 from controlnet_aux_local import NormalBaeDetector
 
@@ -371,10 +371,8 @@ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
     @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result], show_progress="minimal")
     def submit(previous_result, image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
         # First, yield the previous result to update the input image immediately
-        preprocessor.load("NormalBae")
         yield previous_result, gr.update()
         # Then, process the new input image
-
         new_result = process_image(previous_result, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
         # Finally, yield the new result
         yield previous_result, new_result
@@ -389,7 +387,7 @@ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
     def turn_buttons_on():
         return gr.update(visible=True), gr.update(visible=True)
 
-@spaces.GPU(duration=10)
+@spaces.GPU(duration=12)
 @torch.inference_mode()
 def process_image(
     image,
@@ -410,7 +408,7 @@ def process_image(
 
     seed = random.randint(0, MAX_SEED)
     generator = torch.cuda.manual_seed(seed)
-    # preprocessor.load("NormalBae")
+    preprocessor.load("NormalBae")
     control_image = preprocessor(
         image=image,
         image_resolution=image_resolution,
@@ -436,7 +434,7 @@ def process_image(
     ).images[0]
     print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
     # torch.cuda.synchronize()
-    torch.cuda.empty_cache()
+    # torch.cuda.empty_cache()
     return results
 
 if prod:
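The submit() callback touched in the second hunk is a Gradio generator: it yields once to push the previous result back into the input image immediately, then yields again with the freshly processed output. Below is a minimal, self-contained sketch of that two-stage yield pattern; process_image() here is only a stand-in for the real ControlNet call, and the real handler also receives the prompt and configuration inputs. Separately, @spaces.GPU(duration=12) raises the requested ZeroGPU allocation per call by two seconds, presumably to cover the preprocessor.load("NormalBae") that now runs inside process_image().

import time

import gradio as gr


def process_image(image):
    # Stand-in for the GPU-bound ControlNet inference in app.py.
    time.sleep(1)
    return image


with gr.Blocks() as demo:
    image = gr.Image(label="image")
    result = gr.Image(label="result")
    use_ai_button = gr.Button("Use AI result as input")

    @gr.on(triggers=[use_ai_button.click], inputs=[result], outputs=[image, result], show_progress="minimal")
    def submit(previous_result):
        # First, yield the previous result so the input image updates immediately.
        yield previous_result, gr.update()
        # Then process it and yield the new result.
        new_result = process_image(previous_result)
        yield previous_result, new_result


if __name__ == "__main__":
    demo.launch()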