developerskyebrowse committed on
Commit e344a51
1 Parent(s): 963464a
Files changed (2):
  1. app.py +0 -17
  2. local_app.py +22 -20
app.py CHANGED
@@ -366,23 +366,6 @@ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
     @gr.on(triggers=[image.upload, prompt.submit, run_button.click], inputs=config, outputs=result, show_progress="minimal")
     def auto_process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
         return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
-
-    # AI Image Processing
-    @gr.on(triggers=[use_ai_button.click], inputs=config, outputs=result, show_progress="minimal")
-    def submit(result, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
-        return process_image(result, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
-
-    # # Change input to result
-    # @gr.on(triggers=[use_ai_button.click], inputs=result, outputs=image, show_progress="hidden")
-    # def update_input(result):
-    #     try:
-    #         print("Updating image to AI Temp Image")
-    #         return result
-    #         # ai_temp_image = Image.open(os.path.join("/data", "temp_image.jpg"))
-    #         # return ai_temp_image
-    #     except FileNotFoundError:
-    #         print("No AI Image Available")
-    #         return None
 
     @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result], show_progress="full")
     def submit(previous_result, image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
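Note: app.py now routes `use_ai_button.click` only through the handler shown in context above, which prepends the previous `result` to the `config` inputs and writes back to both `image` and `result`. A minimal, self-contained sketch of that wiring pattern, with illustrative component names and a stand-in `process_image` (not copied from app.py):

```python
import gradio as gr

def process_image(img, prompt):
    # Hypothetical stand-in for the real process_image(); passes the image through.
    return img

with gr.Blocks() as demo:
    image = gr.Image(label="Input image")
    prompt = gr.Textbox(label="Prompt")
    result = gr.Image(label="Result")
    use_ai_button = gr.Button("Use AI result as input")

    config = [prompt]  # the real app passes its full config list here

    @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result], show_progress="full")
    def submit(previous_result, prompt):
        # previous_result arrives first because `result` is first in the inputs list
        return previous_result, process_image(previous_result, prompt)

demo.launch()
```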
local_app.py CHANGED
@@ -333,36 +333,36 @@ with gr.Blocks(theme="bethecloud/storj_theme", css=css) as demo:
         guidance_scale,
         seed,
     ]
-
+
     with gr.Row():
         helper_text = gr.Markdown("## Tap and hold (on mobile) to save the image.", visible=True)
-
+
     # image processing
     @gr.on(triggers=[image.upload, prompt.submit, run_button.click], inputs=config, outputs=result, show_progress="minimal")
     def auto_process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
        return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
 
-    # AI Image Processing
-    @gr.on(triggers=[use_ai_button.click], inputs=config, outputs=result, show_progress="minimal")
-    def submit(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
-        return process_image(image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
-
-    # Change input to result
-    @gr.on(triggers=[use_ai_button.click], inputs=None, outputs=image, show_progress="hidden")
-    def update_input():
-        try:
-            print("Updating image to AI Temp Image")
-            ai_temp_image = Image.open("temp_image.jpg")
-            return ai_temp_image
-        except FileNotFoundError:
-            print("No AI Image Available")
-            return None
+    # # AI Image Processing
+    # @gr.on(triggers=[use_ai_button.click], inputs=config, outputs=result, show_progress="minimal")
+    # def submit(result, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
+    #     return process_image(result, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
+
+    @gr.on(triggers=[use_ai_button.click], inputs=[result] + config, outputs=[image, result], show_progress="minimal")
+    def submit(previous_result, image, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed, progress=gr.Progress(track_tqdm=True)):
+        # First, yield the previous result to update the input image immediately
+        yield previous_result, gr.update()
+
+        # Then, process the new input image
+        new_result = process_image(previous_result, style_selection, prompt, a_prompt, n_prompt, num_images, image_resolution, preprocess_resolution, num_steps, guidance_scale, seed)
+
+        # Finally, yield the new result
+        yield previous_result, new_result
 
     # Turn off buttons when processing
     @gr.on(triggers=[image.upload, use_ai_button.click, run_button.click], inputs=None, outputs=[run_button, use_ai_button], show_progress="hidden")
     def turn_buttons_off():
         return gr.update(visible=False), gr.update(visible=False)
-
+
     # Turn on buttons when processing is complete
     @gr.on(triggers=[result.change], inputs=None, outputs=[use_ai_button, run_button], show_progress="hidden")
     def turn_buttons_on():
@@ -385,7 +385,9 @@ def process_image(
     # torch.cuda.synchronize()
     preprocess_start = time.time()
     print("processing image")
-    preprocessor.load("NormalBae")
+
+    # global preprocessor
+    # preprocessor.load("NormalBae")
 
     seed = random.randint(0, MAX_SEED)
     generator = torch.cuda.manual_seed(seed)
@@ -413,7 +415,7 @@ def process_image(
         image=control_image,
     ).images[0]
     print(f"\n-------------------------Inference done in: {time.time() - start:.2f} seconds-------------------------")
-    results.save("temp_image.jpg")
+    # results.save(os.path.join("/data", "temp_image.jpg"))
     # torch.cuda.synchronize()
     # torch.cuda.empty_cache()
     return results
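Note: the new `submit` handler in local_app.py is a generator, so the temp-image file round-trip is no longer needed. A minimal sketch of that yield-based pattern, using assumed component names and a stand-in `slow_process` in place of the real pipeline:

```python
import time
import gradio as gr

def slow_process(img):
    time.sleep(2)  # pretend this is the diffusion pipeline
    return img

with gr.Blocks() as demo:
    image = gr.Image(label="Input image")
    result = gr.Image(label="Result")
    use_ai_button = gr.Button("Reuse result")

    @gr.on(triggers=[use_ai_button.click], inputs=[result], outputs=[image, result], show_progress="minimal")
    def submit(previous_result):
        # Step 1: show the previous result as the new input; leave `result` untouched.
        yield previous_result, gr.update()
        # Step 2: yield the final pair once processing is done.
        yield previous_result, slow_process(previous_result)

demo.launch()
```

Each `yield` pushes a partial update to the UI, so the input image flips over immediately while the result fills in once processing finishes.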