amildravid4292 committed on
Commit
695ec97
·
verified ·
1 Parent(s): 86d1598

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -5
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
  # os.system("pip uninstall -y gradio")
3
- # os.system('pip install gradio==4.0.0')
4
- #3.43.1
5
  import torch
6
  import torchvision
7
  import torchvision.transforms as transforms
@@ -110,6 +110,7 @@ def inference(prompt, negative_prompt, guidance_scale, ddim_steps, seed):
110
 
111
 
112
 
 
113
  @torch.no_grad()
114
  def edit_inference(input_image, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
115
 
@@ -196,7 +197,7 @@ def edit_inference(input_image, prompt, negative_prompt, guidance_scale, ddim_st
196
 
197
  return (image, input_image["background"])
198
 
199
-
200
  def sample_then_run():
201
  sample_model()
202
  prompt = "sks person"
@@ -341,6 +342,7 @@ def invert(image, mask, pcs=10000, epochs=400, weight_decay = 1e-10, lr=1e-1):
341
  return network
342
 
343
 
 
344
  def run_inversion(input_image, pcs, epochs, weight_decay,lr):
345
  global network
346
  init_image = input_image["background"].convert("RGB").resize((512, 512))
@@ -362,6 +364,7 @@ def run_inversion(input_image, pcs, epochs, weight_decay,lr):
362
 
363
 
364
 
 
365
  def file_upload(file):
366
  global unet
367
  del unet
@@ -435,8 +438,8 @@ with gr.Blocks(css="style.css") as demo:
435
  with gr.Column():
436
  with gr.Row():
437
  with gr.Column():
438
- #input_image = gr.Image(source='upload', elem_id="image_upload", tool='sketch', type='pil', label="Upload image and draw to define mask",
439
- # height=512, width=512, brush_color='#00FFFF', mask_opacity=0.6)
440
  input_image = gr.ImageEditor(elem_id="image_upload", type='pil', label="Upload image and draw to define mask",
441
  height=512, width=512, brush=gr.Brush(), layers=False)
442
  with gr.Row():
 
1
  import os
2
  # os.system("pip uninstall -y gradio")
3
+ # #os.system('pip install gradio==3.43.1')
4
+
5
  import torch
6
  import torchvision
7
  import torchvision.transforms as transforms
 
110
 
111
 
112
 
113
+ # @spaces.GPU()
114
  @torch.no_grad()
115
  def edit_inference(input_image, prompt, negative_prompt, guidance_scale, ddim_steps, seed, start_noise, a1, a2, a3, a4):
116
 
 
197
 
198
  return (image, input_image["background"])
199
 
200
+ # @spaces.GPU()
201
  def sample_then_run():
202
  sample_model()
203
  prompt = "sks person"
 
342
  return network
343
 
344
 
345
+ # @spaces.GPU(duration=200)
346
  def run_inversion(input_image, pcs, epochs, weight_decay,lr):
347
  global network
348
  init_image = input_image["background"].convert("RGB").resize((512, 512))
 
364
 
365
 
366
 
367
+ # @spaces.GPU()
368
  def file_upload(file):
369
  global unet
370
  del unet
 
438
  with gr.Column():
439
  with gr.Row():
440
  with gr.Column():
441
+ # input_image = gr.Image(source='upload', elem_id="image_upload", tool='sketch', type='pil', label="Upload image and draw to define mask",
442
+ # height=512, width=512, brush_color='#00FFFF', mask_opacity=0.6)
443
  input_image = gr.ImageEditor(elem_id="image_upload", type='pil', label="Upload image and draw to define mask",
444
  height=512, width=512, brush=gr.Brush(), layers=False)
445
  with gr.Row():