NikhilJoson committed on
Commit aaf177a · verified · 1 parent: a4c073c

Update app.py

Files changed (1): app.py +6 -8
app.py CHANGED
@@ -19,7 +19,6 @@ from preprocess.humanparsing.run_parsing import Parsing
 from preprocess.openpose.run_openpose import OpenPose
 from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
 from torchvision.transforms.functional import to_pil_image
-from datasets import Dataset
 
 
 def pil_to_binary_mask(pil_image, threshold=0):
@@ -85,7 +84,7 @@ pipe = TryonPipeline.from_pretrained(
 pipe.unet_encoder = UNet_Encoder
 
 @spaces.GPU
-def start_tryon(dict,garm_img,garment_des,cloth_type,is_checked,is_checked_crop,denoise_steps,seed):
+def start_tryon(img,garm_img,garment_des,cloth_type,is_checked,is_checked_crop,denoise_steps,seed):
     device = "cuda"
 
     openpose_model.preprocessor.body_estimation.model.to(device)
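With this change, start_tryon takes a plain PIL image as its first argument instead of the ImageEditor dict, so it can be invoked directly. A minimal sketch of a call under the new signature, assuming hypothetical local files and parameter values (the return order of input view, try-on result, and mask is inferred from how main_ unpacks the tuple below):

    from PIL import Image

    person = Image.open("person.jpg").convert("RGB")  # hypothetical test image
    shirt = Image.open("shirt.jpg").convert("RGB")    # hypothetical garment image

    # cloth_type is one of the get_mask_location categories used in app.py,
    # e.g. "upper_body" or "lower_body".
    image_in, image_out, masked = start_tryon(
        person, shirt, "a plain cotton shirt", "upper_body",
        is_checked=True, is_checked_crop=False, denoise_steps=30, seed=42,
    )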
@@ -93,7 +92,7 @@ def start_tryon(dict,garm_img,garment_des,cloth_type,is_checked,is_checked_crop,
     pipe.unet_encoder.to(device)
 
     garm_img= garm_img.convert("RGB").resize((768,1024))
-    human_img_orig = dict["background"].convert("RGB")
+    human_img_orig = img.copy() #dict["background"].convert("RGB")
 
     if is_checked_crop:
         width, height = human_img_orig.size
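The body of the is_checked_crop branch falls outside this hunk; it crops the person image to a 3:4 window before the 768x1024 resize. The sketch below is an assumption about the usual center-crop pattern, not the file's exact code:

    def crop_to_3_4_sketch(human_img_orig):
        # Hypothetical sketch: center-crop to a 3:4 aspect ratio, then resize
        # to the 768x1024 resolution the pipeline expects.
        width, height = human_img_orig.size
        target_width = int(min(width, height * (3 / 4)))
        target_height = int(min(height, width * (4 / 3)))
        left = (width - target_width) / 2
        top = (height - target_height) / 2
        crop = human_img_orig.crop((left, top, left + target_width, top + target_height))
        return crop.resize((768, 1024))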
@@ -116,7 +115,7 @@ def start_tryon(dict,garm_img,garment_des,cloth_type,is_checked,is_checked_crop,
         mask, mask_gray = get_mask_location('hd', cloth_type, model_parse, keypoints)
         mask = mask.resize((768,1024))
     else:
-        mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
+        mask = pil_to_binary_mask(img.resize((768, 1024)))
     # mask = transforms.ToTensor()(mask)
     # mask = mask.unsqueeze(0)
     mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
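Note that the manual-mask branch previously thresholded the pen-drawn editor layer (dict['layers'][0]); after the change it thresholds the person photo itself. For reference, pil_to_binary_mask is defined near the top of app.py, but only its signature appears in this diff, so the body below is a guess at the typical implementation rather than the file's actual code:

    import numpy as np
    from PIL import Image

    def pil_to_binary_mask_sketch(pil_image, threshold=0):
        # Hypothetical reconstruction: pixels above the threshold become
        # white (255), everything else black (0), giving a binary inpaint mask.
        gray = np.array(pil_image.convert("L"))
        binary = (gray > threshold).astype(np.uint8) * 255
        return Image.fromarray(binary)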
@@ -204,8 +203,6 @@ def main_(imgs,topwear_img,topwear_des,bottomwear_img,bottomwear_des,dress_img,d
         return start_tryon(imgs,bottomwear_img,bottomwear_des,"lower_body",is_checked,is_checked_crop,denoise_steps,seed)
     elif topwear_img!=None and bottomwear_img!=None:
         _, half_img, half_mask = start_tryon(imgs,topwear_img,topwear_des,"upper_body",is_checked,is_checked_crop,denoise_steps,seed)
-        IMG_data = [{"image": half_img}]
-        img_dataset = Dataset.from_dict(IMG_data)
         return start_tryon(img_dataset,bottomwear_img,bottomwear_des,"lower_body",is_checked,is_checked_crop,denoise_steps,seed)
 
 
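As committed, the chained call still references img_dataset, which no longer exists once the Dataset round-trip is removed. Under the new start_tryon(img, ...) signature the intermediate result presumably needs to be passed directly; a minimal sketch of that branch, assuming half_img (the PIL image returned by the first pass) is the intended input:

    elif topwear_img != None and bottomwear_img != None:
        # First pass: fit the topwear and keep the intermediate try-on image.
        _, half_img, half_mask = start_tryon(imgs, topwear_img, topwear_des, "upper_body",
                                             is_checked, is_checked_crop, denoise_steps, seed)
        # Second pass: feed the intermediate PIL image straight back in, since
        # start_tryon now takes a plain image instead of a Dataset.
        return start_tryon(half_img, bottomwear_img, bottomwear_des, "lower_body",
                           is_checked, is_checked_crop, denoise_steps, seed)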
@@ -232,7 +229,8 @@ with image_blocks as demo:
     gr.HTML("<center><p>Upload an image of a person and images of the clothes✨</p></center>")
     with gr.Row():
         with gr.Column():
-            imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
+            inp_img = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking',
+                                     image_mode='RGB', layers=False, interactive=True)
             with gr.Row():
                 is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
             with gr.Row():
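Worth noting: a gr.ImageEditor with type="pil" passes its value to the callback as a dict of PIL images (background, layers, composite), not a single image, so main_ would presumably need to unwrap it before handing it to start_tryon's new img parameter. A minimal sketch, assuming the composite is the desired input (extract_person_image is a hypothetical helper, not in the original file):

    def extract_person_image(editor_value):
        # Hypothetical helper: gr.ImageEditor(type="pil") yields a dict with
        # "background", "layers", and "composite" entries, each a PIL image.
        if isinstance(editor_value, dict):
            return editor_value["composite"].convert("RGB")
        return editor_value.convert("RGB")  # already a plain PIL image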
@@ -276,7 +274,7 @@
     seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=42)
 
 
-    try_button.click(fn=main_, inputs=[imgs,topwear_image,topwear_desc,bottomwear_image,bottomwear_desc,dress_image,dress_desc,is_checked,is_checked_crop,denoise_steps,seed],
+    try_button.click(fn=main_, inputs=[inp_img,topwear_image,topwear_desc,bottomwear_image,bottomwear_desc,dress_image,dress_desc,is_checked,is_checked_crop,denoise_steps,seed],
                      outputs=[image_in, image_out, masked_img], api_name='tryon')
 
 
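Because the click handler keeps api_name='tryon', the endpoint remains reachable programmatically. A rough sketch with gradio_client, assuming a placeholder Space ID, hypothetical local files and values, and that the ImageEditor input accepts the usual background/layers/composite payload:

    from gradio_client import Client, handle_file

    client = Client("user/space-name")  # placeholder Space ID, not the real one
    result = client.predict(
        {"background": handle_file("person.jpg"),   # inp_img (ImageEditor payload)
         "layers": [], "composite": None},
        handle_file("shirt.jpg"), "a plain cotton shirt",  # topwear image + description
        None, "",                                   # bottomwear image + description
        None, "",                                   # dress image + description
        True, False,                                # is_checked, is_checked_crop
        30, 42,                                     # denoise_steps, seed
        api_name="/tryon",
    )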