Spaces commit: Update app.py
app.py (CHANGED)
@@ -122,9 +122,16 @@ pipe = TryonPipeline.from_pretrained(
 pipe.unet_encoder = UNet_Encoder
 
 @spaces.GPU
-def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
+def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed, category):
     device = "cuda"
+    category = int(category)
+    if category==0:
+        category='upper_body'
 
+    elif category==1:
+        category='lower_body'
+    else:
+        category='dresses'
     openpose_model.preprocessor.body_estimation.model.to(device)
     pipe.to(device)
     pipe.unet_encoder.to(device)
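This hunk threads a new `category` argument through `start_tryon` and maps it onto the labels `get_mask_location` expects. Note that `int(category)` raises `ValueError` when the Textbox that feeds it (added in a later hunk) is empty or non-numeric. A minimal standalone sketch of the same mapping with that validation added; the helper name and the `'upper_body'` fallback are assumptions, not part of the commit:

```python
# Hypothetical helper mirroring the mapping above. The commit itself would
# crash on bad input instead of falling back to a default.
CATEGORY_LABELS = {0: 'upper_body', 1: 'lower_body'}

def parse_category(raw: str) -> str:
    """Map the Textbox value ('0', '1', '2', ...) to a mask category label."""
    try:
        idx = int(raw)
    except (TypeError, ValueError):
        return 'upper_body'  # assumed default
    # As in the diff: 0 and 1 map explicitly, everything else means 'dresses'.
    return CATEGORY_LABELS.get(idx, 'dresses')
```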
@@ -134,15 +141,20 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
 
     if is_checked_crop:
         width, height = human_img_orig.size
-        target_width = int(min(width, height * (3 / 4)))
-        target_height = int(min(height, width * (4 / 3)))
-        left = (width - target_width) / 2
-        top = (height - target_height) / 2
-        right = (width + target_width) / 2
-        bottom = (height + target_height) / 2
-        cropped_img = human_img_orig.crop((left, top, right, bottom))
-        crop_size = cropped_img.size
-        human_img = cropped_img.resize((768,1024))
+        aspect_ratio = width / height
+
+        if not (0.45 < aspect_ratio < 0.46):
+            target_width = int(min(width, height * (3 / 4)))
+            target_height = int(min(height, width * (4 / 3)))
+            left = (width - target_width) / 2
+            top = (height - target_height) / 2
+            right = (width + target_width) / 2
+            bottom = (height + target_height) / 2
+            cropped_img = human_img_orig.crop((left, top, right, bottom))
+            crop_size = cropped_img.size
+            human_img = cropped_img.resize((768, 1024))
+        else:
+            human_img = human_img_orig.resize((768,1024))
     else:
         human_img = human_img_orig.resize((768,1024))
 
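The crop branch now runs only when the photo's width-to-height ratio falls outside the narrow (0.45, 0.46) band; ratios inside the band skip cropping and are resized straight to 768x1024 (stretching them, since 768/1024 = 0.75). A sketch of the crop arithmetic as a standalone PIL function; the name and return shape are mine:

```python
from PIL import Image

def center_crop_3_4(img: Image.Image):
    """Largest centered 3:4 (width:height) crop, mirroring the hunk above.

    Returns the 768x1024 model input, the crop's size in the original photo,
    and the (left, top) offset needed to paste the result back later.
    """
    width, height = img.size
    target_width = int(min(width, height * (3 / 4)))   # width capped at 3/4 of height
    target_height = int(min(height, width * (4 / 3)))  # height capped at 4/3 of width
    left = (width - target_width) // 2                 # the commit keeps floats and casts at paste time
    top = (height - target_height) // 2
    cropped = img.crop((left, top, left + target_width, top + target_height))
    return cropped.resize((768, 1024)), cropped.size, (left, top)
```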
@@ -150,7 +162,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
     if is_checked:
         keypoints = openpose_model(human_img.resize((384,512)))
         model_parse, _ = parsing_model(human_img.resize((384,512)))
-        mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
+        mask, mask_gray = get_mask_location('hd', category, model_parse, keypoints)
         mask = mask.resize((768,1024))
     else:
         mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
@@ -176,7 +188,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
     with torch.cuda.amp.autocast():
         with torch.no_grad():
             prompt = "model is wearing " + garment_des
-            negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
+            negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality, contortionist, amputee, polydactyly, deformed, distorted, misshapen, malformed, abnormal, mutant, defaced, shapeless, unreal, missing arms, three hands, bad face, extra fingers, cartoon, fused face, cg, ugly fingers, three legs, bad hands, fused feet, worst face, extra eyes, long fingers, three feet, missing legs, cloned face, worst feet, extra crus, huge eyes, fused crus, three thigh, bad anatomy, disconnected limbs, animate, 3d, worst thigh, extra thigh, fused thigh, missing fingers, amputation, poorly drawn face, three crus, horn, 2girl, bad arms"
             with torch.inference_mode():
                 (
                     prompt_embeds,
@@ -191,7 +203,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
                 )
 
                 prompt = "a photo of " + garment_des
-                negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
+                negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality, contortionist, amputee, polydactyly, deformed, distorted, misshapen, malformed, abnormal, mutant, defaced, shapeless, unreal, missing arms, three hands, bad face, extra fingers, cartoon, fused face, cg, ugly fingers, three legs, bad hands, fused feet, worst face, extra eyes, long fingers, three feet, missing legs, cloned face, worst feet, extra crus, huge eyes, fused crus, three thigh, bad anatomy, disconnected limbs, animate, 3d, worst thigh, extra thigh, fused thigh, missing fingers, amputation, poorly drawn face, three crus, horn, 2girl, bad arms"
                 if not isinstance(prompt, List):
                     prompt = [prompt] * 1
                 if not isinstance(negative_prompt, List):
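The same long negative prompt is pasted at both call sites in this commit; hoisting it to one module-level constant would keep the two copies in sync. A hypothetical refactor sketch (the string is abbreviated here; the commit's full term list would go in):

```python
# Hypothetical refactor: one constant instead of two identical literals.
NEGATIVE_PROMPT = (
    "monochrome, lowres, bad anatomy, worst quality, low quality, "
    "contortionist, amputee, polydactyly, deformed, distorted, "
    # ... remaining anatomy terms from the commit ...
    "three crus, horn, 2girl, bad arms"
)

# Both sites then read:
# negative_prompt = NEGATIVE_PROMPT
```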
@@ -234,8 +246,12 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
                 )[0]
 
     if is_checked_crop:
-        out_img = images[0].resize(crop_size)
-        human_img_orig.paste(out_img, (int(left), int(top)))
+        if not (0.45 < aspect_ratio < 0.46):
+            out_img = images[0].resize(crop_size)
+            human_img_orig.paste(out_img, (int(left), int(top)))
+        else:
+            return images[0], mask_gray
+
         return human_img_orig, mask_gray
     else:
         return images[0], mask_gray
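When auto-crop ran, the 768x1024 output is resized back to the crop's original size and pasted over the source photo at the saved offset; photos inside the (0.45, 0.46) band skip the paste and return the raw output instead. A sketch of that round trip, reusing the hypothetical `center_crop_3_4` helper from the earlier block; the file name is a placeholder:

```python
from PIL import Image

human_img_orig = Image.open("person.jpg").convert("RGB")  # placeholder input
model_input, crop_size, (left, top) = center_crop_3_4(human_img_orig)

out = model_input  # stand-in for images[0] from the try-on pipeline
# Undo the crop: shrink the 768x1024 result and paste it where the crop came from.
human_img_orig.paste(out.resize(crop_size), (int(left), int(top)))
```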
@@ -260,8 +276,8 @@ for ex_human in human_list_path:
 
 image_blocks = gr.Blocks().queue()
 with image_blocks as demo:
-    gr.Markdown("## IDM-VTON 👕👔👚")
-    gr.Markdown("Virtual Try-on with your image and garment image. Check out the [source codes](https://github.com/yisol/IDM-VTON) and the [model](https://huggingface.co/yisol/IDM-VTON)")
+    gr.Markdown("## Automata dress it up")
+    gr.Markdown("Dress it up Demo")
     with gr.Row():
         with gr.Column():
             imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
@@ -269,12 +285,15 @@ with image_blocks as demo:
                 is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
             with gr.Row():
                 is_checked_crop = gr.Checkbox(label="Yes", info="Use auto-crop & resizing",value=False)
+            with gr.Row():
+                category = gr.Textbox(placeholder="0 = upper body, 1 = lower body, 2 = full body", show_label=False, elem_id="prompt")
 
             example = gr.Examples(
                 inputs=imgs,
                 examples_per_page=10,
                 examples=human_ex_list
             )
+
 
         with gr.Column():
             garm_img = gr.Image(label="Garment", sources='upload', type="pil")
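The new control is a free-text box, so whatever a user types reaches `int(category)` unvalidated, and the placeholder's "2 = full body" actually covers every value other than 0 and 1, all of which fall through to 'dresses'. A constrained widget avoids both issues; a hypothetical alternative using Gradio's Radio component (label/value tuples as choices are a Gradio 4.x feature):

```python
import gradio as gr

with gr.Blocks() as demo:
    # Hypothetical replacement for the free-text box: only the three valid
    # values can be submitted, so int(category) in start_tryon cannot fail.
    category = gr.Radio(
        choices=[("Upper body", 0), ("Lower body", 1), ("Full body / dress", 2)],
        value=0,
        label="Garment category",
    )
```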
@@ -303,8 +322,7 @@
                 seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=42)
 
 
-
-    try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked,is_checked_crop, denoise_steps, seed], outputs=[image_out,masked_img], api_name='tryon')
+    try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked,is_checked_crop, denoise_steps, seed, category], outputs=[image_out,masked_img], api_name='tryon')
 
 
 
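With `category` appended, the `inputs` list again matches `start_tryon`'s positional signature; Gradio passes values through by list order, not by name. A hypothetical client-side call against the exported `api_name='tryon'` endpoint via `gradio_client`, assuming a Gradio 4.x Space; the Space id and file names are placeholders:

```python
from gradio_client import Client, handle_file

client = Client("user/space-name")  # placeholder Space id
result = client.predict(
    {"background": handle_file("person.jpg"), "layers": [], "composite": None},  # imgs
    handle_file("garment.png"),   # garm_img
    "white t-shirt",              # garment description
    True,                         # is_checked: auto-generated mask
    False,                        # is_checked_crop
    30,                           # denoise_steps
    42,                           # seed
    "0",                          # category: "0" upper, "1" lower, anything else dresses
    api_name="/tryon",
)
```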