Spaces: Saad0KH · Running on Zero

Saad0KH committed · verified
Commit 95c0fd4 · 1 Parent(s): 1e10690

Update app.py

Files changed (1):
  1. app.py +17 -29
app.py CHANGED
@@ -132,9 +132,8 @@ def save_image(img):
     img.save(unique_name, format="WEBP", lossless=True)
     return unique_name
 
-#@spaces.GPU
-# Make sure the processing functions use asyncio
-async def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, categorie='upper_body'):
+@spaces.GPU
+def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, categorie = 'upper_body'):
     device = "cuda"
     openpose_model.preprocessor.body_estimation.model.to(device)
     pipe.to(device)
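
Note on the hunk above: `@spaces.GPU` is the ZeroGPU decorator from Hugging Face's `spaces` package; it allocates a GPU for the duration of each call and is applied to a plain synchronous function, which is presumably why the `async` variant was dropped. A minimal sketch of the pattern, with an illustrative `duration` and a toy body (not from app.py):

import spaces
import torch

@spaces.GPU(duration=120)  # optional cap, in seconds, on the GPU slot per call
def run_on_gpu(x: torch.Tensor) -> torch.Tensor:
    # inside a decorated function, CUDA is available on a ZeroGPU Space
    return (x.to("cuda") * 2).cpu()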
@@ -158,27 +157,20 @@ async def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, categorie='upper_body'):
     human_img = human_img_orig.resize((768, 1024))
 
     if is_checked:
-        # Use asyncio to keep this computation from blocking
-        keypoints = await asyncio.to_thread(openpose_model, human_img.resize((384, 512)))
-        model_parse, _ = await asyncio.to_thread(parsing_model, human_img.resize((384, 512)))
-        mask, mask_gray = await asyncio.to_thread(get_mask_location, 'hd', categorie, model_parse, keypoints)
+        keypoints = openpose_model(human_img.resize((384, 512)))
+        model_parse, _ = parsing_model(human_img.resize((384, 512)))
+        mask, mask_gray = get_mask_location('hd', categorie, model_parse, keypoints)
         mask = mask.resize((768, 1024))
     else:
-        mask = dict['layers'][0].convert("RGB").resize((768, 1024))
-
+        mask = dict['layers'][0].convert("RGB").resize((768, 1024))  # pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
     mask_gray = (1 - transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
     mask_gray = to_pil_image((mask_gray + 1.0) / 2.0)
 
     human_img_arg = _apply_exif_orientation(human_img.resize((384, 512)))
     human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
 
-    # Build the arguments without blocking
-    args = await asyncio.to_thread(
-        apply_net.create_argument_parser().parse_args,
-        ('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda')
-    )
-
-    pose_img = await asyncio.to_thread(args.func, args, human_img_arg)
+    args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
+    pose_img = args.func(args, human_img_arg)
     pose_img = pose_img[:, :, ::-1]
     pose_img = Image.fromarray(pose_img).resize((768, 1024))
 
@@ -221,10 +213,7 @@ async def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, categorie='upper_body'):
     pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device, torch.float16)
     garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device, torch.float16)
     generator = torch.Generator(device).manual_seed(seed) if seed is not None else None
-
-    # The main model call must also be asynchronous
-    images = await asyncio.to_thread(
-        pipe,
+    images = pipe(
         prompt_embeds=prompt_embeds.to(device, torch.float16),
         negative_prompt_embeds=negative_prompt_embeds.to(device, torch.float16),
         pooled_prompt_embeds=pooled_prompt_embeds.to(device, torch.float16),
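
The `generator` line kept as context here is the standard recipe for reproducible sampling: seed a per-call torch.Generator instead of the global RNG. A small sketch with an illustrative seed:

import torch

seed = 42  # illustrative
gen = torch.Generator("cpu").manual_seed(seed) if seed is not None else None
a = torch.randn(3, generator=gen)
b = torch.randn(3, generator=torch.Generator("cpu").manual_seed(seed))
assert torch.equal(a, b)  # same seed, same samples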
@@ -248,12 +237,13 @@ async def start_tryon(dict, garm_img, garment_des, is_checked, is_checked_crop, denoise_steps, seed, categorie='upper_body'):
         human_img_orig.paste(out_img, (int(left), int(top)))
         return human_img_orig, mask_gray
     else:
-        return images[0], mask_gray, mask
-
+        return images[0], mask_gray , mask
 
+
 @app.route('/tryon-v2', methods=['POST'])
-async def tryon_v2():
-    data = await request.json
+def tryon_v2():
+
+    data = request.json
     human_image_data = data['human_image']
     garment_image_data = data['garment_image']
 
@@ -278,13 +268,11 @@ async def tryon_v2():
         'composite': None
     }
 
-    # Run the main processing asynchronously
-    output_image, mask_image, mask = await start_tryon(human_dict, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie)
-
+    output_image, mask_image , mask = start_tryon(human_dict, garment_image, description, use_auto_mask, use_auto_crop, denoise_steps, seed, categorie)
     return jsonify({
         'image_id': save_image(output_image),
-        'mask_gray_id': save_image(mask_image),
-        'mask_id': save_image(mask)
+        'mask_gray_id' : save_image(mask_image),
+        'mask_id' : save_image(mask)
     })
 
 def clear_gpu_memory():
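
The route change mirrors the function change: `await request.json` is the Quart idiom for reading a JSON body, while bare `request.json` is the synchronous Flask property the new handler uses. A minimal sketch of the synchronous pattern, assuming a Flask `app` (the framework import is outside this diff):

from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route('/echo', methods=['POST'])
def echo():
    data = request.json  # plain property access, as in the new tryon_v2
    return jsonify(data)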
 
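
For completeness, a hypothetical client call against the updated endpoint. The two keys match what tryon_v2 reads; the URL, port, and image encoding (base64 is a guess, since the decoding code is outside this diff) are assumptions:

import requests

payload = {
    'human_image': '<base64-encoded person photo>',     # read as data['human_image']
    'garment_image': '<base64-encoded garment photo>',  # read as data['garment_image']
}
resp = requests.post('http://localhost:7860/tryon-v2', json=payload)
print(resp.json())  # expected keys: image_id, mask_gray_id, mask_id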