alexff91 committed on
Commit
01a92d0
·
1 Parent(s): 810e590

update to dresses

Browse files
Files changed (1) hide show
  1. app.py +12 -11
app.py CHANGED
@@ -1,5 +1,12 @@
1
- import gradio as gr
 
 
 
2
  from PIL import Image
 
 
 
 
3
  from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
4
  from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
5
  from src.unet_hacked_tryon import UNet2DConditionModel
@@ -8,21 +15,15 @@ from transformers import (
8
  CLIPVisionModelWithProjection,
9
  CLIPTextModel,
10
  CLIPTextModelWithProjection,
 
11
  )
12
- from diffusers import DDPMScheduler,AutoencoderKL
13
- from typing import List
14
-
15
- import torch
16
- import os
17
- from transformers import AutoTokenizer
18
- import spaces
19
- import numpy as np
20
  from utils_mask import get_mask_location
21
  from torchvision import transforms
22
  import apply_net
23
  from preprocess.humanparsing.run_parsing import Parsing
24
  from preprocess.openpose.run_openpose import OpenPose
25
- from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
26
  from torchvision.transforms.functional import to_pil_image
27
 
28
 
@@ -150,7 +151,7 @@ def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_ste
150
  if is_checked:
151
  keypoints = openpose_model(human_img.resize((384,512)))
152
  model_parse, _ = parsing_model(human_img.resize((384,512)))
153
- mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
154
  mask = mask.resize((768,1024))
155
  else:
156
  mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
 
1
+ import os
2
+ import torch
3
+ import numpy as np
4
+ from typing import List
5
  from PIL import Image
6
+ import gradio as gr
7
+
8
+ import spaces # Moved here to avoid CUDA initialization issues
9
+
10
  from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
11
  from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
12
  from src.unet_hacked_tryon import UNet2DConditionModel
 
15
  CLIPVisionModelWithProjection,
16
  CLIPTextModel,
17
  CLIPTextModelWithProjection,
18
+ AutoTokenizer # Add this line to import AutoTokenizer
19
  )
20
+ from diffusers import DDPMScheduler, AutoencoderKL
 
 
 
 
 
 
 
21
  from utils_mask import get_mask_location
22
  from torchvision import transforms
23
  import apply_net
24
  from preprocess.humanparsing.run_parsing import Parsing
25
  from preprocess.openpose.run_openpose import OpenPose
26
+ from detectron2.data.detection_utils import convert_PIL_to_numpy, _apply_exif_orientation
27
  from torchvision.transforms.functional import to_pil_image
28
 
29
 
 
151
  if is_checked:
152
  keypoints = openpose_model(human_img.resize((384,512)))
153
  model_parse, _ = parsing_model(human_img.resize((384,512)))
154
+ mask, mask_gray = get_mask_location('hd', "dresses", model_parse, keypoints)
155
  mask = mask.resize((768,1024))
156
  else:
157
  mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))