Initial commit

Files changed:
- .gitignore +3 -0
- app.py +42 -0
- helpers/processor.py +196 -0
- requirements.txt +2 -0
.gitignore ADDED

data/
flagged/
texture.png
app.py ADDED

import os

import gradio as gr
import wget

from helpers.processor import TextureProcessor


def image_processing(person_img, model_img):
    # Extract the clothing texture from the model image and render it
    # onto the person image.
    return texture_processor.extract(person_img, model_img)


def load_model(current_path):
    # Fetch the DensePose config and pretrained weights at startup,
    # skipping any file that is already present so restarts do not
    # re-download.
    data_path = os.path.join(current_path, 'data')
    if not os.path.isdir(data_path):
        os.mkdir(data_path)
    files = {
        'config.yaml': 'https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml',
        'weights.pkl': 'https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_WC1M_s1x/217144516/model_final_48a9d9.pkl',
        # Base config referenced via _BASE_ by the model config above.
        'Base-DensePose-RCNN-FPN.yaml': 'https://raw.githubusercontent.com/facebookresearch/detectron2/main/projects/DensePose/configs/Base-DensePose-RCNN-FPN.yaml',
    }
    for filename, url in files.items():
        target = os.path.join(data_path, filename)
        if not os.path.isfile(target):
            wget.download(url, target)


current_path = os.getcwd()
load_model(current_path)
densepose_config = os.path.join(current_path, 'data', 'config.yaml')
densepose_weights = os.path.join(current_path, 'data', 'weights.pkl')

texture_processor = TextureProcessor(densepose_config, densepose_weights)

# gr.inputs.Image / gr.outputs.Image were removed in newer Gradio
# releases; the top-level gr.Image component replaces both.
inputs = [
    gr.Image(label="Person Image", type='numpy'),
    gr.Image(label="Model Image (with clothes)", type='numpy')
]

outputs = gr.Image(label="Result Image", type='numpy')

title = "JustClothify"
description = "Upload an image of a person and an image of a model wearing clothes; the app will generate an image of the person wearing that clothing."

gr.Interface(
    fn=image_processing,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description
).launch()
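For local debugging it can help to exercise the pipeline without launching the Gradio UI. A minimal sketch, run in the same interpreter after app.py's module-level setup has completed; person.jpg and model.jpg are hypothetical test files, and the channel order may need adjusting since Gradio hands the function RGB arrays while cv2.imread returns BGR:

# Hypothetical smoke test, not part of the commit.
import cv2

person = cv2.imread('person.jpg')  # placeholder path
model = cv2.imread('model.jpg')    # placeholder path

result = image_processing(person, model)
cv2.imwrite('result.png', result)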
helpers/processor.py ADDED

from typing import Any, Dict

import cv2
import imageio
import numpy as np
import torch

from detectron2.config import get_cfg
from detectron2.engine.defaults import DefaultPredictor
from detectron2.structures.instances import Instances

from densepose import add_densepose_config
from densepose.structures import (
    DensePoseChartPredictorOutput,
    DensePoseEmbeddingPredictorOutput
)
from densepose.vis.base import CompoundVisualizer
from densepose.vis.densepose_outputs_vertex import get_texture_atlases
from densepose.vis.densepose_results_textures import (
    DensePoseResultsVisualizerWithTexture as dp_iuv_texture,
    get_texture_atlas
)
from densepose.vis.extractor import (
    CompoundExtractor,
    create_extractor,
    DensePoseOutputsExtractor,
    DensePoseResultExtractor
)


class TextureProcessor:
    """Extracts a DensePose texture atlas from a clothed model image and
    re-renders it onto a person image."""

    def __init__(self, config, weights):
        self.config = self.get_config(config, weights)
        self.predictor = DefaultPredictor(self.config)

    def process_texture(self, image, output_filename):
        # Run DensePose on the model image and save its texture atlas.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        output = self.execute(image)[0]

        if 'pred_densepose' in output:
            texture = self.create_iuv(output, image)
            imageio.imwrite(output_filename, texture)
        else:
            # No detection: the texture file is not written, so the
            # subsequent overlay step has nothing new to apply.
            print('No clothes detected in the model image')

    def extract(self, person_img, model_img):
        texture_filename = 'texture.png'
        self.process_texture(model_img, texture_filename)
        return self.overlay_texture(texture_filename, person_img)

    def overlay_texture(self, texture_name, original_image):
        # Build a visualizer that renders the saved texture atlas onto the
        # DensePose parametrization of the person image.
        texture_atlas = get_texture_atlas(texture_name)
        texture_atlases_dict = get_texture_atlases(None)
        vis = dp_iuv_texture(
            cfg=self.config,
            texture_atlas=texture_atlas,
            texture_atlases_dict=texture_atlases_dict
        )

        visualizer = CompoundVisualizer([vis])
        extractor = CompoundExtractor([create_extractor(vis)])

        with torch.no_grad():
            outputs = self.predictor(original_image)["instances"]

        # Render onto a grayscale copy so the pasted texture stands out.
        image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
        image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
        data = extractor(outputs)
        image_vis = visualizer.visualize(image, data)

        return image_vis

    def parse_iuv(self, result):
        i = result['pred_densepose'][0].labels.cpu().numpy().astype(float)
        uv = (result['pred_densepose'][0].uv.cpu().numpy() * 255.0).astype(float)
        iuv = np.stack((uv[1, :, :], uv[0, :, :], i))
        iuv = np.transpose(iuv, (1, 2, 0))
        return iuv

    def parse_bbox(self, result):
        return result["pred_boxes_XYXY"][0].cpu().numpy()

    def interpolate_tex(self, tex):
        # Adapted from https://github.com/facebookresearch/DensePose/issues/68:
        # inpaint the empty pixels of a part texture, then keep only a
        # slightly dilated region around the valid pixels.
        valid_mask = np.array((tex.sum(0) != 0) * 1, dtype='uint8')
        radius_increase = 10
        kernel = np.ones((radius_increase, radius_increase), np.uint8)
        dilated_mask = cv2.dilate(valid_mask, kernel, iterations=1)
        invalid_region = 1 - valid_mask
        actual_part_max = tex.max()
        actual_part_min = tex.min()
        actual_part_uint = np.array(
            (tex - actual_part_min) / (actual_part_max - actual_part_min) * 255,
            dtype='uint8')
        actual_part_uint = cv2.inpaint(
            actual_part_uint.transpose((1, 2, 0)), invalid_region, 1,
            cv2.INPAINT_TELEA).transpose((2, 0, 1))
        actual_part = (actual_part_uint / 255.0) * \
            (actual_part_max - actual_part_min) + actual_part_min
        # Only keep the dilated part.
        actual_part = actual_part * dilated_mask

        return actual_part

    def concat_textures(self, array):
        # Assemble 24 part textures into a 4x6 atlas grid.
        rows = []
        for i in range(4):
            rows.append(np.concatenate(array[6 * i:6 * i + 6], axis=1))
        return np.concatenate(rows, axis=0)

    def get_texture(self, im, iuv, bbox, tex_part_size=200):
        # Sample image pixels into per-part UV texture tiles.
        im = im.transpose(2, 1, 0) / 255
        image_w, image_h = im.shape[1], im.shape[2]
        # Convert the XYXY box to XYWH and paste the IUV map into a
        # full-size canvas at the box location.
        bbox[2] = bbox[2] - bbox[0]
        bbox[3] = bbox[3] - bbox[1]
        x, y, w, h = [int(v) for v in bbox]
        bg = np.zeros((image_h, image_w, 3))
        bg[y:y + h, x:x + w, :] = iuv
        iuv = bg.transpose((2, 1, 0))
        i, u, v = iuv[2], iuv[1], iuv[0]

        n_parts = 22
        texture = np.zeros((n_parts, 3, tex_part_size, tex_part_size))

        for part_id in range(1, n_parts + 1):
            generated = np.zeros((3, tex_part_size, tex_part_size))

            x, y = u[i == part_id], v[i == part_id]

            # Map UV values (0..255) to texture-tile coordinates.
            tex_u_coo = np.clip((x * (tex_part_size - 1) / 255).astype(int),
                                0, tex_part_size - 1)
            tex_v_coo = np.clip((y * (tex_part_size - 1) / 255).astype(int),
                                0, tex_part_size - 1)

            for channel in range(3):
                generated[channel][tex_v_coo, tex_u_coo] = im[channel][i == part_id]

            if np.sum(generated) > 0:
                generated = self.interpolate_tex(generated)

            texture[part_id - 1] = generated[:, ::-1, :]

        # The DensePose atlas has 24 slots; parts 23 and 24 stay empty here.
        tex_concat = np.zeros((24, tex_part_size, tex_part_size, 3))
        for part_id in range(texture.shape[0]):
            tex_concat[part_id] = texture[part_id].transpose(2, 1, 0)
        return self.concat_textures(tex_concat)

    def create_iuv(self, results, image):
        iuv = self.parse_iuv(results)
        bbox = self.parse_bbox(results)
        uv_texture = self.get_texture(image, iuv, bbox)
        uv_texture = uv_texture.transpose([1, 0, 2])
        return uv_texture

    def get_config(self, config_fpath, model_fpath):
        cfg = get_cfg()
        add_densepose_config(cfg)
        cfg.merge_from_file(config_fpath)
        cfg.MODEL.WEIGHTS = model_fpath
        cfg.MODEL.DEVICE = "cpu"
        cfg.freeze()
        return cfg

    def execute(self, image):
        context = {'results': []}
        with torch.no_grad():
            outputs = self.predictor(image)["instances"]
            self.execute_on_outputs(context, outputs)
        return context["results"]

    def execute_on_outputs(self, context: Dict[str, Any], outputs: Instances):
        result = {}
        if outputs.has("scores"):
            result["scores"] = outputs.get("scores").cpu()
        if outputs.has("pred_boxes"):
            result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu()
        if outputs.has("pred_densepose"):
            if isinstance(outputs.pred_densepose, DensePoseChartPredictorOutput):
                extractor = DensePoseResultExtractor()
            elif isinstance(outputs.pred_densepose, DensePoseEmbeddingPredictorOutput):
                extractor = DensePoseOutputsExtractor()
            else:
                raise TypeError(
                    f'Unsupported DensePose output: {type(outputs.pred_densepose)}')
            result["pred_densepose"] = extractor(outputs)[0]
        context["results"].append(result)
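get_texture and concat_textures together produce the standard DensePose texture-atlas layout: 24 body-part tiles of tex_part_size x tex_part_size pixels arranged in a 4x6 grid (parts 23 and 24 stay empty here because n_parts = 22). As an illustration of that layout, here is a minimal sketch of the inverse operation; split_atlas is a hypothetical helper, not part of this repo:

# Hypothetical helper illustrating the atlas layout.
import numpy as np

def split_atlas(atlas, rows=4, cols=6):
    # Split a (rows*T, cols*T, 3) atlas into (rows*cols, T, T, 3) part tiles.
    tile_h = atlas.shape[0] // rows
    tile_w = atlas.shape[1] // cols
    tiles = [atlas[r * tile_h:(r + 1) * tile_h, c * tile_w:(c + 1) * tile_w]
             for r in range(rows) for c in range(cols)]
    return np.stack(tiles)

atlas = np.zeros((4 * 200, 6 * 200, 3))  # e.g. the output of concat_textures
parts = split_atlas(atlas)
assert parts.shape == (24, 200, 200, 3)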
requirements.txt ADDED

git+https://github.com/facebookresearch/detectron2@main#subdirectory=projects/DensePose
wget
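Note that requirements.txt pins only the DensePose project and wget; torch, opencv-python, numpy, imageio, and gradio are presumably provided by the Space's base image, and building detectron2 from source requires torch to be installed first. A small sketch for checking that everything the app imports is actually available:

# Hypothetical environment check, not part of the commit.
import importlib

for mod in ('torch', 'cv2', 'numpy', 'imageio', 'gradio', 'wget',
            'detectron2', 'densepose'):
    try:
        importlib.import_module(mod)
        print(f'{mod}: ok')
    except ImportError as err:
        print(f'{mod}: MISSING ({err})')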