Akjava committed
Commit 3f3c15f · 1 parent: 531da56

Fix broken transform when the image is not square

Files changed (1):
  1. mediapipe_transform.py +227 -136
mediapipe_transform.py CHANGED
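The one functional change in this commit is an argument-order fix: the inner-mouth and inner-eye mask code passed `align_h, align_w` to get_pixel_cordinate_list, while every other call site in the file passes width before height (`align_w, align_h`). On square images the swap is invisible; on non-square images the mouth and eye coordinates land in the wrong place, which is the breakage the commit message describes. Everything else in the diff is black-style reformatting (multi-line imports and call sites) plus one new debug cv2.imwrite. A minimal sketch of the failure mode (mine, not from the repo), assuming pixel coordinates are derived from MediaPipe's normalized [0, 1] landmarks:

    # to_pixel is a hypothetical helper mirroring the per-point conversion:
    # x must scale with the image width, y with the image height.
    def to_pixel(norm_x, norm_y, width, height):
        return int(norm_x * width), int(norm_y * height)

    h, w = 480, 640                  # non-square image
    print(to_pixel(0.5, 0.5, w, h))  # (320, 240): the image center, correct
    print(to_pixel(0.5, 0.5, h, w))  # (240, 320): swapped arguments, wrong unless w == h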
@@ -4,36 +4,58 @@ import mp_triangles
 import time
 from PIL import Image

-from glibvision.cv2_utils import blend_rgb_images,pil_to_bgr_image,fill_points,crop,paste
-from mp_utils import get_pixel_cordinate_list,extract_landmark,get_pixel_cordinate,get_normalized_landmarks,sort_triangles_by_depth,get_landmark_bbox

 import numba as nb
 @nb.jit(nopython=True, parallel=True)
 def blend_rgb_images_numba(image1, image2, mask):
     height, width, _ = image1.shape
     result = np.empty((height, width, 3), dtype=np.float32)
-
     for i in nb.prange(height):
         for j in range(width):
             alpha = mask[i, j] / 255.0
             for k in range(3):
-                result[i, j, k] = (1 - alpha) * image1[i, j, k] + alpha * image2[i, j, k]
-
     return result.astype(np.uint8)

 @nb.jit(nopython=True, parallel=True)
 def blend_rgba_images_numba(image1, image2, mask):
-    assert image1.shape[2] == image2.shape[2] , f"Input images must be same image1 = {image1.shape[2]} image2 ={image2.shape[2]}"
     channel = image1.shape[2]
     height, width, _ = image1.shape
     result = np.empty((height, width, channel), dtype=np.float32)
-
     for i in nb.prange(height):
         for j in range(width):
             alpha = mask[i, j] / 255.0
             for k in range(channel):
-                result[i, j, k] = (1 - alpha) * image1[i, j, k] + alpha * image2[i, j, k]
-
     return result.astype(np.uint8)

@@ -46,26 +68,30 @@ bug some hide value make white
 """
 debug_affinn = False
 min_affin_plus = 0.1
 def apply_affine_transformation_to_triangle(src_tri, dst_tri, src_img, dst_img):
-
     src_tri_np = np.float32(src_tri)
     dst_tri_np = np.float32(dst_tri)

     assert src_tri_np.shape == (3, 2), f"src_tri_np has invalid shape {src_tri_np.shape}"
     assert dst_tri_np.shape == (3, 2), f"dst_tri_np has invalid shape {dst_tri_np.shape}"

-
-    #trying avoid same value,or M will broken
     if (src_tri_np[0] == src_tri_np[1]).all():
-        src_tri_np[0]+=min_affin_plus
     if (src_tri_np[0] == src_tri_np[2]).all():
-        src_tri_np[0]+=min_affin_plus
     if (src_tri_np[1] == src_tri_np[2]).all():
-        src_tri_np[1]+=min_affin_plus
     if (src_tri_np[1] == src_tri_np[0]).all():
-        src_tri_np[1]+=min_affin_plus

-    if (src_tri_np[1] == src_tri_np[0]).all() or (src_tri_np[1] == src_tri_np[2]).all() or (src_tri_np[2] == src_tri_np[0]).all():
         print("same will white noise happen")
     # compute the affine transformation matrix
     M = cv2.getAffineTransform(src_tri_np, dst_tri_np)
@@ -74,43 +100,42 @@ def apply_affine_transformation_to_triangle(src_tri, dst_tri, src_img, dst_img):
     h_dst, w_dst = dst_img.shape[:2]

     # build a mask that cuts the triangle region out of the source image
-    #src_mask = np.zeros((h_src, w_src), dtype=np.uint8)
-    #cv2.fillPoly(src_mask, [np.int32(src_tri)], 255)

     # Not extracting only the triangle region of the source image with a mask
-    src_triangle = src_img #cv2.bitwise_and(src_img, src_img, mask=src_mask)

     # warp the source triangle region to the destination image size with the transform matrix
-
-
     transformed = cv2.warpAffine(src_triangle, M, (w_dst, h_dst))
     if debug_affinn:
-        cv2.imwrite('affin_src.jpg', src_triangle)
-        cv2.imwrite('affin_transformed.jpg', transformed)

-    #print(f"dst_img={dst_img.shape}")
-    #print(f"transformed={transformed.shape}")
     # generate the post-transform mask
     dst_mask = np.zeros((h_dst, w_dst), dtype=np.uint8)
     cv2.fillPoly(dst_mask, [np.int32(dst_tri)], 255)

     # create an inverted destination mask to clear the masked region of the destination image
-    #dst_mask_inv = cv2.bitwise_not(dst_mask)

     # clear the masked part of the destination image
-    #dst_background = cv2.bitwise_and(dst_img, dst_img, mask=dst_mask_inv)

     # composite the warped source triangle with the destination background
-    #dst_img = cv2.add(dst_background, transformed)
-    #s = time.time()
-    #dst_img = blend_rgb_images(dst_img,transformed,dst_mask)

     use_blend_rgb = False
     if use_blend_rgb:
-        if src_img.shape[2] == 3:
-            dst_img = blend_rgb_images_numba(dst_img,transformed,dst_mask)
         else:
-            dst_img = blend_rgba_images_numba(dst_img,transformed,dst_mask)
     else:
         dst_mask_inv = cv2.bitwise_not(dst_mask)
         transformed = cv2.bitwise_and(transformed, transformed, mask=dst_mask)
@@ -118,166 +143,232 @@ def apply_affine_transformation_to_triangle(src_tri, dst_tri, src_img, dst_img):
         dst_img = cv2.add(dst_img, transformed)

     # TODO add rgb mode
-
-
-    #print(f"blend {time.time() -s}")
     if debug_affinn:
-        cv2.imwrite('affin_transformed_masked.jpg', transformed)
-        cv2.imwrite('affin_dst_mask.jpg', dst_mask)
     return dst_img


-from skimage.exposure import match_histograms
-def color_match(base_image,cropped_image,color_match_format="RGB"):
-    reference = np.array(base_image.convert(color_match_format))
-    target =np.array(cropped_image.convert(color_match_format))
-    matched = match_histograms(target, reference,channel_axis=-1)
-
-    return Image.fromarray(matched,mode=color_match_format)
-
-def process_landmark_transform(image,transform_target_image,
-                               innner_mouth,innner_eyes,
-                               color_matching=False,transparent_background=False,add_align_mouth=False,add_align_eyes=False,blur_size=0):
-    image_h,image_w = image.shape[:2]
-    align_h,align_w = transform_target_image.shape[:2]
-
-    mp_image,image_face_landmarker_result = extract_landmark(image)
-    image_larndmarks=image_face_landmarker_result.face_landmarks
-    image_bbox = get_landmark_bbox(image_larndmarks,image_w,image_h,16,16)

-    mp_image,align_face_landmarker_result = extract_landmark(transform_target_image)
-    align_larndmarks=align_face_landmarker_result.face_landmarks
-    align_bbox = get_landmark_bbox(align_larndmarks,align_w,align_h,16,16)

     if color_matching:
-        image_cropped = crop(image,image_bbox)
-        target_cropped = crop(transform_target_image,align_bbox)
-        matched = match_histograms(image_cropped, target_cropped,channel_axis=-1)
-        paste(image,matched,image_bbox[0],image_bbox[1])
-

     landmark_points = get_normalized_landmarks(align_larndmarks)
-
-    mesh_triangle_indices = mp_triangles.mesh_triangle_indices.copy()#using directly sometime share
-
-    #always mix for blur
     mesh_triangle_indices += mp_triangles.INNER_MOUTH
-
-    mesh_triangle_indices += mp_triangles.INNER_LEFT_EYES + mp_triangles.INNER_RIGHT_EYES
-    #print(mesh_triangle_indices)
-    sort_triangles_by_depth(landmark_points,mesh_triangle_indices)
-
-    #mesh_triangle_indices = mp_triangles.contour_to_triangles(True,draw_updown_contour) + mp_triangles.contour_to_triangles(False,draw_updown_contour)+ mp_triangles.mesh_triangle_indices
-

     triangle_size = len(mesh_triangle_indices)
-    print(f"triangle_size = {triangle_size},time ={0.1*triangle_size}")
     s = time.time()
-
     need_transparent_way = transparent_background == True or blur_size > 0
-    if need_transparent_way:# convert Alpha
-        transparent_image = np.zeros_like(cv2.cvtColor(transform_target_image, cv2.COLOR_BGR2BGRA))
         h, w = transparent_image.shape[:2]
-        cv2.rectangle(transparent_image, (0, 0), (w, h), (0,0,0,0), -1)

         applied_image = transparent_image
         image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
-
     else:
         applied_image = transform_target_image
-
-    for i in range(0,triangle_size):#
         triangle_indices = mesh_triangle_indices[i]
-
-
-        image_points = get_pixel_cordinate_list(image_larndmarks,triangle_indices,image_w,image_h)
-
-        align_points = get_pixel_cordinate_list(align_larndmarks,triangle_indices,align_w,align_h)
-        #print(image_points)
-        #print(align_points)
-        #fill_points(image,image_points,thickness=3,fill_color=(0,0,0,0))
-        #s = time.time()
-        #print(f"applied_image={applied_image.shape}")
-        applied_image=apply_affine_transformation_to_triangle(image_points,align_points,image,applied_image)
-
-    print(f"take time {time.time()-s}")
     if need_transparent_way:
         blur_radius = blur_size
-        if blur_radius!=0 and blur_radius%2 == 0:
-            blur_radius+=1
-
-        b, g, r,a = cv2.split(applied_image)
-        applied_image = cv2.merge([b,g,r])
         mask = a.copy()
         dilate = blur_radius
         kernel = np.ones((dilate, dilate), np.uint8)
         mask = cv2.erode(mask, kernel, iterations=1)

-        if blur_radius>0:
-            blurred_image = cv2.GaussianBlur(mask, (blur_radius, blur_radius), 0) #should be odd
         else:
             blurred_image = mask

         if transparent_background:
-            #transform_target_image = np.zeros_like(cv2.cvtColor(transform_target_image, cv2.COLOR_BGR2BGRA))
-            transform_target_image=cv2.cvtColor(transform_target_image, cv2.COLOR_BGR2BGRA)
-            applied_image = cv2.merge([b,g,r,blurred_image])
         else:
-            applied_image = blend_rgb_images(transform_target_image,applied_image,blurred_image)

     # after mix
-    if not innner_mouth or not innner_eyes or (transparent_background and (add_align_mouth or add_align_eyes)):
-
         import mp_constants
-        dst_mask = np.zeros((align_h,align_w), dtype=np.uint8)
         if not innner_mouth or (transparent_background and add_align_mouth):
-            mouth_cordinates = get_pixel_cordinate_list(align_larndmarks,mp_constants.LINE_INNER_MOUTH,align_h,align_w)
             cv2.fillPoly(dst_mask, [np.int32(mouth_cordinates)], 255)

-            if (transparent_background and not add_align_mouth):
-                cv2.fillPoly(transform_target_image, [np.int32(mouth_cordinates)], [0,0,0,0])
         if not innner_eyes or (transparent_background and add_align_eyes):
-
-            left_eyes_cordinates = get_pixel_cordinate_list(align_larndmarks,mp_constants.LINE_LEFT_INNER_EYES,align_h,align_w)
-
             cv2.fillPoly(dst_mask, [np.int32(left_eyes_cordinates)], 255)

-            right_eyes_cordinates = get_pixel_cordinate_list(align_larndmarks,mp_constants.LINE_RIGHT_INNER_EYES,align_h,align_w)
             cv2.fillPoly(dst_mask, [np.int32(right_eyes_cordinates)], 255)

-            if (transparent_background and not add_align_eyes):
-                cv2.fillPoly(transform_target_image, [np.int32(left_eyes_cordinates)], [0,0,0,0])
-                cv2.fillPoly(transform_target_image, [np.int32(right_eyes_cordinates)], [0,0,0,0])
-        #cv2.imwrite("deb_transform_target_image.jpg",transform_target_image)
-        #cv2.imwrite("deb_dst_mask.jpg",dst_mask)
-        #cv2.imwrite("deb_applied_image.jpg",applied_image)
-        applied_image = blend_rgba_images_numba(applied_image,transform_target_image,dst_mask)

     return applied_image


-
-def process_landmark_transform_pil(pil_image,pil_align_target_image,
-                                   innner_mouth,innner_eyes,
-                                   color_matching=False,transparent_background=False,add_align_mouth=False,add_align_eyes=False,blur_size=0):
     image = pil_to_bgr_image(pil_image)
     align_target_image = pil_to_bgr_image(pil_align_target_image)
-    cv_result = process_landmark_transform(image,align_target_image,innner_mouth,innner_eyes,color_matching,transparent_background,add_align_mouth,add_align_eyes,blur_size)
     if transparent_background:
         return Image.fromarray(cv2.cvtColor(cv_result, cv2.COLOR_BGRA2RGBA))
     else:
         return Image.fromarray(cv2.cvtColor(cv_result, cv2.COLOR_BGR2RGB))

 if __name__ == "__main__":
-    #image = Image.open('examples/00002062.jpg')
-    #align_target = Image.open('examples/02316230.jpg')
-    image = cv2.imread('examples/02316230.jpg')  # source image
-    align_target = cv2.imread('examples/00003245_00.jpg')  # target image
-    result_img = process_landmark_transform(image,align_target)

-    cv2.imshow('Transformed Image', result_img)
     cv2.waitKey(0)
     cv2.destroyAllWindows()

-    cv2.imwrite('align.png', result_img)
 
The same hunks on the new side, i.e. the file after this commit:

@@ -4,36 +4,58 @@ import mp_triangles
 import time
 from PIL import Image

+from glibvision.cv2_utils import (
+    blend_rgb_images,
+    pil_to_bgr_image,
+    fill_points,
+    crop,
+    paste,
+)
+from mp_utils import (
+    get_pixel_cordinate_list,
+    extract_landmark,
+    get_pixel_cordinate,
+    get_normalized_landmarks,
+    sort_triangles_by_depth,
+    get_landmark_bbox,
+)

 import numba as nb
+
+
 @nb.jit(nopython=True, parallel=True)
 def blend_rgb_images_numba(image1, image2, mask):
     height, width, _ = image1.shape
     result = np.empty((height, width, 3), dtype=np.float32)
+
     for i in nb.prange(height):
         for j in range(width):
             alpha = mask[i, j] / 255.0
             for k in range(3):
+                result[i, j, k] = (1 - alpha) * image1[i, j, k] + alpha * image2[
+                    i, j, k
+                ]
+
     return result.astype(np.uint8)

+
 @nb.jit(nopython=True, parallel=True)
 def blend_rgba_images_numba(image1, image2, mask):
+    assert (
+        image1.shape[2] == image2.shape[2]
+    ), f"Input images must have the same channel count: image1 = {image1.shape[2]}, image2 = {image2.shape[2]}"
     channel = image1.shape[2]
     height, width, _ = image1.shape
     result = np.empty((height, width, channel), dtype=np.float32)
+
     for i in nb.prange(height):
         for j in range(width):
             alpha = mask[i, j] / 255.0
             for k in range(channel):
+                result[i, j, k] = (1 - alpha) * image1[i, j, k] + alpha * image2[
+                    i, j, k
+                ]
+
     return result.astype(np.uint8)
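Both numba kernels above are plain per-pixel alpha blends, result = (1 - a) * image1 + a * image2 with a = mask / 255. A minimal usage sketch (mine, not part of the commit):

    import numpy as np

    img1 = np.zeros((4, 4, 3), dtype=np.uint8)      # all black
    img2 = np.full((4, 4, 3), 255, dtype=np.uint8)  # all white
    mask = np.zeros((4, 4), dtype=np.uint8)
    mask[:, 2:] = 255                               # take the right half from img2

    out = blend_rgb_images_numba(img1, img2, mask)
    assert out[0, 0, 0] == 0 and out[0, 3, 0] == 255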
@@ -46,26 +68,30 @@ bug some hide value make white
 """
 debug_affinn = False
 min_affin_plus = 0.1
+
+
 def apply_affine_transformation_to_triangle(src_tri, dst_tri, src_img, dst_img):
     src_tri_np = np.float32(src_tri)
     dst_tri_np = np.float32(dst_tri)

     assert src_tri_np.shape == (3, 2), f"src_tri_np has invalid shape {src_tri_np.shape}"
     assert dst_tri_np.shape == (3, 2), f"dst_tri_np has invalid shape {dst_tri_np.shape}"

+    # trying to avoid identical points, or M will be broken
     if (src_tri_np[0] == src_tri_np[1]).all():
+        src_tri_np[0] += min_affin_plus
     if (src_tri_np[0] == src_tri_np[2]).all():
+        src_tri_np[0] += min_affin_plus
     if (src_tri_np[1] == src_tri_np[2]).all():
+        src_tri_np[1] += min_affin_plus
     if (src_tri_np[1] == src_tri_np[0]).all():
+        src_tri_np[1] += min_affin_plus

+    if (
+        (src_tri_np[1] == src_tri_np[0]).all()
+        or (src_tri_np[1] == src_tri_np[2]).all()
+        or (src_tri_np[2] == src_tri_np[0]).all()
+    ):
         print("same will white noise happen")
     # compute the affine transformation matrix
     M = cv2.getAffineTransform(src_tri_np, dst_tri_np)
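The nudging above exists because cv2.getAffineTransform solves for the affine matrix from three point pairs; if two source points coincide the system is singular and the resulting M is degenerate, which the author observed as white noise in the warp output. The guard in isolation (my sketch, not from the commit):

    import cv2
    import numpy as np

    min_affin_plus = 0.1
    src = np.float32([[10, 10], [10, 10], [50, 90]])  # p0 == p1: degenerate triangle
    dst = np.float32([[0, 0], [100, 0], [50, 100]])

    if (src[0] == src[1]).all():
        src[0] += min_affin_plus                      # make all three points distinct

    M = cv2.getAffineTransform(src, dst)
    print(M.shape)                                    # (2, 3), now well-defined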
 
@@ -74,43 +100,42 @@ def apply_affine_transformation_to_triangle(src_tri, dst_tri, src_img, dst_img):
     h_dst, w_dst = dst_img.shape[:2]

     # build a mask that cuts the triangle region out of the source image
+    # src_mask = np.zeros((h_src, w_src), dtype=np.uint8)
+    # cv2.fillPoly(src_mask, [np.int32(src_tri)], 255)

     # Not extracting only the triangle region of the source image with a mask
+    src_triangle = src_img  # cv2.bitwise_and(src_img, src_img, mask=src_mask)

     # warp the source triangle region to the destination image size with the transform matrix
+
     transformed = cv2.warpAffine(src_triangle, M, (w_dst, h_dst))
     if debug_affinn:
+        cv2.imwrite("affin_src.jpg", src_triangle)
+        cv2.imwrite("affin_transformed.jpg", transformed)

+    # print(f"dst_img={dst_img.shape}")
+    # print(f"transformed={transformed.shape}")
     # generate the post-transform mask
     dst_mask = np.zeros((h_dst, w_dst), dtype=np.uint8)
     cv2.fillPoly(dst_mask, [np.int32(dst_tri)], 255)

     # create an inverted destination mask to clear the masked region of the destination image
+    # dst_mask_inv = cv2.bitwise_not(dst_mask)

     # clear the masked part of the destination image
+    # dst_background = cv2.bitwise_and(dst_img, dst_img, mask=dst_mask_inv)

     # composite the warped source triangle with the destination background
+    # dst_img = cv2.add(dst_background, transformed)
+    # s = time.time()
+    # dst_img = blend_rgb_images(dst_img,transformed,dst_mask)

     use_blend_rgb = False
     if use_blend_rgb:
+        if src_img.shape[2] == 3:
+            dst_img = blend_rgb_images_numba(dst_img, transformed, dst_mask)
         else:
+            dst_img = blend_rgba_images_numba(dst_img, transformed, dst_mask)
     else:
         dst_mask_inv = cv2.bitwise_not(dst_mask)
         transformed = cv2.bitwise_and(transformed, transformed, mask=dst_mask)
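The else branch is the classic bitwise composite: keep only the destination triangle from the warped image, punch the same triangle out of the destination, then cv2.add the two halves (the matching bitwise_and on dst_img presumably sits in the unchanged line between the two hunks). The pattern in isolation (my sketch, not from the commit):

    import cv2
    import numpy as np

    dst_img = np.full((100, 100, 3), 128, dtype=np.uint8)      # gray background
    transformed = np.full((100, 100, 3), 255, dtype=np.uint8)  # stand-in for the warpAffine output
    dst_tri = np.int32([[10, 10], [90, 20], [40, 80]])

    dst_mask = np.zeros((100, 100), dtype=np.uint8)
    cv2.fillPoly(dst_mask, [dst_tri], 255)
    dst_mask_inv = cv2.bitwise_not(dst_mask)

    triangle_only = cv2.bitwise_and(transformed, transformed, mask=dst_mask)
    background = cv2.bitwise_and(dst_img, dst_img, mask=dst_mask_inv)
    composited = cv2.add(background, triangle_only)            # white triangle on gray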
 
@@ -118,166 +143,232 @@ def apply_affine_transformation_to_triangle(src_tri, dst_tri, src_img, dst_img):
         dst_img = cv2.add(dst_img, transformed)

     # TODO add rgb mode
+
+    # print(f"blend {time.time() -s}")
     if debug_affinn:
+        cv2.imwrite("affin_transformed_masked.jpg", transformed)
+        cv2.imwrite("affin_dst_mask.jpg", dst_mask)
     return dst_img

+from skimage.exposure import match_histograms
+
+
+def color_match(base_image, cropped_image, color_match_format="RGB"):
+    reference = np.array(base_image.convert(color_match_format))
+    target = np.array(cropped_image.convert(color_match_format))
+    matched = match_histograms(target, reference, channel_axis=-1)
+
+    return Image.fromarray(matched, mode=color_match_format)
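color_match maps the crop's color distribution onto the reference via scikit-image histogram matching. A usage sketch (mine, not part of the commit; the file names are hypothetical). Note that Image.fromarray expects uint8 data for RGB while match_histograms interpolates and returns floats, so the sketch casts explicitly:

    import numpy as np
    from PIL import Image
    from skimage.exposure import match_histograms

    base = Image.open("examples/base_face.png").convert("RGB")
    crop = Image.open("examples/cropped_face.png").convert("RGB")

    matched = match_histograms(np.array(crop), np.array(base), channel_axis=-1)
    recolored = Image.fromarray(np.clip(matched, 0, 255).astype(np.uint8), mode="RGB")
    recolored.save("recolored.png")
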
+def process_landmark_transform(
+    image,
+    transform_target_image,
+    innner_mouth,
+    innner_eyes,
+    color_matching=False,
+    transparent_background=False,
+    add_align_mouth=False,
+    add_align_eyes=False,
+    blur_size=0,
+):
+    image_h, image_w = image.shape[:2]
+    align_h, align_w = transform_target_image.shape[:2]
+
+    mp_image, image_face_landmarker_result = extract_landmark(image)
+    image_larndmarks = image_face_landmarker_result.face_landmarks
+    image_bbox = get_landmark_bbox(image_larndmarks, image_w, image_h, 16, 16)
+
+    mp_image, align_face_landmarker_result = extract_landmark(transform_target_image)
+    align_larndmarks = align_face_landmarker_result.face_landmarks
+    align_bbox = get_landmark_bbox(align_larndmarks, align_w, align_h, 16, 16)

     if color_matching:
+        image_cropped = crop(image, image_bbox)
+        target_cropped = crop(transform_target_image, align_bbox)
+        matched = match_histograms(image_cropped, target_cropped, channel_axis=-1)
+        paste(image, matched, image_bbox[0], image_bbox[1])
 
     landmark_points = get_normalized_landmarks(align_larndmarks)
+
+    mesh_triangle_indices = (
+        mp_triangles.mesh_triangle_indices.copy()
+    )  # copy: using the shared list directly would mutate it
+
+    # always mix these triangles in so blur covers mouth and eyes
     mesh_triangle_indices += mp_triangles.INNER_MOUTH
+
+    mesh_triangle_indices += (
+        mp_triangles.INNER_LEFT_EYES + mp_triangles.INNER_RIGHT_EYES
+    )
+    # print(mesh_triangle_indices)
+    sort_triangles_by_depth(landmark_points, mesh_triangle_indices)
+
+    # mesh_triangle_indices = mp_triangles.contour_to_triangles(True,draw_updown_contour) + mp_triangles.contour_to_triangles(False,draw_updown_contour)+ mp_triangles.mesh_triangle_indices
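sort_triangles_by_depth is not shown in this diff; the call implies a painter's-algorithm ordering of the mesh before drawing, using the z component MediaPipe attaches to each normalized landmark. A sketch of that idea under my assumptions (in-place sort, deepest triangles first):

    def sort_triangles_by_mean_depth(points, triangles):
        # points: sequence of (x, y, z) normalized landmarks
        # triangles: list of 3-index lists, reordered in place
        triangles.sort(key=lambda tri: sum(points[i][2] for i in tri), reverse=True)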

     triangle_size = len(mesh_triangle_indices)
+    # print(f"triangle_size = {triangle_size},time ={0.1*triangle_size}")
     s = time.time()
+
     need_transparent_way = transparent_background == True or blur_size > 0
+    if need_transparent_way:  # convert to alpha
+        transparent_image = np.zeros_like(
+            cv2.cvtColor(transform_target_image, cv2.COLOR_BGR2BGRA)
+        )
         h, w = transparent_image.shape[:2]
+        cv2.rectangle(transparent_image, (0, 0), (w, h), (0, 0, 0, 0), -1)

         applied_image = transparent_image
         image = cv2.cvtColor(image, cv2.COLOR_BGR2BGRA)
+
     else:
         applied_image = transform_target_image
+
+    for i in range(0, triangle_size):
         triangle_indices = mesh_triangle_indices[i]
+
+        image_points = get_pixel_cordinate_list(
+            image_larndmarks, triangle_indices, image_w, image_h
+        )
+
+        align_points = get_pixel_cordinate_list(
+            align_larndmarks, triangle_indices, align_w, align_h
+        )
+        # print(image_points)
+        # print(align_points)
+        # fill_points(image,image_points,thickness=3,fill_color=(0,0,0,0))
+        # s = time.time()
+        # print(f"applied_image={applied_image.shape}")
+        applied_image = apply_affine_transformation_to_triangle(
+            image_points, align_points, image, applied_image
+        )
+
+    # print(f"take time {time.time()-s}")
     if need_transparent_way:
         blur_radius = blur_size
+        if blur_radius != 0 and blur_radius % 2 == 0:
+            blur_radius += 1
+
+        b, g, r, a = cv2.split(applied_image)
+        applied_image = cv2.merge([b, g, r])
         mask = a.copy()
         dilate = blur_radius
         kernel = np.ones((dilate, dilate), np.uint8)
         mask = cv2.erode(mask, kernel, iterations=1)

+        if blur_radius > 0:
+            blurred_image = cv2.GaussianBlur(
+                mask, (blur_radius, blur_radius), 0
+            )  # kernel size should be odd
         else:
             blurred_image = mask

         if transparent_background:
+            # transform_target_image = np.zeros_like(cv2.cvtColor(transform_target_image, cv2.COLOR_BGR2BGRA))
+            transform_target_image = cv2.cvtColor(
+                transform_target_image, cv2.COLOR_BGR2BGRA
+            )
+            applied_image = cv2.merge([b, g, r, blurred_image])
         else:
+            applied_image = blend_rgb_images(
+                transform_target_image, applied_image, blurred_image
+            )
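The erode-then-blur above feathers the pasted face's alpha channel: eroding by the blur radius pulls the mask edge inward so the Gaussian blur (which requires an odd kernel size) fades out inside the original boundary instead of bleeding past it. The pattern in isolation (my sketch, not from the commit):

    import cv2
    import numpy as np

    mask = np.zeros((100, 100), dtype=np.uint8)
    cv2.circle(mask, (50, 50), 30, 255, -1)           # hard-edged alpha

    blur_radius = 7                                   # GaussianBlur needs an odd kernel
    kernel = np.ones((blur_radius, blur_radius), np.uint8)
    eroded = cv2.erode(mask, kernel, iterations=1)    # shrink so the fade stays inside
    feathered = cv2.GaussianBlur(eroded, (blur_radius, blur_radius), 0)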

     # after mix
+    if (
+        not innner_mouth
+        or not innner_eyes
+        or (transparent_background and (add_align_mouth or add_align_eyes))
+    ):
         import mp_constants
+
+        dst_mask = np.zeros((align_h, align_w), dtype=np.uint8)
         if not innner_mouth or (transparent_background and add_align_mouth):
+            mouth_cordinates = get_pixel_cordinate_list(
+                align_larndmarks, mp_constants.LINE_INNER_MOUTH, align_w, align_h
+            )
             cv2.fillPoly(dst_mask, [np.int32(mouth_cordinates)], 255)

+            if transparent_background and not add_align_mouth:
+                cv2.fillPoly(
+                    transform_target_image, [np.int32(mouth_cordinates)], [0, 0, 0, 0]
+                )
+
         if not innner_eyes or (transparent_background and add_align_eyes):
+            left_eyes_cordinates = get_pixel_cordinate_list(
+                align_larndmarks, mp_constants.LINE_LEFT_INNER_EYES, align_w, align_h
+            )
+
             cv2.fillPoly(dst_mask, [np.int32(left_eyes_cordinates)], 255)

+            right_eyes_cordinates = get_pixel_cordinate_list(
+                align_larndmarks, mp_constants.LINE_RIGHT_INNER_EYES, align_w, align_h
+            )
             cv2.fillPoly(dst_mask, [np.int32(right_eyes_cordinates)], 255)

+            if transparent_background and not add_align_eyes:
+                cv2.fillPoly(
+                    transform_target_image,
+                    [np.int32(left_eyes_cordinates)],
+                    [0, 0, 0, 0],
+                )
+                cv2.fillPoly(
+                    transform_target_image,
+                    [np.int32(right_eyes_cordinates)],
+                    [0, 0, 0, 0],
+                )
+
+        # cv2.imwrite("deb_transform_target_image.jpg",transform_target_image)
+        # cv2.imwrite("deb_dst_mask.jpg",dst_mask)
+        # cv2.imwrite("deb_applied_image.jpg",applied_image)
+        applied_image = blend_rgba_images_numba(
+            applied_image, transform_target_image, dst_mask
+        )
+        cv2.imwrite("deb_final_transform_target_image.jpg", transform_target_image)

     return applied_image

+def process_landmark_transform_pil(
+    pil_image,
+    pil_align_target_image,
+    innner_mouth,
+    innner_eyes,
+    color_matching=False,
+    transparent_background=False,
+    add_align_mouth=False,
+    add_align_eyes=False,
+    blur_size=0,
+):
     image = pil_to_bgr_image(pil_image)
     align_target_image = pil_to_bgr_image(pil_align_target_image)
+    cv_result = process_landmark_transform(
+        image,
+        align_target_image,
+        innner_mouth,
+        innner_eyes,
+        color_matching,
+        transparent_background,
+        add_align_mouth,
+        add_align_eyes,
+        blur_size,
+    )
     if transparent_background:
         return Image.fromarray(cv2.cvtColor(cv_result, cv2.COLOR_BGRA2RGBA))
     else:
         return Image.fromarray(cv2.cvtColor(cv_result, cv2.COLOR_BGR2RGB))
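A usage sketch for the PIL wrapper (mine; the file names are hypothetical). The module's own __main__ block below still calls process_landmark_transform(image, align_target) without the required innner_mouth and innner_eyes arguments, so this wrapper is the safer entry point:

    from PIL import Image

    src = Image.open("examples/source_face.jpg")     # face to transfer
    target = Image.open("examples/target_face.jpg")  # pose and size to align to

    result = process_landmark_transform_pil(
        src,
        target,
        innner_mouth=True,
        innner_eyes=True,
        color_matching=True,
        blur_size=5,
    )
    result.save("aligned.png")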

+
 if __name__ == "__main__":
+    # image = Image.open('examples/00002062.jpg')
+    # align_target = Image.open('examples/02316230.jpg')
+    image = cv2.imread("examples/02316230.jpg")  # source image
+    align_target = cv2.imread("examples/00003245_00.jpg")  # target image
+    result_img = process_landmark_transform(image, align_target)

+    cv2.imshow("Transformed Image", result_img)
     cv2.waitKey(0)
     cv2.destroyAllWindows()

+    cv2.imwrite("align.png", result_img)