Spanicin committed
Commit 16ee39a · verified · 1 Parent(s): ad9d79b

Update src/facerender/modules/make_animation.py

Files changed (1)
  1. src/facerender/modules/make_animation.py +205 -169
src/facerender/modules/make_animation.py CHANGED
@@ -1,170 +1,206 @@
-from scipy.spatial import ConvexHull
-import torch
-import torch.nn.functional as F
-import numpy as np
-from tqdm import tqdm
-
-def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
-                 use_relative_movement=False, use_relative_jacobian=False):
-    if adapt_movement_scale:
-        source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
-        driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
-        adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
-    else:
-        adapt_movement_scale = 1
-
-    kp_new = {k: v for k, v in kp_driving.items()}
-
-    if use_relative_movement:
-        kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
-        kp_value_diff *= adapt_movement_scale
-        kp_new['value'] = kp_value_diff + kp_source['value']
-
-        if use_relative_jacobian:
-            jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
-            kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
-
-    return kp_new
-
-def headpose_pred_to_degree(pred):
-    device = pred.device
-    idx_tensor = [idx for idx in range(66)]
-    idx_tensor = torch.FloatTensor(idx_tensor).to(device)
-    pred = F.softmax(pred)
-    degree = torch.sum(pred*idx_tensor, 1) * 3 - 99
-    return degree
-
-def get_rotation_matrix(yaw, pitch, roll):
-    yaw = yaw / 180 * 3.14
-    pitch = pitch / 180 * 3.14
-    roll = roll / 180 * 3.14
-
-    roll = roll.unsqueeze(1)
-    pitch = pitch.unsqueeze(1)
-    yaw = yaw.unsqueeze(1)
-
-    pitch_mat = torch.cat([torch.ones_like(pitch), torch.zeros_like(pitch), torch.zeros_like(pitch),
-                           torch.zeros_like(pitch), torch.cos(pitch), -torch.sin(pitch),
-                           torch.zeros_like(pitch), torch.sin(pitch), torch.cos(pitch)], dim=1)
-    pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3)
-
-    yaw_mat = torch.cat([torch.cos(yaw), torch.zeros_like(yaw), torch.sin(yaw),
-                         torch.zeros_like(yaw), torch.ones_like(yaw), torch.zeros_like(yaw),
-                         -torch.sin(yaw), torch.zeros_like(yaw), torch.cos(yaw)], dim=1)
-    yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3)
-
-    roll_mat = torch.cat([torch.cos(roll), -torch.sin(roll), torch.zeros_like(roll),
-                          torch.sin(roll), torch.cos(roll), torch.zeros_like(roll),
-                          torch.zeros_like(roll), torch.zeros_like(roll), torch.ones_like(roll)], dim=1)
-    roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3)
-
-    rot_mat = torch.einsum('bij,bjk,bkm->bim', pitch_mat, yaw_mat, roll_mat)
-
-    return rot_mat
-
-def keypoint_transformation(kp_canonical, he, wo_exp=False):
-    kp = kp_canonical['value']    # (bs, k, 3)
-    yaw, pitch, roll = he['yaw'], he['pitch'], he['roll']
-    yaw = headpose_pred_to_degree(yaw)
-    pitch = headpose_pred_to_degree(pitch)
-    roll = headpose_pred_to_degree(roll)
-
-    if 'yaw_in' in he:
-        yaw = he['yaw_in']
-    if 'pitch_in' in he:
-        pitch = he['pitch_in']
-    if 'roll_in' in he:
-        roll = he['roll_in']
-
-    rot_mat = get_rotation_matrix(yaw, pitch, roll)    # (bs, 3, 3)
-
-    t, exp = he['t'], he['exp']
-    if wo_exp:
-        exp = exp*0
-
-    # keypoint rotation
-    kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp)
-
-    # keypoint translation
-    t[:, 0] = t[:, 0]*0
-    t[:, 2] = t[:, 2]*0
-    t = t.unsqueeze(1).repeat(1, kp.shape[1], 1)
-    kp_t = kp_rotated + t
-
-    # add expression deviation
-    exp = exp.view(exp.shape[0], -1, 3)
-    kp_transformed = kp_t + exp
-
-    return {'value': kp_transformed}
-
-
-
-def make_animation(source_image, source_semantics, target_semantics,
-                   generator, kp_detector, he_estimator, mapping,
-                   yaw_c_seq=None, pitch_c_seq=None, roll_c_seq=None,
-                   use_exp=True):
-    with torch.no_grad():
-        predictions = []
-
-        kp_canonical = kp_detector(source_image)
-        he_source = mapping(source_semantics)
-        kp_source = keypoint_transformation(kp_canonical, he_source)
-
-        for frame_idx in tqdm(range(target_semantics.shape[1]), 'Face Renderer:'):
-            target_semantics_frame = target_semantics[:, frame_idx]
-            he_driving = mapping(target_semantics_frame)
-            if yaw_c_seq is not None:
-                he_driving['yaw_in'] = yaw_c_seq[:, frame_idx]
-            if pitch_c_seq is not None:
-                he_driving['pitch_in'] = pitch_c_seq[:, frame_idx]
-            if roll_c_seq is not None:
-                he_driving['roll_in'] = roll_c_seq[:, frame_idx]
-
-            kp_driving = keypoint_transformation(kp_canonical, he_driving)
-
-            #kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
-            #                       kp_driving_initial=kp_driving_initial)
-            kp_norm = kp_driving
-            out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
-            '''
-            source_image_new = out['prediction'].squeeze(1)
-            kp_canonical_new = kp_detector(source_image_new)
-            he_source_new = he_estimator(source_image_new)
-            kp_source_new = keypoint_transformation(kp_canonical_new, he_source_new, wo_exp=True)
-            kp_driving_new = keypoint_transformation(kp_canonical_new, he_driving, wo_exp=True)
-            out = generator(source_image_new, kp_source=kp_source_new, kp_driving=kp_driving_new)
-            '''
-            predictions.append(out['prediction'])
-        predictions_ts = torch.stack(predictions, dim=1)
-    return predictions_ts
-
-class AnimateModel(torch.nn.Module):
-    """
-    Merge all generator related updates into single model for better multi-gpu usage
-    """
-
-    def __init__(self, generator, kp_extractor, mapping):
-        super(AnimateModel, self).__init__()
-        self.kp_extractor = kp_extractor
-        self.generator = generator
-        self.mapping = mapping
-
-        self.kp_extractor.eval()
-        self.generator.eval()
-        self.mapping.eval()
-
-    def forward(self, x):
-
-        source_image = x['source_image']
-        source_semantics = x['source_semantics']
-        target_semantics = x['target_semantics']
-        yaw_c_seq = x['yaw_c_seq']
-        pitch_c_seq = x['pitch_c_seq']
-        roll_c_seq = x['roll_c_seq']
-
-        predictions_video = make_animation(source_image, source_semantics, target_semantics,
-                                           self.generator, self.kp_extractor,
-                                           self.mapping, use_exp=True,
-                                           yaw_c_seq=yaw_c_seq, pitch_c_seq=pitch_c_seq, roll_c_seq=roll_c_seq)
-
+from scipy.spatial import ConvexHull
+import torch
+import torch.nn.functional as F
+import numpy as np
+from tqdm import tqdm
+
+def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
+                 use_relative_movement=False, use_relative_jacobian=False):
+    if adapt_movement_scale:
+        source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
+        driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
+        adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
+    else:
+        adapt_movement_scale = 1
+
+    kp_new = {k: v for k, v in kp_driving.items()}
+
+    if use_relative_movement:
+        kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
+        kp_value_diff *= adapt_movement_scale
+        kp_new['value'] = kp_value_diff + kp_source['value']
+
+        if use_relative_jacobian:
+            jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
+            kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
+
+    return kp_new
+
+def headpose_pred_to_degree(pred):
+    device = pred.device
+    idx_tensor = [idx for idx in range(66)]
+    idx_tensor = torch.FloatTensor(idx_tensor).to(device)
+    pred = F.softmax(pred)
+    degree = torch.sum(pred*idx_tensor, 1) * 3 - 99
+    return degree
+
+def get_rotation_matrix(yaw, pitch, roll):
+    yaw = yaw / 180 * 3.14
+    pitch = pitch / 180 * 3.14
+    roll = roll / 180 * 3.14
+
+    roll = roll.unsqueeze(1)
+    pitch = pitch.unsqueeze(1)
+    yaw = yaw.unsqueeze(1)
+
+    pitch_mat = torch.cat([torch.ones_like(pitch), torch.zeros_like(pitch), torch.zeros_like(pitch),
+                           torch.zeros_like(pitch), torch.cos(pitch), -torch.sin(pitch),
+                           torch.zeros_like(pitch), torch.sin(pitch), torch.cos(pitch)], dim=1)
+    pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3)
+
+    yaw_mat = torch.cat([torch.cos(yaw), torch.zeros_like(yaw), torch.sin(yaw),
+                         torch.zeros_like(yaw), torch.ones_like(yaw), torch.zeros_like(yaw),
+                         -torch.sin(yaw), torch.zeros_like(yaw), torch.cos(yaw)], dim=1)
+    yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3)
+
+    roll_mat = torch.cat([torch.cos(roll), -torch.sin(roll), torch.zeros_like(roll),
+                          torch.sin(roll), torch.cos(roll), torch.zeros_like(roll),
+                          torch.zeros_like(roll), torch.zeros_like(roll), torch.ones_like(roll)], dim=1)
+    roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3)
+
+    rot_mat = torch.einsum('bij,bjk,bkm->bim', pitch_mat, yaw_mat, roll_mat)
+
+    return rot_mat
+
+def keypoint_transformation(kp_canonical, he, wo_exp=False):
+    kp = kp_canonical['value']    # (bs, k, 3)
+    yaw, pitch, roll = he['yaw'], he['pitch'], he['roll']
+    yaw = headpose_pred_to_degree(yaw)
+    pitch = headpose_pred_to_degree(pitch)
+    roll = headpose_pred_to_degree(roll)
+
+    if 'yaw_in' in he:
+        yaw = he['yaw_in']
+    if 'pitch_in' in he:
+        pitch = he['pitch_in']
+    if 'roll_in' in he:
+        roll = he['roll_in']
+
+    rot_mat = get_rotation_matrix(yaw, pitch, roll)    # (bs, 3, 3)
+
+    t, exp = he['t'], he['exp']
+    if wo_exp:
+        exp = exp*0
+
+    # keypoint rotation
+    kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp)
+
+    # keypoint translation
+    t[:, 0] = t[:, 0]*0
+    t[:, 2] = t[:, 2]*0
+    t = t.unsqueeze(1).repeat(1, kp.shape[1], 1)
+    kp_t = kp_rotated + t
+
+    # add expression deviation
+    exp = exp.view(exp.shape[0], -1, 3)
+    kp_transformed = kp_t + exp
+
+    return {'value': kp_transformed}
+
+from concurrent.futures import ThreadPoolExecutor, as_completed
+
+def process_frame(frame_idx, source_image, kp_canonical, kp_source, generator, mapping,
+                  yaw_c_seq, pitch_c_seq, roll_c_seq, target_semantics):
+    target_semantics_frame = target_semantics[:, frame_idx]
+    he_driving = mapping(target_semantics_frame)
+
+    if yaw_c_seq is not None:
+        he_driving['yaw_in'] = yaw_c_seq[:, frame_idx]
+    if pitch_c_seq is not None:
+        he_driving['pitch_in'] = pitch_c_seq[:, frame_idx]
+    if roll_c_seq is not None:
+        he_driving['roll_in'] = roll_c_seq[:, frame_idx]
+
+    kp_driving = keypoint_transformation(kp_canonical, he_driving)
+    kp_norm = kp_driving
+    out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
+    return out['prediction']
+
+
+
+def make_animation(source_image, source_semantics, target_semantics,
+                   generator, kp_detector, he_estimator, mapping,
+                   yaw_c_seq=None, pitch_c_seq=None, roll_c_seq=None,
+                   use_exp=True):
+    with torch.no_grad():
+        predictions = []
+        device = 'cuda'
+        source_image = source_image.to(device)
+        source_semantics = source_semantics.to(device)
+        target_semantics = target_semantics.to(device)
+
+        kp_canonical = kp_detector(source_image)
+        he_source = mapping(source_semantics)
+        kp_source = keypoint_transformation(kp_canonical, he_source)
+
+        frame_indices = range(target_semantics.shape[1])
+        with ThreadPoolExecutor() as executor:
+            futures = {
+                executor.submit(process_frame, idx, source_image, kp_canonical, kp_source,
+                                generator, mapping, yaw_c_seq, pitch_c_seq, roll_c_seq, target_semantics): idx
+                for idx in frame_indices
+            }
+            for future in tqdm(as_completed(futures), total=len(futures), desc='Face Renderer:'):
+                predictions.append(future.result())
+
+        predictions_ts = torch.stack(predictions, dim=1)
+        return predictions_ts
+
+        # for frame_idx in tqdm(range(target_semantics.shape[1]), 'Face Renderer:'):
+        #     target_semantics_frame = target_semantics[:, frame_idx]
+        #     he_driving = mapping(target_semantics_frame)
+        #     if yaw_c_seq is not None:
+        #         he_driving['yaw_in'] = yaw_c_seq[:, frame_idx]
+        #     if pitch_c_seq is not None:
+        #         he_driving['pitch_in'] = pitch_c_seq[:, frame_idx]
+        #     if roll_c_seq is not None:
+        #         he_driving['roll_in'] = roll_c_seq[:, frame_idx]
+
+        #     kp_driving = keypoint_transformation(kp_canonical, he_driving)
+
+        #     #kp_norm = normalize_kp(kp_source=kp_source, kp_driving=kp_driving,
+        #     #                       kp_driving_initial=kp_driving_initial)
+        #     kp_norm = kp_driving
+        #     out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm)
+        #     '''
+        #     source_image_new = out['prediction'].squeeze(1)
+        #     kp_canonical_new = kp_detector(source_image_new)
+        #     he_source_new = he_estimator(source_image_new)
+        #     kp_source_new = keypoint_transformation(kp_canonical_new, he_source_new, wo_exp=True)
+        #     kp_driving_new = keypoint_transformation(kp_canonical_new, he_driving, wo_exp=True)
+        #     out = generator(source_image_new, kp_source=kp_source_new, kp_driving=kp_driving_new)
+        #     '''
+        #     predictions.append(out['prediction'])
+        # predictions_ts = torch.stack(predictions, dim=1)
+        # return predictions_ts
+
+class AnimateModel(torch.nn.Module):
+    """
+    Merge all generator related updates into single model for better multi-gpu usage
+    """
+
+    def __init__(self, generator, kp_extractor, mapping):
+        super(AnimateModel, self).__init__()
+        self.kp_extractor = kp_extractor
+        self.generator = generator
+        self.mapping = mapping
+
+        self.kp_extractor.eval()
+        self.generator.eval()
+        self.mapping.eval()
+
+    def forward(self, x):
+
+        source_image = x['source_image']
+        source_semantics = x['source_semantics']
+        target_semantics = x['target_semantics']
+        yaw_c_seq = x['yaw_c_seq']
+        pitch_c_seq = x['pitch_c_seq']
+        roll_c_seq = x['roll_c_seq']
+
+        predictions_video = make_animation(source_image, source_semantics, target_semantics,
+                                           self.generator, self.kp_extractor,
+                                           self.mapping, use_exp=True,
+                                           yaw_c_seq=yaw_c_seq, pitch_c_seq=pitch_c_seq, roll_c_seq=roll_c_seq)
+
         return predictions_video
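
A note on the parallelized loop introduced here: `as_completed` yields futures in completion order, not submission order, so `predictions` can be stacked out of frame order and the rendered video may come out with shuffled frames. The committed `futures` dict already maps each future back to its frame index; a minimal order-preserving collection sketch reusing that mapping (the `results` dict is hypothetical, added for illustration):

    # Sketch only: collect results keyed by frame index, then stack in frame order.
    # Assumes the same `futures` dict ({future: frame_idx}) built in the commit.
    results = {}
    for future in tqdm(as_completed(futures), total=len(futures), desc='Face Renderer:'):
        idx = futures[future]          # recover the frame index this future belongs to
        results[idx] = future.result()

    # Stack along the time dimension in frame order, not completion order.
    predictions_ts = torch.stack([results[i] for i in sorted(results)], dim=1)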
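
Relatedly, grad mode in PyTorch is thread-local: the `torch.no_grad()` entered in `make_animation` does not apply inside the `ThreadPoolExecutor` worker threads, so each `process_frame` call may build autograd graphs and inflate memory. A sketch of guarding the worker itself, assuming the same signature as the committed function:

    def process_frame(frame_idx, source_image, kp_canonical, kp_source, generator, mapping,
                      yaw_c_seq, pitch_c_seq, roll_c_seq, target_semantics):
        # Grad mode set in the main thread does not propagate to executor
        # threads, so disable autograd here as well.
        with torch.no_grad():
            target_semantics_frame = target_semantics[:, frame_idx]
            he_driving = mapping(target_semantics_frame)
            if yaw_c_seq is not None:
                he_driving['yaw_in'] = yaw_c_seq[:, frame_idx]
            if pitch_c_seq is not None:
                he_driving['pitch_in'] = pitch_c_seq[:, frame_idx]
            if roll_c_seq is not None:
                he_driving['roll_in'] = roll_c_seq[:, frame_idx]
            kp_driving = keypoint_transformation(kp_canonical, he_driving)
            out = generator(source_image, kp_source=kp_source, kp_driving=kp_driving)
            return out['prediction']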
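
The new version also hardcodes `device = 'cuda'`, which raises on CPU-only hosts. A small fallback sketch:

    # Fall back to CPU when no CUDA device is present.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    source_image = source_image.to(device)
    source_semantics = source_semantics.to(device)
    target_semantics = target_semantics.to(device)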
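
Unrelated to this change but visible in both versions: `get_rotation_matrix` converts degrees to radians with the literal 3.14 rather than pi (about 0.05% low), slightly skewing every rotation. If this were touched in a follow-up, `torch.deg2rad` gives the exact conversion:

    # Exact degree-to-radian conversion instead of the 3.14 literal.
    yaw = torch.deg2rad(yaw)
    pitch = torch.deg2rad(pitch)
    roll = torch.deg2rad(roll)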