code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def trans_forward(self, motion_codes, qids, cond, padding_mask, force_mask=False):
'''
:param motion_codes: (b, seqlen, d)
:param padding_mask: (b, seqlen), True at padded positions, False elsewhere
:param qids: (b), quantizer layer ids
:param cond: (b, embed_dim) for text, (b, num_actions) for action
:return:
-logits: (b, num_token, seqlen)
'''
cond = self.mask_cond(cond, force_mask=force_mask)
# (b, seqlen, d) -> (seqlen, b, latent_dim)
x = self.input_process(motion_codes)
# (b, num_quantizer)
q_onehot = self.encode_quant(qids).float().to(x.device)
q_emb = self.quant_emb(q_onehot).unsqueeze(0) # (1, b, latent_dim)
cond = self.cond_emb(cond).unsqueeze(0) # (1, b, latent_dim)
x = self.position_enc(x)
xseq = torch.cat([cond, q_emb, x], dim=0) # (seqlen+2, b, latent_dim)
padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:2]), padding_mask], dim=1) # (b, seqlen+2)
output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[2:] # (seqlen, b, e)
logits = self.output_process(output)
return logits | :param motion_codes: (b, seqlen, d)
:param padding_mask: (b, seqlen), True at padded positions, False elsewhere
:param qids: (b), quantizer layer ids
:param cond: (b, embed_dim) for text, (b, num_actions) for action
:return:
-logits: (b, num_token, seqlen) | trans_forward | python | EricGuo5513/momask-codes | models/mask_transformer/transformer.py | https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py | MIT |
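A minimal shape-contract sketch for trans_forward; the model handle, the dimensions, and the length sampling below are illustrative assumptions, not values taken from the repository:

# Hedged sketch: dummy tensors exercising the documented shapes.
import torch

b, seqlen, d = 4, 49, 512
motion_codes = torch.randn(b, seqlen, d)
qids = torch.randint(1, 6, (b,))                # quantizer layer ids
cond = torch.randn(b, 512)                      # text embedding for cond_mode='text'
lengths = torch.randint(10, seqlen + 1, (b,))
# True at padded positions, False at valid ones, as the docstring requires
padding_mask = torch.arange(seqlen)[None] >= lengths[:, None]
# logits = model.trans_forward(motion_codes, qids, cond, padding_mask)
# logits.shape == (b, num_token, seqlen)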
def forward(self, all_indices, y, m_lens):
'''
:param all_indices: (b, n, q)
:param y: raw text for cond_mode=text, (b, ) for cond_mode=action
:param m_lens: (b,)
:return:
'''
self.process_embed_proj_weight()
bs, ntokens, num_quant_layers = all_indices.shape
device = all_indices.device
# Positions that are PADDED are ALL FALSE
non_pad_mask = lengths_to_mask(m_lens, ntokens) # (b, n)
q_non_pad_mask = repeat(non_pad_mask, 'b n -> b n q', q=num_quant_layers)
all_indices = torch.where(q_non_pad_mask, all_indices, self.pad_id) #(b, n, q)
# randomly sample quantization layers to work on, [1, num_q)
active_q_layers = q_schedule(bs, low=1, high=num_quant_layers, device=device)
# print(self.token_embed_weight.shape, all_indices.shape)
token_embed = repeat(self.token_embed_weight, 'q c d-> b c d q', b=bs)
gather_indices = repeat(all_indices[..., :-1], 'b n q -> b n d q', d=token_embed.shape[2])
# print(token_embed.shape, gather_indices.shape)
all_codes = token_embed.gather(1, gather_indices) # (b, n, d, q-1)
cumsum_codes = torch.cumsum(all_codes, dim=-1) #(b, n, d, q-1)
active_indices = all_indices[torch.arange(bs), :, active_q_layers] # (b, n)
history_sum = cumsum_codes[torch.arange(bs), :, :, active_q_layers - 1]
force_mask = False
if self.cond_mode == 'text':
with torch.no_grad():
cond_vector = self.encode_text(y)
elif self.cond_mode == 'action':
cond_vector = self.enc_action(y).to(device).float()
elif self.cond_mode == 'uncond':
cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)
force_mask = True
else:
raise NotImplementedError("Unsupported condition mode!!!")
logits = self.trans_forward(history_sum, active_q_layers, cond_vector, ~non_pad_mask, force_mask)
logits = self.output_project(logits, active_q_layers-1)
ce_loss, pred_id, acc = cal_performance(logits, active_indices, ignore_index=self.pad_id)
return ce_loss, pred_id, acc | :param all_indices: (b, n, q)
:param y: raw text for cond_mode=text, (b, ) for cond_mode=action
:param m_lens: (b,)
:return: | forward | python | EricGuo5513/momask-codes | models/mask_transformer/transformer.py | https://github.com/EricGuo5513/momask-codes/blob/master/models/mask_transformer/transformer.py | MIT |
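The core teacher-forcing trick above — summing the code embeddings of layers 0..k-1 as input to predict the tokens of a sampled layer k — isolated below; the sizes and names are illustrative, assuming torch and einops:

import torch
from einops import repeat

b, n, q, d, codebook = 2, 8, 6, 16, 512            # toy sizes
all_indices = torch.randint(0, codebook, (b, n, q))
token_embed_weight = torch.randn(q, codebook, d)    # one codebook per quantizer layer

token_embed = repeat(token_embed_weight, 'q c d -> b c d q', b=b)
gather_indices = repeat(all_indices[..., :-1], 'b n q -> b n d q', d=d)
all_codes = token_embed.gather(1, gather_indices)              # (b, n, d, q-1)
cumsum_codes = torch.cumsum(all_codes, dim=-1)                 # (b, n, d, q-1)

active = torch.randint(1, q, (b,))                             # sampled layer in [1, q)
history_sum = cumsum_codes[torch.arange(b), :, :, active - 1]  # (b, n, d) transformer input
targets = all_indices[torch.arange(b), :, active]              # (b, n) prediction target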
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--batch_size', type=int, default=64, help='Batch size')
self.parser.add_argument('--max_epoch', type=int, default=500, help='Maximum number of epoch for training')
# self.parser.add_argument('--max_iters', type=int, default=150_000, help='Training iterations')
'''LR scheduler'''
self.parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')
self.parser.add_argument('--gamma', type=float, default=0.1, help='Learning rate schedule factor')
self.parser.add_argument('--milestones', default=[50_000], nargs="+", type=int,
help="learning rate schedule (iterations)")
self.parser.add_argument('--warm_up_iter', default=2000, type=int, help='number of total iterations for warmup')
'''Condition'''
self.parser.add_argument('--cond_drop_prob', type=float, default=0.1, help='Drop ratio of condition, for classifier-free guidance')
self.parser.add_argument("--seed", default=3407, type=int, help="Seed")
self.parser.add_argument('--is_continue', action="store_true", help='Is this trial continuing previous state?')
self.parser.add_argument('--gumbel_sample', action="store_true", help='Strategy for token sampling, True: Gumbel sampling, False: Categorical sampling')
self.parser.add_argument('--share_weight', action="store_true", help='Whether to share weight for projection/embedding, for residual transformer.')
self.parser.add_argument('--log_every', type=int, default=50, help='Frequency of printing training progress, (iteration)')
# self.parser.add_argument('--save_every_e', type=int, default=100, help='Frequency of printing training progress')
self.parser.add_argument('--eval_every_e', type=int, default=10, help='Frequency of animating eval results, (epoch)')
self.parser.add_argument('--save_latest', type=int, default=500, help='Frequency of saving checkpoint, (iteration)')
self.is_train = True | LR scheduler | initialize | python | EricGuo5513/momask-codes | options/train_option.py | https://github.com/EricGuo5513/momask-codes/blob/master/options/train_option.py | MIT |
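A sketch of how these scheduler options plausibly combine — linear warmup for warm_up_iter steps, then decay by gamma at each milestone; the actual trainer may wire them differently:

def lr_at(it, base_lr=2e-4, warm_up_iter=2000, milestones=(50_000,), gamma=0.1):
    # hedged sketch mirroring the option defaults above, not the trainer itself
    if it < warm_up_iter:
        return base_lr * it / warm_up_iter      # linear warmup
    drops = sum(1 for m in milestones if it >= m)
    return base_lr * gamma ** drops             # step decay at milestones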
def smpl2bvh(model_path:str, poses:str, output:str, mirror:bool,
model_type="smpl", gender="MALE",
num_betas=10, fps=60) -> None:
"""Save bvh file created by smpl parameters.
Args:
model_path (str): Path to smpl models.
poses (str): Path to npz or pkl file.
output (str): Where to save bvh.
mirror (bool): Whether to also save the mirrored motion.
model_type (str, optional): I prepared "smpl" only. Defaults to "smpl".
gender (str, optional): Gender Information. Defaults to "MALE".
num_betas (int, optional): How many PCA shape parameters to use in SMPL. Defaults to 10.
fps (int, optional): Frames per second. Defaults to 60.
"""
# names = [
# "Pelvis",
# "Left_hip",
# "Right_hip",
# "Spine1",
# "Left_knee",
# "Right_knee",
# "Spine2",
# "Left_ankle",
# "Right_ankle",
# "Spine3",
# "Left_foot",
# "Right_foot",
# "Neck",
# "Left_collar",
# "Right_collar",
# "Head",
# "Left_shoulder",
# "Right_shoulder",
# "Left_elbow",
# "Right_elbow",
# "Left_wrist",
# "Right_wrist",
# "Left_palm",
# "Right_palm",
# ]
names = [
"Hips",
"LeftUpLeg",
"RightUpLeg",
"Spine",
"LeftLeg",
"RightLeg",
"Spine1",
"LeftFoot",
"RightFoot",
"Spine2",
"LeftToe",
"RightToe",
"Neck",
"LeftShoulder",
"RightShoulder",
"Head",
"LeftArm",
"RightArm",
"LeftForeArm",
"RightForeArm",
"LeftHand",
"RightHand",
"LeftThumb",
"RightThumb",
]
# I prepared smpl models only,
# but I will add support for smplx models soon.
model = smplx.create(model_path=model_path,
model_type=model_type,
gender=gender,
batch_size=1)
parents = model.parents.detach().cpu().numpy()
# You can define betas like this (default betas are all zeros).
rest = model(
# betas = torch.randn([1, num_betas], dtype=torch.float32)
)
rest_pose = rest.joints.detach().cpu().numpy().squeeze()[:24,:]
root_offset = rest_pose[0]
offsets = rest_pose - rest_pose[parents]
offsets[0] = root_offset
offsets *= 1  # unit scale; change this factor to rescale the skeleton
scaling = None
# Pose setting.
if poses.endswith(".npz"):
poses = np.load(poses)
rots = np.squeeze(poses["poses"], axis=0) # (N, 24, 3)
trans = np.squeeze(poses["trans"], axis=0) # (N, 3)
elif poses.endswith(".pkl"):
with open(poses, "rb") as f:
poses = pickle.load(f)
rots = poses["smpl_poses"] # (N, 72)
rots = rots.reshape(rots.shape[0], -1, 3) # (N, 24, 3)
scaling = poses["smpl_scaling"] # (1,)
trans = poses["smpl_trans"] # (N, 3)
else:
raise Exception("This file type is not supported!")
if scaling is not None:
trans /= scaling
# to quaternion
rots = quat.from_axis_angle(rots)
order = "zyx"
pos = offsets[None].repeat(len(rots), axis=0)
positions = pos.copy()
# positions[:,0] += trans * 10
positions[:, 0] += trans
rotations = np.degrees(quat.to_euler(rots, order=order))
bvh_data ={
"rotations": rotations[:, :22],
"positions": positions[:, :22],
"offsets": offsets[:22],
"parents": parents[:22],
"names": names[:22],
"order": order,
"frametime": 1 / fps,
}
if not output.endswith(".bvh"):
output = output + ".bvh"
bvh.save(output, bvh_data)
if mirror:
rots_mirror, trans_mirror = mirror_rot_trans(
rots, trans, names, parents)
positions_mirror = pos.copy()
positions_mirror[:,0] += trans_mirror
rotations_mirror = np.degrees(
quat.to_euler(rots_mirror, order=order))
bvh_data ={
"rotations": rotations_mirror,
"positions": positions_mirror,
"offsets": offsets,
"parents": parents,
"names": names,
"order": order,
"frametime": 1 / fps,
}
output_mirror = output.rsplit(".", 1)[0] + "_mirror.bvh"  # rsplit keeps dots earlier in the path intact
bvh.save(output_mirror, bvh_data) | Save bvh file created by smpl parameters.
Args:
model_path (str): Path to smpl models.
poses (str): Path to npz or pkl file.
output (str): Where to save bvh.
mirror (bool): Whether save mirror motion or not.
model_type (str, optional): I prepared "smpl" only. Defaults to "smpl".
gender (str, optional): Gender Information. Defaults to "MALE".
num_betas (int, optional): How many PCA shape parameters to use in SMPL. Defaults to 10.
fps (int, optional): Frames per second. Defaults to 60. | smpl2bvh | python | EricGuo5513/momask-codes | visualization/smpl2bvh.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/smpl2bvh.py | MIT |
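A hypothetical invocation of smpl2bvh; every path below is a placeholder, not a file shipped with the repository:

smpl2bvh(model_path="./body_models/", poses="./data/motion.pkl",
         output="./results/motion.bvh", mirror=True,
         model_type="smpl", gender="MALE", fps=60)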
def __mul__(self, other):
"""
Quaternion multiplication has three main methods.
When multiplying a Quaternions array by Quaternions
normal quaternion multiplication is performed.
When multiplying a Quaternions array by a vector
array of the same shape, where the last axis is 3,
it is assumed to be a Quaternion by 3D-Vector
multiplication and the 3D-Vectors are rotated
in space by the Quaternions.
When multiplying a Quaternions array by a scalar
or vector of different shape it is assumed to be
a Quaternions by Scalars multiplication and the
Quaternions are scaled using Slerp and the identity
quaternions.
"""
""" If Quaternions type do Quaternions * Quaternions """
if isinstance(other, Quaternions):
sqs, oqs = Quaternions._broadcast(self.qs, other.qs)
q0 = sqs[..., 0]
q1 = sqs[..., 1]
q2 = sqs[..., 2]
q3 = sqs[..., 3]
r0 = oqs[..., 0]
r1 = oqs[..., 1]
r2 = oqs[..., 2]
r3 = oqs[..., 3]
qs = np.empty(sqs.shape)
qs[..., 0] = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3
qs[..., 1] = r0 * q1 + r1 * q0 - r2 * q3 + r3 * q2
qs[..., 2] = r0 * q2 + r1 * q3 + r2 * q0 - r3 * q1
qs[..., 3] = r0 * q3 - r1 * q2 + r2 * q1 + r3 * q0
return Quaternions(qs)
""" If array type do Quaternions * Vectors """
if isinstance(other, np.ndarray) and other.shape[-1] == 3:
vs = Quaternions(np.concatenate([np.zeros(other.shape[:-1] + (1,)), other], axis=-1))
return (self * (vs * -self)).imaginaries
""" If float do Quaternions * Scalars """
if isinstance(other, np.ndarray) or isinstance(other, float):
return Quaternions.slerp(Quaternions.id_like(self), self, other)
raise TypeError('Cannot multiply/add Quaternions with type %s' % str(type(other))) | Quaternion multiplication has three main methods.
When multiplying a Quaternions array by Quaternions
normal quaternion multiplication is performed.
When multiplying a Quaternions array by a vector
array of the same shape, where the last axis is 3,
it is assumed to be a Quaternion by 3D-Vector
multiplication and the 3D-Vectors are rotated
in space by the Quaternions.
When multiplying a Quaternions array by a scalar
or vector of different shape it is assumed to be
a Quaternions by Scalars multiplication and the
Quaternions are scaled using Slerp and the identity
quaternions. | __mul__ | python | EricGuo5513/momask-codes | visualization/Quaternions.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Quaternions.py | MIT |
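The three multiplication modes in use, assuming this Quaternions class is importable; the values are arbitrary:

import numpy as np

qa = Quaternions.from_angle_axis(np.array([np.pi / 2]), np.array([[0, 1, 0]]))
qb = Quaternions.from_angle_axis(np.array([np.pi / 4]), np.array([[1, 0, 0]]))

composed = qa * qb                      # Quaternions * Quaternions: composition
rotated = qa * np.array([[1.0, 0, 0]])  # Quaternions * 3D vectors: rotate the vectors
halfway = qa * 0.5                      # Quaternions * scalar: slerp toward identity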
def __div__(self, other):
"""
When a Quaternion type is supplied, division is defined
as multiplication by the inverse of that Quaternion.
When a scalar or vector is supplied it is defined
as multiplication of one over the supplied value.
Essentially a scaling.
"""
if isinstance(other, Quaternions): return self * (-other)
if isinstance(other, np.ndarray): return self * (1.0 / other)
if isinstance(other, float): return self * (1.0 / other)
raise TypeError('Cannot divide/subtract Quaternions with type %s' % str(type(other)))
as multiplication by the inverse of that Quaternion.
When a scalar or vector is supplied it is defined
as multiplication of one over the supplied value.
Essentially a scaling. | __div__ | python | EricGuo5513/momask-codes | visualization/Quaternions.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Quaternions.py | MIT |
def __neg__(self):
""" Invert Quaternions """
return Quaternions(self.qs * np.array([[1, -1, -1, -1]])) | Invert Quaternions | __neg__ | python | EricGuo5513/momask-codes | visualization/Quaternions.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Quaternions.py | MIT |
def __abs__(self):
""" Unify Quaternions To Single Pole """
qabs = self.normalized().copy()
top = np.sum((qabs.qs) * np.array([1, 0, 0, 0]), axis=-1)
bot = np.sum((-qabs.qs) * np.array([1, 0, 0, 0]), axis=-1)
qabs.qs[top < bot] = -qabs.qs[top < bot]
return qabs | Unify Quaternions To Single Pole | __abs__ | python | EricGuo5513/momask-codes | visualization/Quaternions.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Quaternions.py | MIT |
def euler(self, order='xyz'):  # fixes the wrong conversion; this converts to world euler by default
q = self.normalized().qs
q0 = q[..., 0]
q1 = q[..., 1]
q2 = q[..., 2]
q3 = q[..., 3]
es = np.zeros(self.shape + (3,))
if order == 'xyz':
es[..., 0] = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
es[..., 1] = np.arcsin((2 * (q0 * q2 - q3 * q1)).clip(-1, 1))
es[..., 2] = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
elif order == 'yzx':
es[..., 0] = np.arctan2(2 * (q1 * q0 - q2 * q3), -q1 * q1 + q2 * q2 - q3 * q3 + q0 * q0)
es[..., 1] = np.arctan2(2 * (q2 * q0 - q1 * q3), q1 * q1 - q2 * q2 - q3 * q3 + q0 * q0)
es[..., 2] = np.arcsin((2 * (q1 * q2 + q3 * q0)).clip(-1, 1))
else:
raise NotImplementedError('Cannot convert from ordering %s' % order)
"""
# These conversion don't appear to work correctly for Maya.
# http://bediyap.com/programming/convert-quaternion-to-euler-rotations/
if order == 'xyz':
es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'yzx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
elif order == 'zxy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'xzy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'yxz':
es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'zyx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
else:
raise KeyError('Unknown ordering %s' % order)
"""
# https://github.com/ehsan/ogre/blob/master/OgreMain/src/OgreMatrix3.cpp
# Use this class and convert from matrix
return es | # These conversion don't appear to work correctly for Maya.
# http://bediyap.com/programming/convert-quaternion-to-euler-rotations/
if order == 'xyz':
es[fa + (0,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q3 + q0 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'yzx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 - q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q1 * q2 + q0 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
elif order == 'zxy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 - q1 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 + q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 - q1 * q2), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'xzy':
es[fa + (0,)] = np.arctan2(2 * (q0 * q2 + q1 * q3), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q3 - q1 * q2)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
elif order == 'yxz':
es[fa + (0,)] = np.arctan2(2 * (q1 * q2 + q0 * q3), q0 * q0 - q1 * q1 + q2 * q2 - q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q1 - q2 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q1 * q3 + q0 * q2), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
elif order == 'zyx':
es[fa + (0,)] = np.arctan2(2 * (q0 * q1 + q2 * q3), q0 * q0 - q1 * q1 - q2 * q2 + q3 * q3)
es[fa + (1,)] = np.arcsin((2 * (q0 * q2 - q1 * q3)).clip(-1,1))
es[fa + (2,)] = np.arctan2(2 * (q0 * q3 + q1 * q2), q0 * q0 + q1 * q1 - q2 * q2 - q3 * q3)
else:
raise KeyError('Unknown ordering %s' % order) | euler | python | EricGuo5513/momask-codes | visualization/Quaternions.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Quaternions.py | MIT |
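A stand-alone NumPy check of the 'xyz' branch above for one quaternion (w, x, y, z); written from the same formulas, not imported from the repository:

import numpy as np

def quat_to_euler_xyz(q):
    # same arctan2/arcsin formulas as the 'xyz' branch of euler()
    w, x, y, z = q
    ex = np.arctan2(2 * (w * x + y * z), 1 - 2 * (x * x + y * y))
    ey = np.arcsin(np.clip(2 * (w * y - z * x), -1, 1))
    ez = np.arctan2(2 * (w * z + x * y), 1 - 2 * (y * y + z * z))
    return np.array([ex, ey, ez])

# 90 degrees about Y: q = (cos 45, 0, sin 45, 0) -> euler (0, pi/2, 0)
print(quat_to_euler_xyz(np.array([np.cos(np.pi / 4), 0.0, np.sin(np.pi / 4), 0.0])))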
def transforms_local(anim):
"""
Computes Animation Local Transforms
As well as a number of other uses this can
be used to compute global joint transforms,
which in turn can be used to compute global
joint positions
Parameters
----------
anim : Animation
Input animation
Returns
-------
transforms : (F, J, 4, 4) ndarray
For each frame F, joint local
transforms for each joint J
"""
transforms = anim.rotations.transforms()
transforms = np.concatenate([transforms, np.zeros(transforms.shape[:2] + (3, 1))], axis=-1)
transforms = np.concatenate([transforms, np.zeros(transforms.shape[:2] + (1, 4))], axis=-2)
# the last column is filled with the joint positions!
transforms[:, :, 0:3, 3] = anim.positions
transforms[:, :, 3:4, 3] = 1.0
return transforms | Computes Animation Local Transforms
As well as a number of other uses this can
be used to compute global joint transforms,
which in turn can be used to compute global
joint positions
Parameters
----------
anim : Animation
Input animation
Returns
-------
transforms : (F, J, 4, 4) ndarray
For each frame F, joint local
transforms for each joint J | transforms_local | python | EricGuo5513/momask-codes | visualization/Animation.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Animation.py | MIT |
def transforms_multiply(t0s, t1s):
"""
Transforms Multiply
Multiplies two arrays of animation transforms
Parameters
----------
t0s, t1s : (F, J, 4, 4) ndarray
Two arrays of transforms
for each frame F and each
joint J
Returns
-------
transforms : (F, J, 4, 4) ndarray
Array of transforms for each
frame F and joint J multiplied
together
"""
return ut.matrix_multiply(t0s, t1s) | Transforms Multiply
Multiplies two arrays of animation transforms
Parameters
----------
t0s, t1s : (F, J, 4, 4) ndarray
Two arrays of transforms
for each frame F and each
joint J
Returns
-------
transforms : (F, J, 4, 4) ndarray
Array of transforms for each
frame F and joint J multiplied
together | transforms_multiply | python | EricGuo5513/momask-codes | visualization/Animation.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Animation.py | MIT |
def transforms_blank(anim):
"""
Blank Transforms
Parameters
----------
anim : Animation
Input animation
Returns
-------
transforms : (F, J, 4, 4) ndarray
Array of identity transforms for
each frame F and joint J
"""
ts = np.zeros(anim.shape + (4, 4))
ts[:, :, 0, 0] = 1.0
ts[:, :, 1, 1] = 1.0
ts[:, :, 2, 2] = 1.0
ts[:, :, 3, 3] = 1.0
return ts | Blank Transforms
Parameters
----------
anim : Animation
Input animation
Returns
-------
transforms : (F, J, 4, 4) ndarray
Array of identity transforms for
each frame F and joint J | transforms_blank | python | EricGuo5513/momask-codes | visualization/Animation.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Animation.py | MIT |
def transforms_global(anim):
"""
Global Animation Transforms
This relies on joint ordering
being incremental. That means a joint
J1 must not be an ancestor of J0 if
J0 appears before J1 in the joint
ordering.
Parameters
----------
anim : Animation
Input animation
Returns
-------
transforms : (F, J, 4, 4) ndarray
Array of global transforms for
each frame F and joint J
"""
locals = transforms_local(anim)
globals = transforms_blank(anim)
globals[:, 0] = locals[:, 0]
for i in range(1, anim.shape[1]):
globals[:, i] = transforms_multiply(globals[:, anim.parents[i]], locals[:, i])
return globals | Global Animation Transforms
This relies on joint ordering
being incremental. That means a joint
J1 must not be an ancestor of J0 if
J0 appears before J1 in the joint
ordering.
Parameters
----------
anim : Animation
Input animation
Returns
-------
transforms : (F, J, 4, 4) ndarray
Array of global transforms for
each frame F and joint J | transforms_global | python | EricGuo5513/momask-codes | visualization/Animation.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Animation.py | MIT |
def positions_global(anim):
"""
Global Joint Positions
Given an animation compute the global joint
positions at every frame
Parameters
----------
anim : Animation
Input animation
Returns
-------
positions : (F, J, 3) ndarray
Positions for every frame F
and joint position J
"""
# get the last column -- corresponding to the coordinates
positions = transforms_global(anim)[:, :, :, 3]
return positions[:, :, :3] / positions[:, :, 3, np.newaxis] | Global Joint Positions
Given an animation compute the global joint
positions at every frame
Parameters
----------
anim : Animation
Input animation
Returns
-------
positions : (F, J, 3) ndarray
Positions for every frame F
and joint position J | positions_global | python | EricGuo5513/momask-codes | visualization/Animation.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Animation.py | MIT |
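A minimal forward-kinematics sketch mirroring transforms_local / transforms_global / positions_global on a toy 3-joint chain; the skeleton data is illustrative only:

import numpy as np

parents = np.array([-1, 0, 1])
offsets = np.array([[0.0, 0, 0], [0, 1, 0], [0, 1, 0]])  # each joint 1 unit above its parent

locals_ = np.tile(np.eye(4), (1, 3, 1, 1))  # F=1 frame, identity rotations
locals_[:, :, :3, 3] = offsets              # offsets go in the last column

globals_ = np.zeros_like(locals_)
globals_[:, 0] = locals_[:, 0]
for i in range(1, 3):                       # same recursion as transforms_global
    globals_[:, i] = globals_[:, parents[i]] @ locals_[:, i]

positions = globals_[:, :, :3, 3]  # -> [[0,0,0], [0,1,0], [0,2,0]]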
def rotations_global(anim):
"""
Global Animation Rotations
This relies on joint ordering
being incremental. That means a joint
J1 must not be an ancestor of J0 if
J0 appears before J1 in the joint
ordering.
Parameters
----------
anim : Animation
Input animation
Returns
-------
points : (F, J) Quaternions
global rotations for every frame F
and joint J
"""
locals = anim.rotations
globals = Quaternions.id(anim.shape)
globals[:, 0] = locals[:, 0]
for i in range(1, anim.shape[1]):
globals[:, i] = globals[:, anim.parents[i]] * locals[:, i]
return globals | Global Animation Rotations
This relies on joint ordering
being incremental. That means a joint
J1 must not be an ancestor of J0 if
J0 appears before J1 in the joint
ordering.
Parameters
----------
anim : Animation
Input animation
Returns
-------
points : (F, J) Quaternions
global rotations for every frame F
and joint J | rotations_global | python | EricGuo5513/momask-codes | visualization/Animation.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/Animation.py | MIT |
def remove_fs_old(anim, glb, foot_contact, fid_l=(3, 4), fid_r=(7, 8), interp_length=5, force_on_floor=True):
# glb_height = 2.06820832  # not the case; maybe use the upper-leg length instead
scale = 1.  # glb_height / 1.65, to scale to meters
# fps = 20
# velocity_thres = 10.  # m/s
height_thres = [0.06, 0.03]  # [ankle, toe] height thresholds in meters
if foot_contact is None:
def foot_detect(positions, velfactor, heightfactor):
feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2
feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2
feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2
feet_l_h = positions[:-1, fid_l, 1]
feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(float)  # np.float was removed in NumPy >= 1.24
feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2
feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2
feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2
feet_r_h = positions[:-1, fid_r, 1]
feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(float)  # np.float was removed in NumPy >= 1.24
return feet_l, feet_r
# feet_thre = 0.002
# feet_vel_thre = np.array([velocity_thres**2, velocity_thres**2]) * scale**2 / fps**2
feet_vel_thre = np.array([0.05, 0.2])
# height_thre = np.array([0.06, 0.04]) * scale
feet_h_thre = np.array(height_thres) * scale
feet_l, feet_r = foot_detect(glb, velfactor=feet_vel_thre, heightfactor=feet_h_thre)
foot = np.concatenate([feet_l, feet_r], axis=-1).transpose(1, 0) # [4, T-1]
foot = np.concatenate([foot, foot[:, -1:]], axis=-1)
else:
foot = foot_contact.transpose(1, 0)
T = len(glb)
fid = list(fid_l) + list(fid_r)
fid_l, fid_r = np.array(fid_l), np.array(fid_r)
foot_heights = np.minimum(glb[:, fid_l, 1],
glb[:, fid_r, 1]).min(axis=1) # [T, 2] -> [T]
# print(foot_heights)
# floor_height = softmin(foot_heights, softness=0.03, axis=0)
sort_height = np.sort(foot_heights)
temp_len = len(sort_height)
floor_height = np.mean(sort_height[int(0.25*temp_len):int(0.5*temp_len)])
if floor_height > 0.5:  # for motions like swimming
floor_height = 0
# print(floor_height)
# floor_height = foot_heights.min()
# print(floor_height)
# print(foot)
# print(foot_heights.min())
# print(floor_height)
glb[:, :, 1] -= floor_height
anim.positions[:, 0, 1] -= floor_height
for i, fidx in enumerate(fid):
fixed = foot[i] # [T]
"""
for t in range(T):
glb[t, fidx][1] = max(glb[t, fidx][1], 0.25)
"""
s = 0
while s < T:
while s < T and fixed[s] == 0:
s += 1
if s >= T:
break
t = s
avg = glb[t, fidx].copy()
while t + 1 < T and fixed[t + 1] == 1:
t += 1
avg += glb[t, fidx].copy()
avg /= (t - s + 1)
if force_on_floor:
avg[1] = 0.0
for j in range(s, t + 1):
glb[j, fidx] = avg.copy()
s = t + 1
for s in range(T):
if fixed[s] == 1:
continue
l, r = None, None
consl, consr = False, False
for k in range(interp_length):
if s - k - 1 < 0:
break
if fixed[s - k - 1]:
l = s - k - 1
consl = True
break
for k in range(interp_length):
if s + k + 1 >= T:
break
if fixed[s + k + 1]:
r = s + k + 1
consr = True
break
if not consl and not consr:
continue
if consl and consr:
litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)),
glb[s, fidx], glb[l, fidx])
ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)),
glb[s, fidx], glb[r, fidx])
itp = lerp(alpha(1.0 * (s - l + 1) / (r - l + 1)),
ritp, litp)
glb[s, fidx] = itp.copy()
continue
if consl:
litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)),
glb[s, fidx], glb[l, fidx])
glb[s, fidx] = litp.copy()
continue
if consr:
ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)),
glb[s, fidx], glb[r, fidx])
glb[s, fidx] = ritp.copy()
targetmap = {}
for j in range(glb.shape[1]):
targetmap[j] = glb[:, j]
# ik = BasicInverseKinematics(anim, glb, iterations=5,
# silent=True)
# slightly larger loss, but better visual
ik = JacobianInverseKinematics(anim, targetmap, iterations=30, damping=5, recalculate=False, silent=True)
anim = ik()
return anim | for t in range(T):
glb[t, fidx][1] = max(glb[t, fidx][1], 0.25) | remove_fs_old | python | EricGuo5513/momask-codes | visualization/remove_fs.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/remove_fs.py | MIT |
def remove_fs(glb, foot_contact, fid_l=(3, 4), fid_r=(7, 8), interp_length=5, force_on_floor=True):
# glb_height = 2.06820832  # not the case; maybe use the upper-leg length instead
scale = 1.  # glb_height / 1.65, to scale to meters
# fps = 20
# velocity_thres = 10.  # m/s
height_thres = [0.06, 0.03]  # [ankle, toe] height thresholds in meters
if foot_contact is None:
def foot_detect(positions, velfactor, heightfactor):
feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2
feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2
feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2
feet_l_h = positions[:-1, fid_l, 1]
feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(float)  # np.float was removed in NumPy >= 1.24
feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2
feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2
feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2
feet_r_h = positions[:-1, fid_r, 1]
feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(float)  # np.float was removed in NumPy >= 1.24
return feet_l, feet_r
# feet_thre = 0.002
# feet_vel_thre = np.array([velocity_thres**2, velocity_thres**2]) * scale**2 / fps**2
feet_vel_thre = np.array([0.05, 0.2])
# height_thre = np.array([0.06, 0.04]) * scale
feet_h_thre = np.array(height_thres) * scale
feet_l, feet_r = foot_detect(glb, velfactor=feet_vel_thre, heightfactor=feet_h_thre)
foot = np.concatenate([feet_l, feet_r], axis=-1).transpose(1, 0) # [4, T-1]
foot = np.concatenate([foot, foot[:, -1:]], axis=-1)
else:
foot = foot_contact.transpose(1, 0)
T = len(glb)
fid = list(fid_l) + list(fid_r)
fid_l, fid_r = np.array(fid_l), np.array(fid_r)
foot_heights = np.minimum(glb[:, fid_l, 1],
glb[:, fid_r, 1]).min(axis=1) # [T, 2] -> [T]
# print(foot_heights)
# floor_height = softmin(foot_heights, softness=0.03, axis=0)
sort_height = np.sort(foot_heights)
temp_len = len(sort_height)
floor_height = np.mean(sort_height[int(0.25*temp_len):int(0.5*temp_len)])
if floor_height > 0.5:  # for motions like swimming
floor_height = 0
# print(floor_height)
# floor_height = foot_heights.min()
# print(floor_height)
# print(foot)
# print(foot_heights.min())
# print(floor_height)
glb[:, :, 1] -= floor_height
# anim.positions[:, 0, 1] -= floor_height
for i, fidx in enumerate(fid):
fixed = foot[i] # [T]
"""
for t in range(T):
glb[t, fidx][1] = max(glb[t, fidx][1], 0.25)
"""
s = 0
while s < T:
while s < T and fixed[s] == 0:
s += 1
if s >= T:
break
t = s
avg = glb[t, fidx].copy()
while t + 1 < T and fixed[t + 1] == 1:
t += 1
avg += glb[t, fidx].copy()
avg /= (t - s + 1)
if force_on_floor:
avg[1] = 0.0
for j in range(s, t + 1):
glb[j, fidx] = avg.copy()
s = t + 1
for s in range(T):
if fixed[s] == 1:
continue
l, r = None, None
consl, consr = False, False
for k in range(interp_length):
if s - k - 1 < 0:
break
if fixed[s - k - 1]:
l = s - k - 1
consl = True
break
for k in range(interp_length):
if s + k + 1 >= T:
break
if fixed[s + k + 1]:
r = s + k + 1
consr = True
break
if not consl and not consr:
continue
if consl and consr:
litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)),
glb[s, fidx], glb[l, fidx])
ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)),
glb[s, fidx], glb[r, fidx])
itp = lerp(alpha(1.0 * (s - l + 1) / (r - l + 1)),
ritp, litp)
glb[s, fidx] = itp.copy()
continue
if consl:
litp = lerp(alpha(1.0 * (s - l + 1) / (interp_length + 1)),
glb[s, fidx], glb[l, fidx])
glb[s, fidx] = litp.copy()
continue
if consr:
ritp = lerp(alpha(1.0 * (r - s + 1) / (interp_length + 1)),
glb[s, fidx], glb[r, fidx])
glb[s, fidx] = ritp.copy()
targetmap = {}
for j in range(glb.shape[1]):
targetmap[j] = glb[:, j]
# ik = BasicInverseKinematics(anim, glb, iterations=5,
# silent=True)
# slightly larger loss, but better visual
# ik = JacobianInverseKinematics(anim, targetmap, iterations=30, damping=5, recalculate=False, silent=True)
# anim = ik()
return glb | for t in range(T):
glb[t, fidx][1] = max(glb[t, fidx][1], 0.25) | remove_fs | python | EricGuo5513/momask-codes | visualization/remove_fs.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/remove_fs.py | MIT |
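The foot-contact test used in both functions above, isolated: a foot joint counts as in contact when its squared frame-to-frame displacement falls below velfactor and its height below heightfactor (toy data; function name and layout are illustrative):

import numpy as np

def foot_contact(positions, fid, velfactor, heightfactor):
    # positions: (T, J, 3); fid: foot joint indices; y is up
    d = positions[1:, fid] - positions[:-1, fid]
    vel2 = (d ** 2).sum(axis=-1)          # squared displacement, (T-1, len(fid))
    height = positions[:-1, fid, 1]
    return ((vel2 < velfactor) & (height < heightfactor)).astype(float)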
def load(filename, start=None, end=None, world=False, need_quater=True):
"""
Reads a BVH file and constructs an animation
Parameters
----------
filename: str
File to be opened
start : int
Optional Starting Frame
end : int
Optional Ending Frame
order : str
Optional Specifier for joint order.
Given as string E.G 'xyz', 'zxy'
world : bool
If set to true euler angles are applied
together in world space rather than local
space
Returns
-------
animation : Animation
The loaded animation, carrying joint names and frametime
"""
f = open(filename, "r")
i = 0
active = -1
end_site = False
names = []
orients = Quaternions.id(0)
offsets = np.array([]).reshape((0, 3))
parents = np.array([], dtype=int)
orders = []
for line in f:
if "HIERARCHY" in line: continue
if "MOTION" in line: continue
# """ Modified line read to handle mixamo data """
rmatch = re.match(r"ROOT (\w+)", line)
# rmatch = re.match(r"ROOT (\w+:?\w+)", line)
if rmatch:
names.append(rmatch.group(1))
offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
orients.qs = np.append(orients.qs, np.array([[1, 0, 0, 0]]), axis=0)
parents = np.append(parents, active)
active = (len(parents) - 1)
continue
if "{" in line: continue
if "}" in line:
if end_site:
end_site = False
else:
active = parents[active]
continue
offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line)
if offmatch:
if not end_site:
offsets[active] = np.array([list(map(float, offmatch.groups()))])
continue
chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line)
if chanmatch:
channels = int(chanmatch.group(1))
channelis = 0 if channels == 3 else 3
channelie = 3 if channels == 3 else 6
parts = line.split()[2 + channelis:2 + channelie]
if any([p not in channelmap for p in parts]):
continue
order = "".join([channelmap[p] for p in parts])
orders.append(order)
continue
# """ Modified line read to handle mixamo data """
jmatch = re.match(r"\s*JOINT\s+(\w+)", line)
# jmatch = re.match("\s*JOINT\s+(\w+:?\w+)", line)
if jmatch:
names.append(jmatch.group(1))
offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
orients.qs = np.append(orients.qs, np.array([[1, 0, 0, 0]]), axis=0)
parents = np.append(parents, active)
active = (len(parents) - 1)
continue
if "End Site" in line:
end_site = True
continue
fmatch = re.match(r"\s*Frames:\s+(\d+)", line)
if fmatch:
if start and end:
fnum = (end - start) - 1
else:
fnum = int(fmatch.group(1))
jnum = len(parents)
positions = offsets[np.newaxis].repeat(fnum, axis=0)
rotations = np.zeros((fnum, len(orients), 3))
continue
fmatch = re.match(r"\s*Frame Time:\s+([\d\.]+)", line)
if fmatch:
frametime = float(fmatch.group(1))
continue
if (start and end) and (i < start or i >= end - 1):
i += 1
continue
# dmatch = line.strip().split(' ')
dmatch = line.strip().split()
if dmatch:
data_block = np.array(list(map(float, dmatch)))
N = len(parents)
fi = i - start if start else i
if channels == 3:
positions[fi, 0:1] = data_block[0:3]
rotations[fi, :] = data_block[3:].reshape(N, 3)
elif channels == 6:
data_block = data_block.reshape(N, 6)
positions[fi, :] = data_block[:, 0:3]
rotations[fi, :] = data_block[:, 3:6]
elif channels == 9:
positions[fi, 0] = data_block[0:3]
data_block = data_block[3:].reshape(N - 1, 9)
rotations[fi, 1:] = data_block[:, 3:6]
positions[fi, 1:] += data_block[:, 0:3] * data_block[:, 6:9]
else:
raise Exception("Too many channels! %i" % channels)
i += 1
f.close()
all_rotations = []
canonical_order = 'xyz'
for i, order in enumerate(orders):
rot = rotations[:, i:i + 1]
if need_quater:
quat = euler_to_quat_np(np.radians(rot), order=order, world=world)
all_rotations.append(quat)
continue
elif order != canonical_order:
quat = euler_to_quat_np(np.radians(rot), order=order, world=world)
rot = np.degrees(qeuler_np(quat, order=canonical_order))
all_rotations.append(rot)
rotations = np.concatenate(all_rotations, axis=1)
return Animation(rotations, positions, orients, offsets, parents, names, frametime) | Reads a BVH file and constructs an animation
Parameters
----------
filename: str
File to be opened
start : int
Optional Starting Frame
end : int
Optional Ending Frame
order : str
Optional Specifier for joint order.
Given as a string, e.g. 'xyz', 'zxy'
world : bool
If set to true euler angles are applied
together in world space rather than local
space
Returns
-------
animation : Animation
The loaded animation, carrying joint names and frametime | load | python | EricGuo5513/momask-codes | visualization/BVH.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/BVH.py | MIT |
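load() and save() reference channelmap, channelmap_inv, and ordermap without showing them; in the Holden-style BVH modules this code derives from they are the tables below (stated as an assumption, since the definitions fall outside this excerpt):

# Assumed channel-name tables used by the BVH reader/writer above.
channelmap = {'Xrotation': 'x', 'Yrotation': 'y', 'Zrotation': 'z'}
channelmap_inv = {'x': 'Xrotation', 'y': 'Yrotation', 'z': 'Zrotation'}
ordermap = {'x': 0, 'y': 1, 'z': 2}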
def write(self, filename, rot, r_pos, order, offset=None, names=None, repr='quat'):
"""
Write animation to bvh file
:param filename:
:param rot: Quaternion as (w, x, y, z)
:param r_pos: root positions
:param offset:
:return:
"""
if repr not in ['euler', 'quat', 'quaternion', 'cont6d']:
raise Exception('Unknown rotation representation')
if offset is None:
offset = self.offset
if not isinstance(offset, torch.Tensor):
offset = torch.tensor(offset)
n_bone = offset.shape[0]
if repr == 'cont6d':
rot = rot.reshape(rot.shape[0], -1, 6)
rot = cont6d_to_quat_np(rot)
if repr == 'cont6d' or repr == 'quat' or repr == 'quaternion':
# rot = rot.reshape(rot.shape[0], -1, 4)
# rot /= rot.norm(dim=-1, keepdim=True) ** 0.5
euler = qeuler_np(rot, order=order)
rot = euler
if names is None:
if self.names is None:
names = ['%02d' % i for i in range(n_bone)]
else:
names = self.names
write_bvh(self.parents, offset, rot, r_pos, names, self.frametime, order, filename) | Write animation to bvh file
:param filename:
:param rot: Quaternion as (w, x, y, z)
:param r_pos: root positions
:param offset:
:return: | write | python | EricGuo5513/momask-codes | visualization/BVH.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/BVH.py | MIT |
def convert(self, positions, filename, iterations=10, foot_ik=True):
'''
Convert the SMPL joint positions to Mocap BVH
:param positions: (N, 22, 3)
:param filename: Save path for resulting BVH
:param iterations: iterations for optimizing rotations, 10 is usually enough
:param foot_ik: whether to enforce foot inverse kinematics, removing foot sliding.
:return:
'''
positions = positions[:, self.re_order]
new_anim = self.template.copy()
new_anim.rotations = Quaternions.id(positions.shape[:-1])
new_anim.positions = new_anim.positions[0:1].repeat(positions.shape[0], axis=0)
new_anim.positions[:, 0] = positions[:, 0]
if foot_ik:
positions = remove_fs(positions, None, fid_l=(3, 4), fid_r=(7, 8), interp_length=5,
force_on_floor=True)
ik_solver = BasicInverseKinematics(new_anim, positions, iterations=iterations, silent=True)
new_anim = ik_solver()
# BVH.save(filename, new_anim, names=new_anim.names, frametime=1 / 20, order='zyx', quater=True)
glb = Animation.positions_global(new_anim)[:, self.re_order_inv]
if filename is not None:
BVH.save(filename, new_anim, names=new_anim.names, frametime=1 / 20, order='zyx', quater=True)
return new_anim, glb | Convert the SMPL joint positions to Mocap BVH
:param positions: (N, 22, 3)
:param filename: Save path for resulting BVH
:param iterations: iterations for optimizing rotations, 10 is usually enough
:param foot_ik: whether to enforce foot inverse kinematics, removing foot sliding.
:return: | convert | python | EricGuo5513/momask-codes | visualization/joints2bvh.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/joints2bvh.py | MIT |
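A hedged usage sketch; Joint2BVHConvertor is assumed to be the enclosing class (a hypothetical name if it differs in the repo), and the joint file is a placeholder:

import numpy as np

converter = Joint2BVHConvertor()        # assumed enclosing class
positions = np.load("gen_joints.npy")   # hypothetical (N, 22, 3) SMPL joint array
anim, glb = converter.convert(positions, "gen.bvh", iterations=10, foot_ik=True)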
def convert_sgd(self, positions, filename, iterations=100, foot_ik=True):
'''
Convert the SMPL joint positions to Mocap BVH
:param positions: (N, 22, 3)
:param filename: Save path for resulting BVH
:param iterations: iterations for optimizing rotations, 10 is usually enough
:param foot_ik: whether to enforce foot inverse kinematics, removing foot sliding.
:return:
'''
## Positional Foot locking ##
glb = positions[:, self.re_order]
if foot_ik:
glb = remove_fs(glb, None, fid_l=(3, 4), fid_r=(7, 8), interp_length=2,
force_on_floor=True)
## Fit BVH ##
new_anim = self.template.copy()
new_anim.rotations = Quaternions.id(glb.shape[:-1])
new_anim.positions = new_anim.positions[0:1].repeat(glb.shape[0], axis=0)
new_anim.positions[:, 0] = glb[:, 0]
anim = new_anim.copy()
rot = torch.tensor(anim.rotations.qs, dtype=torch.float)
pos = torch.tensor(anim.positions[:, 0, :], dtype=torch.float)
offset = torch.tensor(anim.offsets, dtype=torch.float)
glb = torch.tensor(glb, dtype=torch.float)
ik_solver = InverseKinematics(rot, pos, offset, anim.parents, glb)
print('Fixing foot contact using IK...')
for i in tqdm(range(iterations)):
mse = ik_solver.step()
# print(i, mse)
rotations = ik_solver.rotations.detach().cpu()
norm = torch.norm(rotations, dim=-1, keepdim=True)
rotations /= norm
anim.rotations = Quaternions(rotations.numpy())
anim.rotations[:, self.end_points] = Quaternions.id((anim.rotations.shape[0], len(self.end_points)))
anim.positions[:, 0, :] = ik_solver.position.detach().cpu().numpy()
if filename is not None:
BVH.save(filename, anim, names=new_anim.names, frametime=1 / 20, order='zyx', quater=True)
# BVH.save(filename[:-3] + 'bvh', anim, names=new_anim.names, frametime=1 / 20, order='zyx', quater=True)
glb = Animation.positions_global(anim)[:, self.re_order_inv]
return anim, glb | Convert the SMPL joint positions to Mocap BVH
:param positions: (N, 22, 3)
:param filename: Save path for resulting BVH
:param iterations: iterations for optimizing rotations, 10 is usually enough
:param foot_ik: whether to enforce foot inverse kinematics, removing foot sliding.
:return: | convert_sgd | python | EricGuo5513/momask-codes | visualization/joints2bvh.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/joints2bvh.py | MIT |
def jacobian(self, x, fp, fr, ts, dsc, tdsc):
""" Find parent rotations """
prs = fr[:, self.animation.parents]
prs[:, 0] = Quaternions.id((1))
""" Find global positions of target joints """
tps = fp[:, np.array(list(ts.keys()))]
""" Get partial rotations """
qys = Quaternions.from_angle_axis(x[:, 1:prs.shape[1] * 3:3], np.array([[[0, 1, 0]]]))
qzs = Quaternions.from_angle_axis(x[:, 2:prs.shape[1] * 3:3], np.array([[[0, 0, 1]]]))
""" Find axis of rotations """
es = np.empty((len(x), fr.shape[1] * 3, 3))
es[:, 0::3] = ((prs * qzs) * qys) * np.array([[[1, 0, 0]]])
es[:, 1::3] = ((prs * qzs) * np.array([[[0, 1, 0]]]))
es[:, 2::3] = ((prs * np.array([[[0, 0, 1]]])))
""" Construct Jacobian """
j = fp.repeat(3, axis=1)
j = dsc[np.newaxis, :, :, np.newaxis] * (tps[:, np.newaxis, :] - j[:, :, np.newaxis])
j = self.cross(es[:, :, np.newaxis, :], j)
j = np.swapaxes(j.reshape((len(x), fr.shape[1] * 3, len(ts) * 3)), 1, 2)
if self.translate:
es = np.empty((len(x), fr.shape[1] * 3, 3))
es[:, 0::3] = prs * np.array([[[1, 0, 0]]])
es[:, 1::3] = prs * np.array([[[0, 1, 0]]])
es[:, 2::3] = prs * np.array([[[0, 0, 1]]])
jt = tdsc[np.newaxis, :, :, np.newaxis] * es[:, :, np.newaxis, :].repeat(tps.shape[1], axis=2)
jt = np.swapaxes(jt.reshape((len(x), fr.shape[1] * 3, len(ts) * 3)), 1, 2)
j = np.concatenate([j, jt], axis=-1)
return j | Find parent rotations | jacobian | python | EricGuo5513/momask-codes | visualization/InverseKinematics.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/InverseKinematics.py | MIT |
def __call__(self, descendants=None, gamma=1.0):
self.descendants = descendants
""" Calculate Masses """
if self.weights is None:
self.weights = np.ones(self.animation.shape[1])
if self.weights_translate is None:
self.weights_translate = np.ones(self.animation.shape[1])
""" Calculate Descendants """
if self.descendants is None:
self.descendants = AnimationStructure.descendants_mask(self.animation.parents)
self.tdescendants = np.eye(self.animation.shape[1]) + self.descendants
self.first_descendants = self.descendants[:, np.array(list(self.targets.keys()))].repeat(3, axis=0).astype(int)
self.first_tdescendants = self.tdescendants[:, np.array(list(self.targets.keys()))].repeat(3, axis=0).astype(
int)
""" Calculate End Effectors """
self.endeff = np.array(list(self.targets.values()))
self.endeff = np.swapaxes(self.endeff, 0, 1)
if self.references is not None:
self.second_descendants = self.descendants.repeat(3, axis=0).astype(int)
self.second_tdescendants = self.tdescendants.repeat(3, axis=0).astype(int)
self.second_targets = dict([(i, self.references[:, i]) for i in range(self.references.shape[1])])
nf = len(self.animation)
nj = self.animation.shape[1]
if not self.silent:
gp = Animation.positions_global(self.animation)
gp = gp[:, np.array(list(self.targets.keys()))]
error = np.mean(np.sqrt(np.sum((self.endeff - gp) ** 2.0, axis=2)))
print('[JacobianInverseKinematics] Start | Error: %f' % error)
for i in range(self.iterations):
""" Get Global Rotations & Positions """
gt = Animation.transforms_global(self.animation)
gp = gt[:, :, :, 3]
gp = gp[:, :, :3] / gp[:, :, 3, np.newaxis]
gr = Quaternions.from_transforms(gt)
x = self.animation.rotations.euler().reshape(nf, -1)
w = self.weights.repeat(3)
if self.translate:
x = np.hstack([x, self.animation.positions.reshape(nf, -1)])
w = np.hstack([w, self.weights_translate.repeat(3)])
""" Generate Jacobian """
if self.recalculate or i == 0:
j = self.jacobian(x, gp, gr, self.targets, self.first_descendants, self.first_tdescendants)
""" Update Variables """
l = self.damping * (1.0 / (w + 0.001))
d = (l * l) * np.eye(x.shape[1])
e = gamma * (self.endeff.reshape(nf, -1) - gp[:, np.array(list(self.targets.keys()))].reshape(nf, -1))
x += np.array(list(map(lambda jf, ef:
linalg.lu_solve(linalg.lu_factor(jf.T.dot(jf) + d), jf.T.dot(ef)), j, e)))
""" Generate Secondary Jacobian """
if self.references is not None:
ns = np.array(list(map(lambda jf:
np.eye(x.shape[1]) - linalg.solve(jf.T.dot(jf) + d, jf.T.dot(jf)), j)))
if self.recalculate or i == 0:
j2 = self.jacobian(x, gp, gr, self.second_targets, self.second_descendants,
self.second_tdescendants)
e2 = self.secondary * (self.references.reshape(nf, -1) - gp.reshape(nf, -1))
x += np.array(list(map(lambda nsf, j2f, e2f:
nsf.dot(linalg.lu_solve(linalg.lu_factor(j2f.T.dot(j2f) + d), j2f.T.dot(e2f))),
ns, j2, e2)))
""" Set Back Rotations / Translations """
self.animation.rotations = Quaternions.from_euler(
x[:, :nj * 3].reshape((nf, nj, 3)), order='xyz', world=True)
if self.translate:
self.animation.positions = x[:, nj * 3:].reshape((nf, nj, 3))
""" Generate Error """
if not self.silent:
gp = Animation.positions_global(self.animation)
gp = gp[:, np.array(list(self.targets.keys()))]
error = np.mean(np.sum((self.endeff - gp) ** 2.0, axis=2) ** 0.5)
print('[JacobianInverseKinematics] Iteration %i | Error: %f' % (i + 1, error))
return self.animation | Calculate Masses | __call__ | python | EricGuo5513/momask-codes | visualization/InverseKinematics.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/InverseKinematics.py | MIT |
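The damped-least-squares update at the heart of the loop above, isolated for one frame: solve (JᵀJ + λ²I) Δx = Jᵀe. Toy sizes and random data; the per-weight damping of the original is simplified to a scalar:

import numpy as np
from scipy import linalg

rng = np.random.default_rng(0)
J = rng.standard_normal((9, 12))   # 3 targets x 3 coords, 12 rotational DOF
e = rng.standard_normal(9)         # end-effector error
lam = 5.0                          # damping, playing the role of self.damping
d = (lam ** 2) * np.eye(12)
dx = linalg.lu_solve(linalg.lu_factor(J.T @ J + d), J.T @ e)  # the update step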
def jacobian(self, x, fp, fr, goal, weights, des_r, des_t):
""" Find parent rotations """
prs = fr[:, self.animation.parents]
prs[:, 0] = Quaternions.id((1))
""" Get partial rotations """
qys = Quaternions.from_angle_axis(x[:, 1:prs.shape[1] * 3:3], np.array([[[0, 1, 0]]]))
qzs = Quaternions.from_angle_axis(x[:, 2:prs.shape[1] * 3:3], np.array([[[0, 0, 1]]]))
""" Find axis of rotations """
es = np.empty((len(x), fr.shape[1] * 3, 3))
es[:, 0::3] = ((prs * qzs) * qys) * np.array([[[1, 0, 0]]])
es[:, 1::3] = ((prs * qzs) * np.array([[[0, 1, 0]]]))
es[:, 2::3] = ((prs * np.array([[[0, 0, 1]]])))
""" Construct Jacobian """
j = fp.repeat(3, axis=1)
j = des_r[np.newaxis, :, :, :, np.newaxis] * (
goal[:, np.newaxis, :, np.newaxis] - j[:, :, np.newaxis, np.newaxis])
j = np.sum(j * weights[np.newaxis, np.newaxis, :, :, np.newaxis], 3)
j = self.cross(es[:, :, np.newaxis, :], j)
j = np.swapaxes(j.reshape((len(x), fr.shape[1] * 3, goal.shape[1] * 3)), 1, 2)
if self.translate:
es = np.empty((len(x), fr.shape[1] * 3, 3))
es[:, 0::3] = prs * np.array([[[1, 0, 0]]])
es[:, 1::3] = prs * np.array([[[0, 1, 0]]])
es[:, 2::3] = prs * np.array([[[0, 0, 1]]])
jt = des_t[np.newaxis, :, :, :, np.newaxis] * es[:, :, np.newaxis, np.newaxis, :].repeat(goal.shape[1],
axis=2)
jt = np.sum(jt * weights[np.newaxis, np.newaxis, :, :, np.newaxis], 3)
jt = np.swapaxes(jt.reshape((len(x), fr.shape[1] * 3, goal.shape[1] * 3)), 1, 2)
j = np.concatenate([j, jt], axis=-1)
return j | Find parent rotations | jacobian | python | EricGuo5513/momask-codes | visualization/InverseKinematics.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/InverseKinematics.py | MIT |
def __call__(self, descendants=None, maxjoints=4, gamma=1.0, transpose=False):
""" Calculate Masses """
if self.weights is None:
self.weights = np.ones(self.animation.shape[1])
if self.weights_translate is None:
self.weights_translate = np.ones(self.animation.shape[1])
nf = len(self.animation)
nj = self.animation.shape[1]
nv = self.goal.shape[1]
weightids = np.argsort(-self.vweights, axis=1)[:, :maxjoints]
weightvls = np.array(list(map(lambda w, i: w[i], self.vweights, weightids)))
weightvls = weightvls / weightvls.sum(axis=1)[..., np.newaxis]
if descendants is None:
self.descendants = AnimationStructure.descendants_mask(self.animation.parents)
else:
self.descendants = descendants
des_r = np.eye(nj) + self.descendants
des_r = des_r[:, weightids].repeat(3, axis=0)
des_t = np.eye(nj) + self.descendants
des_t = des_t[:, weightids].repeat(3, axis=0)
if not self.silent:
curr = Animation.skin(self.animation, self.rest, self.vweights, self.mesh, maxjoints=maxjoints)
error = np.mean(np.sqrt(np.sum((curr - self.goal) ** 2.0, axis=-1)))
print('[ICP] Start | Error: %f' % error)
for i in range(self.iterations):
""" Get Global Rotations & Positions """
gt = Animation.transforms_global(self.animation)
gp = gt[:, :, :, 3]
gp = gp[:, :, :3] / gp[:, :, 3, np.newaxis]
gr = Quaternions.from_transforms(gt)
x = self.animation.rotations.euler().reshape(nf, -1)
w = self.weights.repeat(3)
if self.translate:
x = np.hstack([x, self.animation.positions.reshape(nf, -1)])
w = np.hstack([w, self.weights_translate.repeat(3)])
""" Get Current State """
curr = Animation.skin(self.animation, self.rest, self.vweights, self.mesh, maxjoints=maxjoints)
""" Find Cloest Points """
if self.find_closest:
mapping = np.argmin(
(curr[:, :, np.newaxis] -
self.goal[:, np.newaxis, :]) ** 2.0, axis=2)
e = gamma * (np.array(list(map(lambda g, m: g[m], self.goal, mapping))) - curr).reshape(nf, -1)
else:
e = gamma * (self.goal - curr).reshape(nf, -1)
""" Generate Jacobian """
if self.recalculate or i == 0:
j = self.jacobian(x, gp, gr, self.goal, weightvls, des_r, des_t)
""" Update Variables """
l = self.damping * (1.0 / (w + 1e-10))
d = (l * l) * np.eye(x.shape[1])
if transpose:
x += np.array(list(map(lambda jf, ef: jf.T.dot(ef), j, e)))
else:
x += np.array(list(map(lambda jf, ef:
linalg.lu_solve(linalg.lu_factor(jf.T.dot(jf) + d), jf.T.dot(ef)), j, e)))
""" Set Back Rotations / Translations """
self.animation.rotations = Quaternions.from_euler(
x[:, :nj * 3].reshape((nf, nj, 3)), order='xyz', world=True)
if self.translate:
self.animation.positions = x[:, nj * 3:].reshape((nf, nj, 3))
if not self.silent:
curr = Animation.skin(self.animation, self.rest, self.vweights, self.mesh)
error = np.mean(np.sqrt(np.sum((curr - self.goal) ** 2.0, axis=-1)))
print('[ICP] Iteration %i | Error: %f' % (i + 1, error)) | Calculate Masses | __call__ | python | EricGuo5513/momask-codes | visualization/InverseKinematics.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/InverseKinematics.py | MIT |
def forward(self, rotation: torch.Tensor, position: torch.Tensor, offset: torch.Tensor, order='xyz', quater=False,
world=True):
'''
if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')
if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')
rotation = rotation.permute(0, 3, 1, 2)
position = position.permute(0, 2, 1)
'''
result = torch.empty(rotation.shape[:-1] + (3,), device=position.device)
norm = torch.norm(rotation, dim=-1, keepdim=True)
rotation = rotation / norm
# if quater:
transform = self.transform_from_quaternion(rotation)
# else:
# transform = self.transform_from_euler(rotation, order)
offset = offset.reshape((-1, 1, offset.shape[-2], offset.shape[-1], 1))
result[..., 0, :] = position
for i, pi in enumerate(self.parents):
if pi == -1:
assert i == 0
continue
result[..., i, :] = torch.matmul(transform[..., pi, :, :], offset[..., i, :, :]).squeeze(-1)  # squeeze only the trailing dim so batch-size-1 inputs keep their shape
transform[..., i, :, :] = torch.matmul(transform[..., pi, :, :].clone(), transform[..., i, :, :].clone())
if world: result[..., i, :] += result[..., pi, :]
return result | if not quater and rotation.shape[-2] != 3: raise Exception('Unexpected shape of rotation')
if quater and rotation.shape[-2] != 4: raise Exception('Unexpected shape of rotation')
rotation = rotation.permute(0, 3, 1, 2)
position = position.permute(0, 2, 1) | forward | python | EricGuo5513/momask-codes | visualization/InverseKinematics.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/InverseKinematics.py | MIT |
def load(filename, start=None, end=None, order=None, world=False, need_quater=True):
"""
Reads a BVH file and constructs an animation
Parameters
----------
filename: str
File to be opened
start : int
Optional Starting Frame
end : int
Optional Ending Frame
order : str
Optional Specifier for joint order.
Given as a string, e.g. 'xyz', 'zxy'
world : bool
If set to true euler angles are applied
together in world space rather than local
space
Returns
-------
animation : Animation
The loaded animation, carrying joint names and frametime
"""
f = open(filename, "r")
i = 0
active = -1
end_site = False
names = []
orients = Quaternions.id(0)
offsets = np.array([]).reshape((0, 3))
parents = np.array([], dtype=int)
for line in f:
if "HIERARCHY" in line: continue
if "MOTION" in line: continue
""" Modified line read to handle mixamo data """
# rmatch = re.match(r"ROOT (\w+)", line)
rmatch = re.match(r"ROOT (\w+:?\w+)", line)
if rmatch:
names.append(rmatch.group(1))
offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
orients.qs = np.append(orients.qs, np.array([[1, 0, 0, 0]]), axis=0)
parents = np.append(parents, active)
active = (len(parents) - 1)
continue
if "{" in line: continue
if "}" in line:
if end_site:
end_site = False
else:
active = parents[active]
continue
offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line)
if offmatch:
if not end_site:
offsets[active] = np.array([list(map(float, offmatch.groups()))])
continue
chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line)
if chanmatch:
channels = int(chanmatch.group(1))
if order is None:
channelis = 0 if channels == 3 else 3
channelie = 3 if channels == 3 else 6
parts = line.split()[2 + channelis:2 + channelie]
if any([p not in channelmap for p in parts]):
continue
order = "".join([channelmap[p] for p in parts])
continue
""" Modified line read to handle mixamo data """
# jmatch = re.match("\s*JOINT\s+(\w+)", line)
jmatch = re.match(r"\s*JOINT\s+(\w+:?\w+)", line)
if jmatch:
names.append(jmatch.group(1))
offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
orients.qs = np.append(orients.qs, np.array([[1, 0, 0, 0]]), axis=0)
parents = np.append(parents, active)
active = (len(parents) - 1)
continue
if "End Site" in line:
end_site = True
continue
fmatch = re.match(r"\s*Frames:\s+(\d+)", line)
if fmatch:
if start and end:
fnum = (end - start) - 1
else:
fnum = int(fmatch.group(1))
jnum = len(parents)
positions = offsets[np.newaxis].repeat(fnum, axis=0)
rotations = np.zeros((fnum, len(orients), 3))
continue
fmatch = re.match(r"\s*Frame Time:\s+([\d\.]+)", line)
if fmatch:
frametime = float(fmatch.group(1))
continue
if (start and end) and (i < start or i >= end - 1):
i += 1
continue
# dmatch = line.strip().split(' ')
dmatch = line.strip().split()
if dmatch:
data_block = np.array(list(map(float, dmatch)))
N = len(parents)
fi = i - start if start else i
if channels == 3:
positions[fi, 0:1] = data_block[0:3]
rotations[fi, :] = data_block[3:].reshape(N, 3)
elif channels == 6:
data_block = data_block.reshape(N, 6)
positions[fi, :] = data_block[:, 0:3]
rotations[fi, :] = data_block[:, 3:6]
elif channels == 9:
positions[fi, 0] = data_block[0:3]
data_block = data_block[3:].reshape(N - 1, 9)
rotations[fi, 1:] = data_block[:, 3:6]
positions[fi, 1:] += data_block[:, 0:3] * data_block[:, 6:9]
else:
raise Exception("Too many channels! %i" % channels)
i += 1
f.close()
if need_quater:
rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world)
elif order != 'xyz':
rotations = Quaternions.from_euler(np.radians(rotations), order=order, world=world)
rotations = np.degrees(rotations.euler())
return Animation(rotations, positions, orients, offsets, parents, names, frametime) | Reads a BVH file and constructs an animation
Parameters
----------
filename: str
File to be opened
start : int
Optional Starting Frame
end : int
Optional Ending Frame
order : str
Optional Specifier for joint order.
Given as a string, e.g. 'xyz', 'zxy'
world : bool
If set to True, Euler angles are applied
together in world space rather than local
space
Returns
-------
animation : Animation
A single Animation built from the parsed rotations, positions, orients, offsets, parents, names, and frame time | load | python | EricGuo5513/momask-codes | visualization/BVH_mod.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/BVH_mod.py | MIT
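A minimal usage sketch (the path 'example.bvh' is hypothetical); the shape and parents conventions match what save() below relies on:

    anim = load('example.bvh')            # hypothetical path
    num_frames, num_joints = anim.shape   # same (frames, joints) convention save() uses
    print(num_frames, num_joints, anim.parents[0])   # the root joint's parent is -1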
def save(filename, anim, names=None, frametime=1.0 / 24.0, order='zyx', positions=False, mask=None, quater=False):
"""
Saves an Animation to file as BVH
Parameters
----------
filename: str
File to be saved to
anim : Animation
Animation to save
names : [str]
List of joint names
order : str
Optional Specifier for joint order.
Given as a string, e.g. 'xyz', 'zxy'
frametime : float
Optional Animation Frame time
positions : bool
Optional specifier to save bone
positions for each frame
orients : bool
Multiply joint orients to the rotations
before saving.
"""
if names is None:
names = ["joint_" + str(i) for i in range(len(anim.parents))]
with open(filename, 'w') as f:
t = ""
f.write("%sHIERARCHY\n" % t)
f.write("%sROOT %s\n" % (t, names[0]))
f.write("%s{\n" % t)
t += '\t'
f.write("%sOFFSET %f %f %f\n" % (t, anim.offsets[0, 0], anim.offsets[0, 1], anim.offsets[0, 2]))
f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" %
(t, channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]]))
for i in range(anim.shape[1]):
if anim.parents[i] == 0:
t = save_joint(f, anim, names, t, i, order=order, positions=positions)
t = t[:-1]
f.write("%s}\n" % t)
f.write("MOTION\n")
f.write("Frames: %i\n" % anim.shape[0]);
f.write("Frame Time: %f\n" % frametime);
# if orients:
# rots = np.degrees((-anim.orients[np.newaxis] * anim.rotations).euler(order=order[::-1]))
# else:
# rots = np.degrees(anim.rotations.euler(order=order[::-1]))
# rots = np.degrees(anim.rotations.euler(order=order[::-1]))
if quater:
rots = np.degrees(anim.rotations.euler(order=order[::-1]))
else:
rots = anim.rotations
poss = anim.positions
for i in range(anim.shape[0]):
for j in range(anim.shape[1]):
if positions or j == 0:
f.write("%f %f %f %f %f %f " % (
poss[i, j, 0], poss[i, j, 1], poss[i, j, 2],
rots[i, j, ordermap[order[0]]], rots[i, j, ordermap[order[1]]], rots[i, j, ordermap[order[2]]]))
else:
if mask is None or mask[j] == 1:  # 'is None' avoids an ambiguous elementwise comparison when mask is an array
f.write("%f %f %f " % (
rots[i, j, ordermap[order[0]]], rots[i, j, ordermap[order[1]]],
rots[i, j, ordermap[order[2]]]))
else:
f.write("%f %f %f " % (0, 0, 0))
f.write("\n") | Saves an Animation to file as BVH
Parameters
----------
filename: str
File to be saved to
anim : Animation
Animation to save
names : [str]
List of joint names
order : str
Optional Specifier for joint order.
Given as string E.G 'xyz', 'zxy'
frametime : float
Optional Animation Frame time
positions : bool
Optional specifier to save bone
positions for each frame
orients : bool
Multiply joint orients to the rotations
before saving. | save | python | EricGuo5513/momask-codes | visualization/BVH_mod.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/BVH_mod.py | MIT |
def joints(parents):
"""
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
joints : (J) ndarray
Array of joint indices
"""
return np.arange(len(parents), dtype=int) | Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
joints : (J) ndarray
Array of joint indices | joints | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
def joints_list(parents):
"""
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
joints : [ndarray]
List of arrays of joint indices for
each joint
"""
return list(joints(parents)[:, np.newaxis]) | Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
joints : [ndarray]
List of arrays of joint idices for
each joint | joints_list | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
def parents_list(parents):
"""
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
parents : [ndarray]
List of arrays of joint indices for
the parents of each joint
"""
return list(parents[:, np.newaxis]) | Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
parents : [ndarray]
List of arrays of joint idices for
the parents of each joint | parents_list | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
def children_list(parents):
"""
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
children : [ndarray]
List of arrays of joint indices for
the children of each joint
"""
def joint_children(i):
return [j for j, p in enumerate(parents) if p == i]
return list(map(lambda j: np.array(joint_children(j)), joints(parents))) | Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
children : [ndarray]
List of arrays of joint indices for
the children of each joint | children_list | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
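A small worked example (a sketch) on a four-joint toy skeleton:

    import numpy as np
    parents = np.array([-1, 0, 1, 1])   # joint 0 is the root; joints 2 and 3 hang off joint 1
    print(children_list(parents))
    # [array([1]), array([2, 3]), array([]), array([])] -- leaf joints get empty arrays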
def descendants_list(parents):
"""
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
descendants : [ndarray]
List of arrays of joint indices for
the descendants of each joint
"""
children = children_list(parents)
def joint_descendants(i):
return sum([joint_descendants(j) for j in children[i]], list(children[i]))
return list(map(lambda j: np.array(joint_descendants(j)), joints(parents))) | Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
descendants : [ndarray]
List of arrays of joint idices for
the descendants of each joint | descendants_list | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
def ancestors_list(parents):
"""
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
ancestors : [ndarray]
List of arrays of joint indices for
the ancestors of each joint
"""
decendants = descendants_list(parents)
def joint_ancestors(i):
return [j for j in joints(parents) if i in decendants[j]]
return list(map(lambda j: np.array(joint_ancestors(j)), joints(parents))) | Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
ancestors : [ndarray]
List of arrays of joint idices for
the ancestors of each joint | ancestors_list | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
def mask(parents, filter):
"""
Constructs a mask for a given filter
A mask is a (J, J) ndarray truth table for a given
condition over J joints. For example there
may be a mask specifying if a joint M is a
child of another joint N.
This could be constructed into a mask using
`m = mask(parents, children_list)` and the condition
of childhood tested using `m[N, M]`.
Parameters
----------
parents : (J) ndarray
parents array
filter : (J) ndarray -> [ndarray]
function that outputs a list of arrays
of joint indices for some condition
Returns
-------
mask : (N, N) ndarray
boolean truth table of given condition
"""
m = np.zeros((len(parents), len(parents))).astype(bool)
jnts = joints(parents)
fltr = filter(parents)
for i, f in enumerate(fltr): m[i, :] = np.any(jnts[:, np.newaxis] == f[np.newaxis, :], axis=1)
return m | Constructs a mask for a given filter
A mask is a (J, J) ndarray truth table for a given
condition over J joints. For example there
may be a mask specifying if a joint M is a
child of another joint N.
This could be constructed into a mask using
`m = mask(parents, children_list)` and the condition
of childhood tested using `m[N, M]`.
Parameters
----------
parents : (J) ndarray
parents array
filter : (J) ndarray -> [ndarray]
function that outputs a list of arrays
of joint indices for some condition
Returns
-------
mask : (N, N) ndarray
boolean truth table of given condition | mask | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
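A concrete instance (a sketch) of the childhood test described in the docstring, reusing the toy skeleton from the children_list example:

    import numpy as np
    parents = np.array([-1, 0, 1, 1])
    m = mask(parents, children_list)
    print(m[1, 2], m[2, 1])   # True False: joint 2 is a child of joint 1, not the reverse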
def constraints(anim, **kwargs):
"""
Constraint list for Animation
This constraint list can be used in the
VerletParticle solver to constrain
an animation's global joint positions.
Parameters
----------
anim : Animation
Input animation
masses : (F, J) ndarray
Optional list of masses
for joints J across frames F
defaults to weighting by
vertical height
Returns
-------
constraints : [(int, int, (F, J) ndarray, (F, J) ndarray, (F, J) ndarray)]
A list of constraints in the format:
(Joint1, Joint2, Masses1, Masses2, Lengths)
"""
masses = kwargs.pop('masses', None)
children = children_list(anim.parents)
constraints = []
points_offsets = Animation.offsets_global(anim)
points = Animation.positions_global(anim)
if masses is None:
masses = 1.0 / (0.1 + np.absolute(points_offsets[:, 1]))
masses = masses[np.newaxis].repeat(len(anim), axis=0)
for j in range(anim.shape[1]):
""" Add constraints between all joints and their children """
for c0 in children[j]:
dists = np.sum((points[:, c0] - points[:, j]) ** 2.0, axis=1) ** 0.5
constraints.append((c0, j, masses[:, c0], masses[:, j], dists))
""" Add constraints between all children of joint """
for c1 in children[j]:
if c0 == c1: continue
dists = np.sum((points[:, c0] - points[:, c1]) ** 2.0, axis=1) ** 0.5
constraints.append((c0, c1, masses[:, c0], masses[:, c1], dists))
return constraints | Constraint list for Animation
This constraint list can be used in the
VerletParticle solver to constrain
an animation's global joint positions.
Parameters
----------
anim : Animation
Input animation
masses : (F, J) ndarray
Optional list of masses
for joints J across frames F
defaults to weighting by
vertical height
Returns
-------
constraints : [(int, int, (F, J) ndarray, (F, J) ndarray, (F, J) ndarray)]
A list of constraints in the format:
(Joint1, Joint2, Masses1, Masses2, Lengths) | constraints | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
def graph(anim):
"""
Generates a weighted adjacency matrix
using local joint distances along
the skeletal structure.
Joints which are not connected
are assigned the weight `0`.
Joints which actually have zero distance
between them, but are still connected, are
perturbed by some minimal amount.
The output of this routine can be used
with the `scipy.sparse.csgraph`
routines for graph analysis.
Parameters
----------
anim : Animation
input animation
Returns
-------
graph : (N, N) ndarray
weight adjacency matrix using
local distances along the
skeletal structure from joint
N to joint M. Joints that are not
directly connected are assigned
the weight `0`.
"""
graph = np.zeros((anim.shape[1], anim.shape[1]))  # shape must be passed to np.zeros as a tuple
lengths = np.sum(anim.offsets ** 2.0, axis=1) ** 0.5 + 0.001
for i, p in enumerate(anim.parents):
if p == -1: continue
graph[i, p] = lengths[p]
graph[p, i] = lengths[p]
return graph | Generates a weighted adjacency matrix
using local joint distances along
the skeletal structure.
Joints which are not connected
are assigned the weight `0`.
Joints which actually have zero distance
between them, but are still connected, are
perturbed by some minimal amount.
The output of this routine can be used
with the `scipy.sparse.csgraph`
routines for graph analysis.
Parameters
----------
anim : Animation
input animation
Returns
-------
graph : (N, N) ndarray
weight adjacency matrix using
local distances along the
skeletal structure from joint
N to joint M. Joints that are not
directly connected are assigned
the weight `0`. | graph | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
def find_distance(distances, generated, prev, i, j):
""" If root, identity, or already generated, return """
if j == -1: return (0.0, True)
if j == i: return (0.0, True)
if generated[i, j]: return (distances[i, j], True)
""" Find best distances along parents and children """
par_dists = [(joint_lengths[j], find_distance(distances, generated, j, i, p)) for p in joint_parents[j] if
p != prev]
out_dists = [(joint_lengths[c], find_distance(distances, generated, j, i, c)) for c in joint_children[j] if
c != prev]
""" Check valid distance and not dead end """
par_dists = [a + d for (a, (d, f)) in par_dists if f]
out_dists = [a + d for (a, (d, f)) in out_dists if f]
""" All dead ends """
if (out_dists + par_dists) == []: return (0.0, False)
""" Get minimum path """
dist = min(out_dists + par_dists)
distances[i, j] = dist
distances[j, i] = dist
generated[i, j] = True
generated[j, i] = True
return (dist, True)  # fix: recursive callers unpack this tuple, so falling off the end would crash | If root, identity, or already generated, return | distances.find_distance | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT
def distances(anim):
"""
Generates a distance matrix for
pairwise joint distances along
the skeletal structure
Parameters
----------
anim : Animation
input animation
Returns
-------
distances : (N, N) ndarray
array of pairwise distances
along skeletal structure
from some joint N to some
joint M
"""
distances = np.zeros((anim.shape[1], anim.shape[1]))
generated = distances.copy().astype(bool)
joint_lengths = np.sum(anim.offsets ** 2.0, axis=1) ** 0.5
joint_children = children_list(anim.parents)  # these helpers expect the parents array, not the Animation itself
joint_parents = parents_list(anim.parents)
def find_distance(distances, generated, prev, i, j):
""" If root, identity, or already generated, return """
if j == -1: return (0.0, True)
if j == i: return (0.0, True)
if generated[i, j]: return (distances[i, j], True)
""" Find best distances along parents and children """
par_dists = [(joint_lengths[j], find_distance(distances, generated, j, i, p)) for p in joint_parents[j] if
p != prev]
out_dists = [(joint_lengths[c], find_distance(distances, generated, j, i, c)) for c in joint_children[j] if
c != prev]
""" Check valid distance and not dead end """
par_dists = [a + d for (a, (d, f)) in par_dists if f]
out_dists = [a + d for (a, (d, f)) in out_dists if f]
""" All dead ends """
if (out_dists + par_dists) == []: return (0.0, False)
""" Get minimum path """
dist = min(out_dists + par_dists)
distances[i, j] = dist
distances[j, i] = dist
generated[i, j] = True
generated[j, i] = True
return (dist, True)  # fix: recursive callers unpack this tuple, so falling off the end would crash
for i in range(anim.shape[1]):
for j in range(anim.shape[1]):
find_distance(distances, generated, -1, i, j)
return distances | Generates a distance matrix for
pairwise joint distances along
the skeletal structure
Parameters
----------
anim : Animation
input animation
Returns
-------
distances : (N, N) ndarray
array of pairwise distances
along skeletal structure
from some joint N to some
joint M | distances | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
def edges(parents):
"""
Animation structure edges
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
edges : (M, 2) ndarray
array of pairs where each
pair contains two joint indices
which correspond to an edge in the
joint structure going from parent to child.
"""
return np.array(list(zip(parents, joints(parents)))[1:]) | Animation structure edges
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
edges : (M, 2) ndarray
array of pairs where each
pair contains two joint indices
which correspond to an edge in the
joint structure going from parent to child. | edges | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
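A sketch on the same toy skeleton; each returned row is a (parent, child) pair:

    import numpy as np
    parents = np.array([-1, 0, 1, 1])
    print(edges(parents))
    # [[0 1]
    #  [1 2]
    #  [1 3]]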
def incidence(parents):
"""
Incidence Matrix
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
incidence : (N, M) ndarray
Matrix of N joint positions by
M edges in which each entry is either
1 or -1; multiplication by the
joint positions returns an
array of vectors along each edge
of the structure
"""
es = edges(parents)
inc = np.zeros((len(parents) - 1, len(parents))).astype(int)  # np.int was removed in NumPy 1.24
for i, e in enumerate(es):
inc[i, e[0]] = 1
inc[i, e[1]] = -1
return inc.T | Incidence Matrix
Parameters
----------
parents : (J) ndarray
parents array
Returns
-------
incidence : (N, M) ndarray
Matrix of N joint positions by
M edges in which each entry is either
1 or -1; multiplication by the
joint positions returns an
array of vectors along each edge
of the structure | incidence | python | EricGuo5513/momask-codes | visualization/AnimationStructure.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/AnimationStructure.py | MIT |
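A sketch of the multiplication the docstring alludes to; contracting the joint axis of the returned (joints x edges) matrix yields one difference vector per edge:

    import numpy as np
    parents = np.array([-1, 0, 1, 1])
    inc = incidence(parents)        # (4, 3): joints x edges
    pos = np.random.randn(4, 3)     # one frame of joint positions
    edge_vecs = inc.T @ pos         # (3, 3): pos[parent] - pos[child] for each edge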
def load(filename:str, order:str=None) -> dict:
"""Loads a BVH file.
Args:
filename (str): Path to the BVH file.
order (str): The order of the rotation channels. (e.g. "xyz")
Returns:
dict: A dictionary containing the following keys:
* names (list)(jnum): The names of the joints.
* parents (list)(jnum): The parent indices.
* offsets (np.ndarray)(jnum, 3): The offsets of the joints.
* rotations (np.ndarray)(fnum, jnum, 3) : The local coordinates of rotations of the joints.
* positions (np.ndarray)(fnum, jnum, 3) : The positions of the joints.
* order (str): The order of the channels.
* frametime (float): The time between two frames.
"""
f = open(filename, "r")
i = 0
active = -1
end_site = False
# Create empty lists for saving parameters
names = []
offsets = np.array([]).reshape((0, 3))
parents = np.array([], dtype=int)
# Parse the file, line by line
for line in f:
if "HIERARCHY" in line: continue
if "MOTION" in line: continue
rmatch = re.match(r"ROOT (\w+)", line)
if rmatch:
names.append(rmatch.group(1))
offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
parents = np.append(parents, active)
active = (len(parents) - 1)
continue
if "{" in line: continue
if "}" in line:
if end_site:
end_site = False
else:
active = parents[active]
continue
offmatch = re.match(r"\s*OFFSET\s+([\-\d\.e]+)\s+([\-\d\.e]+)\s+([\-\d\.e]+)", line)
if offmatch:
if not end_site:
offsets[active] = np.array([list(map(float, offmatch.groups()))])
continue
chanmatch = re.match(r"\s*CHANNELS\s+(\d+)", line)
if chanmatch:
channels = int(chanmatch.group(1))
if order is None:
channelis = 0 if channels == 3 else 3
channelie = 3 if channels == 3 else 6
parts = line.split()[2 + channelis:2 + channelie]
if any([p not in channelmap for p in parts]):
continue
order = "".join([channelmap[p] for p in parts])
continue
jmatch = re.match("\s*JOINT\s+(\w+)", line)
if jmatch:
names.append(jmatch.group(1))
offsets = np.append(offsets, np.array([[0, 0, 0]]), axis=0)
parents = np.append(parents, active)
active = (len(parents) - 1)
continue
if "End Site" in line:
end_site = True
continue
fmatch = re.match("\s*Frames:\s+(\d+)", line)
if fmatch:
fnum = int(fmatch.group(1))
positions = offsets[None].repeat(fnum, axis=0)
rotations = np.zeros((fnum, len(offsets), 3))
continue
fmatch = re.match("\s*Frame Time:\s+([\d\.]+)", line)
if fmatch:
frametime = float(fmatch.group(1))
continue
dmatch = line.strip().split()  # split on any whitespace so blank lines yield [] and are skipped
if dmatch:
data_block = np.array(list(map(float, dmatch)))
N = len(parents)
fi = i
if channels == 3:
positions[fi, 0:1] = data_block[0:3]
rotations[fi, :] = data_block[3:].reshape(N, 3)
elif channels == 6:
data_block = data_block.reshape(N, 6)
positions[fi, :] = data_block[:, 0:3]
rotations[fi, :] = data_block[:, 3:6]
elif channels == 9:
positions[fi, 0] = data_block[0:3]
data_block = data_block[3:].reshape(N - 1, 9)
rotations[fi, 1:] = data_block[:, 3:6]
positions[fi, 1:] += data_block[:, 0:3] * data_block[:, 6:9]
else:
raise Exception("Too many channels! %i" % channels)
i += 1
f.close()
return {
'rotations': rotations,
'positions': positions,
'offsets': offsets,
'parents': parents,
'names': names,
'order': order,
'frametime': frametime
} | Loads a BVH file.
Args:
filename (str): Path to the BVH file.
order (str): The order of the rotation channels. (e.g. "xyz")
Returns:
dict: A dictionary containing the following keys:
* names (list)(jnum): The names of the joints.
* parents (list)(jnum): The parent indices.
* offsets (np.ndarray)(jnum, 3): The offsets of the joints.
* rotations (np.ndarray)(fnum, jnum, 3) : The local coordinates of rotations of the joints.
* positions (np.ndarray)(fnum, jnum, 3) : The positions of the joints.
* order (str): The order of the channels.
* frametime (float): The time between two frames. | load | python | EricGuo5513/momask-codes | visualization/utils/bvh.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/utils/bvh.py | MIT |
def save(filename, data, save_positions=False):
""" Save a joint hierarchy to a file.
Args:
filename (str): Path of the BVH file to write.
data (dict): The data to save (rotations, positions, offsets, parents, names, order, frametime).
save_positions (bool): Whether to write every joint's position in the MOTION section. (False is recommended.)
"""
order = data['order']
frametime = data['frametime']
with open(filename, 'w') as f:
t = ""
f.write("%sHIERARCHY\n" % t)
f.write("%sROOT %s\n" % (t, data['names'][0]))
f.write("%s{\n" % t)
t += '\t'
f.write("%sOFFSET %f %f %f\n" % (t, data['offsets'][0,0], data['offsets'][0,1], data['offsets'][0,2]) )
f.write("%sCHANNELS 6 Xposition Yposition Zposition %s %s %s \n" %
(t, channelmap_inv[order[0]], channelmap_inv[order[1]], channelmap_inv[order[2]]))
save_order = [0]
for i in range(len(data['parents'])):
if data['parents'][i] == 0:
t = save_joint(f, data, t, i, save_order, order=order, save_positions=save_positions)
t = t[:-1]
f.write("%s}\n" % t)
rots, poss = data['rotations'], data['positions']
f.write("MOTION\n")
f.write("Frames: %i\n" % len(rots));
f.write("Frame Time: %f\n" % frametime);
for i in range(rots.shape[0]):
for j in save_order:
if save_positions or j == 0:
f.write("%f %f %f %f %f %f " % (
poss[i,j,0], poss[i,j,1], poss[i,j,2],
rots[i,j,0], rots[i,j,1], rots[i,j,2]))
else:
f.write("%f %f %f " % (
rots[i,j,0], rots[i,j,1], rots[i,j,2]))
f.write("\n") | Save a joint hierarchy to a file.
Args:
filename (str): Path of the BVH file to write.
data (dict): The data to save (rotations, positions, offsets, parents, names, order, frametime).
save_positions (bool): Whether to write every joint's position in the MOTION section. (False is recommended.) | save | python | EricGuo5513/momask-codes | visualization/utils/bvh.py | https://github.com/EricGuo5513/momask-codes/blob/master/visualization/utils/bvh.py | MIT
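A round-trip sketch using the dict produced by load above (both file paths are hypothetical):

    data = load('example.bvh')   # hypothetical input path
    save('copy.bvh', data)       # hypothetical output path; writes root position plus per-joint Euler angles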
def qmul(q, r):
"""
Multiply quaternion(s) q with quaternion(s) r.
Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.
Returns q*r as a tensor of shape (*, 4).
"""
assert q.shape[-1] == 4
assert r.shape[-1] == 4
original_shape = q.shape
# Compute outer product
terms = torch.bmm(r.view(-1, 4, 1), q.view(-1, 1, 4))
w = terms[:, 0, 0] - terms[:, 1, 1] - terms[:, 2, 2] - terms[:, 3, 3]
x = terms[:, 0, 1] + terms[:, 1, 0] - terms[:, 2, 3] + terms[:, 3, 2]
y = terms[:, 0, 2] + terms[:, 1, 3] + terms[:, 2, 0] - terms[:, 3, 1]
z = terms[:, 0, 3] - terms[:, 1, 2] + terms[:, 2, 1] + terms[:, 3, 0]
return torch.stack((w, x, y, z), dim=1).view(original_shape) | Multiply quaternion(s) q with quaternion(s) r.
Expects two equally-sized tensors of shape (*, 4), where * denotes any number of dimensions.
Returns q*r as a tensor of shape (*, 4). | qmul | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
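A quick sanity check (a sketch): composing two 90-degree rotations about x gives a 180-degree rotation, i.e. the quaternion (0, 1, 0, 0):

    import numpy as np
    import torch
    half = torch.tensor([[np.cos(np.pi / 4), np.sin(np.pi / 4), 0.0, 0.0]])  # 90 deg about x
    print(qmul(half, half))   # ~tensor([[0., 1., 0., 0.]])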
def qrot(q, v):
"""
Rotate vector(s) v about the rotation described by quaternion(s) q.
Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
where * denotes any number of dimensions.
Returns a tensor of shape (*, 3).
"""
assert q.shape[-1] == 4
assert v.shape[-1] == 3
assert q.shape[:-1] == v.shape[:-1]
original_shape = list(v.shape)
# print(q.shape)
q = q.contiguous().view(-1, 4)
v = v.contiguous().view(-1, 3)
qvec = q[:, 1:]
uv = torch.cross(qvec, v, dim=1)
uuv = torch.cross(qvec, uv, dim=1)
return (v + 2 * (q[:, :1] * uv + uuv)).view(original_shape) | Rotate vector(s) v about the rotation described by quaternion(s) q.
Expects a tensor of shape (*, 4) for q and a tensor of shape (*, 3) for v,
where * denotes any number of dimensions.
Returns a tensor of shape (*, 3). | qrot | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
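For example (a sketch), rotating +z by 90 degrees about y should land on +x:

    import numpy as np
    import torch
    q = torch.tensor([[np.cos(np.pi / 4), 0.0, np.sin(np.pi / 4), 0.0]])  # 90 deg about y
    v = torch.tensor([[0.0, 0.0, 1.0]])
    print(qrot(q, v))   # ~tensor([[1., 0., 0.]])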
def qeuler(q, order, epsilon=0, deg=True):
"""
Convert quaternion(s) q to Euler angles.
Expects a tensor of shape (*, 4), where * denotes any number of dimensions.
Returns a tensor of shape (*, 3).
"""
assert q.shape[-1] == 4
original_shape = list(q.shape)
original_shape[-1] = 3
q = q.view(-1, 4)
q0 = q[:, 0]
q1 = q[:, 1]
q2 = q[:, 2]
q3 = q[:, 3]
if order == 'xyz':
x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
y = torch.asin(torch.clamp(2 * (q1 * q3 + q0 * q2), -1 + epsilon, 1 - epsilon))
z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
elif order == 'yzx':
x = torch.atan2(2 * (q0 * q1 - q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
z = torch.asin(torch.clamp(2 * (q1 * q2 + q0 * q3), -1 + epsilon, 1 - epsilon))
elif order == 'zxy':
x = torch.asin(torch.clamp(2 * (q0 * q1 + q2 * q3), -1 + epsilon, 1 - epsilon))
y = torch.atan2(2 * (q0 * q2 - q1 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
z = torch.atan2(2 * (q0 * q3 - q1 * q2), 1 - 2 * (q1 * q1 + q3 * q3))
elif order == 'xzy':
x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
y = torch.atan2(2 * (q0 * q2 + q1 * q3), 1 - 2 * (q2 * q2 + q3 * q3))
z = torch.asin(torch.clamp(2 * (q0 * q3 - q1 * q2), -1 + epsilon, 1 - epsilon))
elif order == 'yxz':
x = torch.asin(torch.clamp(2 * (q0 * q1 - q2 * q3), -1 + epsilon, 1 - epsilon))
y = torch.atan2(2 * (q1 * q3 + q0 * q2), 1 - 2 * (q1 * q1 + q2 * q2))
z = torch.atan2(2 * (q1 * q2 + q0 * q3), 1 - 2 * (q1 * q1 + q3 * q3))
elif order == 'zyx':
x = torch.atan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1 * q1 + q2 * q2))
y = torch.asin(torch.clamp(2 * (q0 * q2 - q1 * q3), -1 + epsilon, 1 - epsilon))
z = torch.atan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2 * q2 + q3 * q3))
else:
raise ValueError("Unknown Euler order: %s" % order)  # a bare raise here has no active exception
if deg:
return torch.stack((x, y, z), dim=1).view(original_shape) * 180 / np.pi
else:
return torch.stack((x, y, z), dim=1).view(original_shape) | Convert quaternion(s) q to Euler angles.
Expects a tensor of shape (*, 4), where * denotes any number of dimensions.
Returns a tensor of shape (*, 3). | qeuler | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
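A sketch of the conversion for a single-axis rotation (angles come back in degrees since deg=True by default):

    import numpy as np
    import torch
    q = torch.tensor([[np.cos(np.pi / 4), np.sin(np.pi / 4), 0.0, 0.0]])  # 90 deg about x
    print(qeuler(q, 'xyz'))   # ~tensor([[90., 0., 0.]])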
def qfix(q):
"""
Enforce quaternion continuity across the time dimension by selecting
the representation (q or -q) with minimal distance (or, equivalently, maximal dot product)
between two consecutive frames.
Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints.
Returns a tensor of the same shape.
"""
assert len(q.shape) == 3
assert q.shape[-1] == 4
result = q.copy()
dot_products = np.sum(q[1:] * q[:-1], axis=2)
mask = dot_products < 0
mask = (np.cumsum(mask, axis=0) % 2).astype(bool)
result[1:][mask] *= -1
return result | Enforce quaternion continuity across the time dimension by selecting
the representation (q or -q) with minimal distance (or, equivalently, maximal dot product)
between two consecutive frames.
Expects a tensor of shape (L, J, 4), where L is the sequence length and J is the number of joints.
Returns a tensor of the same shape. | qfix | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
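A sketch: an antipodal sign flip in the middle of an otherwise constant sequence is undone:

    import numpy as np
    q = np.tile(np.array([1.0, 0.0, 0.0, 0.0]), (3, 1, 1))   # (L=3, J=1, 4), all identity
    q[1] *= -1                    # antipodal flip in the middle frame
    print(qfix(q)[:, 0, 0])       # [1. 1. 1.]: continuity restored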
def euler2quat(e, order, deg=True):
"""
Convert Euler angles to quaternions.
"""
assert e.shape[-1] == 3
original_shape = list(e.shape)
original_shape[-1] = 4
e = e.view(-1, 3)
## if euler angles in degrees
if deg:
e = e * np.pi / 180.
x = e[:, 0]
y = e[:, 1]
z = e[:, 2]
rx = torch.stack((torch.cos(x / 2), torch.sin(x / 2), torch.zeros_like(x), torch.zeros_like(x)), dim=1)
ry = torch.stack((torch.cos(y / 2), torch.zeros_like(y), torch.sin(y / 2), torch.zeros_like(y)), dim=1)
rz = torch.stack((torch.cos(z / 2), torch.zeros_like(z), torch.zeros_like(z), torch.sin(z / 2)), dim=1)
result = None
for coord in order:
if coord == 'x':
r = rx
elif coord == 'y':
r = ry
elif coord == 'z':
r = rz
else:
raise ValueError("Unknown axis in order: %s" % coord)  # a bare raise here has no active exception
if result is None:
result = r
else:
result = qmul(result, r)
# Reverse antipodal representation to have a non-negative "w"
if order in ['xyz', 'yzx', 'zxy']:
result *= -1
return result.view(original_shape) | Convert Euler angles to quaternions. | euler2quat | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
def expmap_to_quaternion(e):
"""
Convert axis-angle rotations (aka exponential maps) to quaternions.
Stable formula from "Practical Parameterization of Rotations Using the Exponential Map".
Expects a tensor of shape (*, 3), where * denotes any number of dimensions.
Returns a tensor of shape (*, 4).
"""
assert e.shape[-1] == 3
original_shape = list(e.shape)
original_shape[-1] = 4
e = e.reshape(-1, 3)
theta = np.linalg.norm(e, axis=1).reshape(-1, 1)
w = np.cos(0.5 * theta).reshape(-1, 1)
xyz = 0.5 * np.sinc(0.5 * theta / np.pi) * e
return np.concatenate((w, xyz), axis=1).reshape(original_shape) | Convert axis-angle rotations (aka exponential maps) to quaternions.
Stable formula from "Practical Parameterization of Rotations Using the Exponential Map".
Expects a tensor of shape (*, 3), where * denotes any number of dimensions.
Returns a tensor of shape (*, 4). | expmap_to_quaternion | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
def euler_to_quaternion(e, order):
"""
Convert Euler angles to quaternions.
"""
assert e.shape[-1] == 3
original_shape = list(e.shape)
original_shape[-1] = 4
e = e.reshape(-1, 3)
x = e[:, 0]
y = e[:, 1]
z = e[:, 2]
rx = np.stack((np.cos(x / 2), np.sin(x / 2), np.zeros_like(x), np.zeros_like(x)), axis=1)
ry = np.stack((np.cos(y / 2), np.zeros_like(y), np.sin(y / 2), np.zeros_like(y)), axis=1)
rz = np.stack((np.cos(z / 2), np.zeros_like(z), np.zeros_like(z), np.sin(z / 2)), axis=1)
result = None
for coord in order:
if coord == 'x':
r = rx
elif coord == 'y':
r = ry
elif coord == 'z':
r = rz
else:
raise ValueError("Unknown axis in order: %s" % coord)  # a bare raise here has no active exception
if result is None:
result = r
else:
result = qmul_np(result, r)
# Reverse antipodal representation to have a non-negative "w"
if order in ['xyz', 'yzx', 'zxy']:
result *= -1
return result.reshape(original_shape) | Convert Euler angles to quaternions. | euler_to_quaternion | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
def quaternion_to_matrix(quaternions):
"""
Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3)) | Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3). | quaternion_to_matrix | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
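Two quick checks (a sketch): the identity quaternion maps to the identity matrix, and a 90-degree z-rotation sends +x to +y:

    import numpy as np
    import torch
    print(quaternion_to_matrix(torch.tensor([1.0, 0.0, 0.0, 0.0])))       # 3x3 identity
    q90 = torch.tensor([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
    print(quaternion_to_matrix(q90) @ torch.tensor([1.0, 0.0, 0.0]))      # ~[0., 1., 0.]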
def qpow(q0, t, dtype=torch.float):
''' q0 : tensor of quaternions
t: tensor of powers
'''
q0 = qnormalize(q0)
theta0 = torch.acos(q0[..., 0])
## if theta0 is close to zero, add epsilon to avoid NaNs
mask = ((theta0 <= 1e-9) & (theta0 >= -1e-9)).float()  # 10e-10 == 1e-9; a float mask keeps '1 - mask' valid on newer PyTorch
theta0 = (1 - mask) * theta0 + mask * 1e-9
v0 = q0[..., 1:] / torch.sin(theta0).view(-1, 1)
if isinstance(t, torch.Tensor):
q = torch.zeros(t.shape + q0.shape)
theta = t.view(-1, 1) * theta0.view(1, -1)
else: ## if t is a number
q = torch.zeros(q0.shape)
theta = t * theta0
q[..., 0] = torch.cos(theta)
q[..., 1:] = v0 * torch.sin(theta).unsqueeze(-1)
return q.to(dtype) | q0 : tensor of quaternions
t: tensor of powers | qpow | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
def qslerp(q0, q1, t):
'''
q0: starting quaternion
q1: ending quaternion
t: array of points along the way
Returns:
Tensor of Slerps: t.shape + q0.shape
'''
q0 = qnormalize(q0)
q1 = qnormalize(q1)
q_ = qpow(qmul(q1, qinv(q0)), t)
return qmul(q_,
q0.contiguous().view(torch.Size([1] * len(t.shape)) + q0.shape).expand(t.shape + q0.shape).contiguous()) | q0: starting quaternion
q1: ending quaternion
t: array of points along the way
Returns:
Tensor of Slerps: t.shape + q0.shape | qslerp | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
def qbetween(v0, v1):
'''
find the quaternion used to rotate v0 to v1
'''
assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)'
assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)'
v = torch.cross(v0, v1)
w = torch.sqrt((v0 ** 2).sum(dim=-1, keepdim=True) * (v1 ** 2).sum(dim=-1, keepdim=True)) + (v0 * v1).sum(dim=-1,
keepdim=True)
return qnormalize(torch.cat([w, v], dim=-1)) | find the quaternion used to rotate v0 to v1 | qbetween | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
def qbetween_np(v0, v1):
'''
find the quaternion used to rotate v0 to v1
'''
assert v0.shape[-1] == 3, 'v0 must be of the shape (*, 3)'
assert v1.shape[-1] == 3, 'v1 must be of the shape (*, 3)'
v0 = torch.from_numpy(v0).float()
v1 = torch.from_numpy(v1).float()
return qbetween(v0, v1).numpy() | find the quaternion used to rotate v0 to v1 | qbetween_np | python | EricGuo5513/momask-codes | common/quaternion.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/quaternion.py | MIT |
def inverse_kinematics_np(self, joints, face_joint_idx, smooth_forward=False):
assert len(face_joint_idx) == 4
'''Get Forward Direction'''
l_hip, r_hip, sdr_r, sdr_l = face_joint_idx
across1 = joints[:, r_hip] - joints[:, l_hip]
across2 = joints[:, sdr_r] - joints[:, sdr_l]
across = across1 + across2
across = across / np.sqrt((across**2).sum(axis=-1))[:, np.newaxis]
# print(across1.shape, across2.shape)
# forward (batch_size, 3)
forward = np.cross(np.array([[0, 1, 0]]), across, axis=-1)
if smooth_forward:
forward = filters.gaussian_filter1d(forward, 20, axis=0, mode='nearest')
# forward (batch_size, 3)
forward = forward / np.sqrt((forward**2).sum(axis=-1))[..., np.newaxis]
'''Get Root Rotation'''
target = np.array([[0,0,1]]).repeat(len(forward), axis=0)
root_quat = qbetween_np(forward, target)
'''Inverse Kinematics'''
# quat_params (batch_size, joints_num, 4)
# print(joints.shape[:-1])
quat_params = np.zeros(joints.shape[:-1] + (4,))
# print(quat_params.shape)
root_quat[0] = np.array([[1.0, 0.0, 0.0, 0.0]])
quat_params[:, 0] = root_quat
# quat_params[0, 0] = np.array([[1.0, 0.0, 0.0, 0.0]])
for chain in self._kinematic_tree:
R = root_quat
for j in range(len(chain) - 1):
# (batch, 3)
u = self._raw_offset_np[chain[j+1]][np.newaxis,...].repeat(len(joints), axis=0)
# print(u.shape)
# (batch, 3)
v = joints[:, chain[j+1]] - joints[:, chain[j]]
v = v / np.sqrt((v**2).sum(axis=-1))[:, np.newaxis]
# print(u.shape, v.shape)
rot_u_v = qbetween_np(u, v)
R_loc = qmul_np(qinv_np(R), rot_u_v)
quat_params[:,chain[j + 1], :] = R_loc
R = qmul_np(R, R_loc)
return quat_params | Get Forward Direction | inverse_kinematics_np | python | EricGuo5513/momask-codes | common/skeleton.py | https://github.com/EricGuo5513/momask-codes/blob/master/common/skeleton.py | MIT |
def uniform_skeleton(positions, target_offset):
src_skel = Skeleton(n_raw_offsets, kinematic_chain, 'cpu')
src_offset = src_skel.get_offsets_joints(torch.from_numpy(positions[0]))
src_offset = src_offset.numpy()
tgt_offset = target_offset.numpy()
# print(src_offset)
# print(tgt_offset)
'''Calculate Scale Ratio as the ratio of legs'''
src_leg_len = np.abs(src_offset[l_idx1]).max() + np.abs(src_offset[l_idx2]).max()
tgt_leg_len = np.abs(tgt_offset[l_idx1]).max() + np.abs(tgt_offset[l_idx2]).max()
scale_rt = tgt_leg_len / src_leg_len
# print(scale_rt)
src_root_pos = positions[:, 0]
tgt_root_pos = src_root_pos * scale_rt
'''Inverse Kinematics'''
quat_params = src_skel.inverse_kinematics_np(positions, face_joint_indx)
# print(quat_params.shape)
'''Forward Kinematics'''
src_skel.set_offset(target_offset)
new_joints = src_skel.forward_kinematics_np(quat_params, tgt_root_pos)
return new_joints | Calculate Scale Ratio as the ratio of legs | uniform_skeleton | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def get_rifke(positions):
'''Local pose'''
positions[..., 0] -= positions[:, 0:1, 0]
positions[..., 2] -= positions[:, 0:1, 2]
'''All pose face Z+'''
positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions)
return positions | Local pose | extract_features.get_rifke | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def get_quaternion(positions):
skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
# (seq_len, joints_num, 4)
quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False)
'''Fix Quaternion Discontinuity'''
quat_params = qfix(quat_params)
# (seq_len, 4)
r_rot = quat_params[:, 0].copy()
# print(r_rot[0])
'''Root Linear Velocity'''
# (seq_len - 1, 3)
velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
# print(r_rot.shape, velocity.shape)
velocity = qrot_np(r_rot[1:], velocity)
'''Root Angular Velocity'''
# (seq_len - 1, 4)
r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
quat_params[1:, 0] = r_velocity
# (seq_len, joints_num, 4)
return quat_params, r_velocity, velocity, r_rot | Fix Quaternion Discontinuity | extract_features.get_quaternion | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def get_cont6d_params(positions):
skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
# (seq_len, joints_num, 4)
quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True)
'''Quaternion to continuous 6D'''
cont_6d_params = quaternion_to_cont6d_np(quat_params)
# (seq_len, 4)
r_rot = quat_params[:, 0].copy()
# print(r_rot[0])
'''Root Linear Velocity'''
# (seq_len - 1, 3)
velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
# print(r_rot.shape, velocity.shape)
velocity = qrot_np(r_rot[1:], velocity)
'''Root Angular Velocity'''
# (seq_len - 1, 4)
r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
# (seq_len, joints_num, 4)
return cont_6d_params, r_velocity, velocity, r_rot | Quaternion to continuous 6D | extract_features.get_cont6d_params | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def extract_features(positions, feet_thre, n_raw_offsets, kinematic_chain, face_joint_indx, fid_r, fid_l):
global_positions = positions.copy()
""" Get Foot Contacts """
def foot_detect(positions, thres):
velfactor, heightfactor = np.array([thres, thres]), np.array([3.0, 2.0])
feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2
feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2
feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2
# feet_l_h = positions[:-1,fid_l,1]
# feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float)
feet_l = ((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(float)  # np.float was removed in NumPy 1.24
feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2
feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2
feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2
# feet_r_h = positions[:-1,fid_r,1]
# feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float)
feet_r = ((feet_r_x + feet_r_y + feet_r_z) < velfactor).astype(float)
return feet_l, feet_r
#
feet_l, feet_r = foot_detect(positions, feet_thre)
# feet_l, feet_r = foot_detect(positions, 0.002)
'''Quaternion and Cartesian representation'''
r_rot = None
def get_rifke(positions):
'''Local pose'''
positions[..., 0] -= positions[:, 0:1, 0]
positions[..., 2] -= positions[:, 0:1, 2]
'''All pose face Z+'''
positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions)
return positions
def get_quaternion(positions):
skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
# (seq_len, joints_num, 4)
quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False)
'''Fix Quaternion Discontinuity'''
quat_params = qfix(quat_params)
# (seq_len, 4)
r_rot = quat_params[:, 0].copy()
# print(r_rot[0])
'''Root Linear Velocity'''
# (seq_len - 1, 3)
velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
# print(r_rot.shape, velocity.shape)
velocity = qrot_np(r_rot[1:], velocity)
'''Root Angular Velocity'''
# (seq_len - 1, 4)
r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
quat_params[1:, 0] = r_velocity
# (seq_len, joints_num, 4)
return quat_params, r_velocity, velocity, r_rot
def get_cont6d_params(positions):
skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
# (seq_len, joints_num, 4)
quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True)
'''Quaternion to continuous 6D'''
cont_6d_params = quaternion_to_cont6d_np(quat_params)
# (seq_len, 4)
r_rot = quat_params[:, 0].copy()
# print(r_rot[0])
'''Root Linear Velocity'''
# (seq_len - 1, 3)
velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
# print(r_rot.shape, velocity.shape)
velocity = qrot_np(r_rot[1:], velocity)
'''Root Angular Velocity'''
# (seq_len - 1, 4)
r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
# (seq_len, joints_num, 4)
return cont_6d_params, r_velocity, velocity, r_rot
cont_6d_params, r_velocity, velocity, r_rot = get_cont6d_params(positions)
positions = get_rifke(positions)
# trejec = np.cumsum(np.concatenate([np.array([[0, 0, 0]]), velocity], axis=0), axis=0)
# r_rotations, r_pos = recover_ric_glo_np(r_velocity, velocity[:, [0, 2]])
# plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*')
# plt.plot(ground_positions[:, 0, 0], ground_positions[:, 0, 2], marker='o', color='r')
# plt.plot(trejec[:, 0], trejec[:, 2], marker='^', color='g')
# plt.plot(r_pos[:, 0], r_pos[:, 2], marker='s', color='y')
# plt.xlabel('x')
# plt.ylabel('z')
# plt.axis('equal')
# plt.show()
'''Root height'''
root_y = positions[:, 0, 1:2]
'''Root rotation and linear velocity'''
# (seq_len-1, 1) rotation velocity along y-axis
# (seq_len-1, 2) linear velocity on xz plane
r_velocity = np.arcsin(r_velocity[:, 2:3])
l_velocity = velocity[:, [0, 2]]
# print(r_velocity.shape, l_velocity.shape, root_y.shape)
root_data = np.concatenate([r_velocity, l_velocity, root_y[:-1]], axis=-1)
'''Get Joint Rotation Representation'''
# (seq_len, (joints_num-1)*6) continuous 6D rotation for skeleton joints
rot_data = cont_6d_params[:, 1:].reshape(len(cont_6d_params), -1)
'''Get Joint Rotation Invariant Position Representation'''
# (seq_len, (joints_num-1)*3) local joint position
ric_data = positions[:, 1:].reshape(len(positions), -1)
'''Get Joint Velocity Representation'''
# (seq_len-1, joints_num*3)
local_vel = qrot_np(np.repeat(r_rot[:-1, None], global_positions.shape[1], axis=1),
global_positions[1:] - global_positions[:-1])
local_vel = local_vel.reshape(len(local_vel), -1)
data = root_data
data = np.concatenate([data, ric_data[:-1]], axis=-1)
data = np.concatenate([data, rot_data[:-1]], axis=-1)
# print(data.shape, local_vel.shape)
data = np.concatenate([data, local_vel], axis=-1)
data = np.concatenate([data, feet_l, feet_r], axis=-1)
return data | Get Foot Contacts | extract_features | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def get_rifke(positions):
'''Local pose'''
positions[..., 0] -= positions[:, 0:1, 0]
positions[..., 2] -= positions[:, 0:1, 2]
'''All pose face Z+'''
positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions)
return positions | Local pose | process_file.get_rifke | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def get_quaternion(positions):
skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
# (seq_len, joints_num, 4)
quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False)
'''Fix Quaternion Discontinuity'''
quat_params = qfix(quat_params)
# (seq_len, 4)
r_rot = quat_params[:, 0].copy()
# print(r_rot[0])
'''Root Linear Velocity'''
# (seq_len - 1, 3)
velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
# print(r_rot.shape, velocity.shape)
velocity = qrot_np(r_rot[1:], velocity)
'''Root Angular Velocity'''
# (seq_len - 1, 4)
r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
quat_params[1:, 0] = r_velocity
# (seq_len, joints_num, 4)
return quat_params, r_velocity, velocity, r_rot | Fix Quaternion Discontinuity | process_file.get_quaternion | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def get_cont6d_params(positions):
skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
# (seq_len, joints_num, 4)
quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True)
'''Quaternion to continuous 6D'''
cont_6d_params = quaternion_to_cont6d_np(quat_params)
# (seq_len, 4)
r_rot = quat_params[:, 0].copy()
# print(r_rot[0])
'''Root Linear Velocity'''
# (seq_len - 1, 3)
velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
# print(r_rot.shape, velocity.shape)
velocity = qrot_np(r_rot[1:], velocity)
'''Root Angular Velocity'''
# (seq_len - 1, 4)
r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
# (seq_len, joints_num, 4)
return cont_6d_params, r_velocity, velocity, r_rot | Quaternion to continuous 6D | process_file.get_cont6d_params | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def process_file(positions, feet_thre):
# (seq_len, joints_num, 3)
# '''Down Sample'''
# positions = positions[::ds_num]
'''Uniform Skeleton'''
positions = uniform_skeleton(positions, tgt_offsets)
'''Put on Floor'''
floor_height = positions.min(axis=0).min(axis=0)[1]
positions[:, :, 1] -= floor_height
# print(floor_height)
# plot_3d_motion("./positions_1.mp4", kinematic_chain, positions, 'title', fps=20)
'''XZ at origin'''
root_pos_init = positions[0]
root_pose_init_xz = root_pos_init[0] * np.array([1, 0, 1])
positions = positions - root_pose_init_xz
# '''Move the first pose to origin '''
# root_pos_init = positions[0]
# positions = positions - root_pos_init[0]
'''All initially face Z+'''
r_hip, l_hip, sdr_r, sdr_l = face_joint_indx
across1 = root_pos_init[r_hip] - root_pos_init[l_hip]
across2 = root_pos_init[sdr_r] - root_pos_init[sdr_l]
across = across1 + across2
across = across / np.sqrt((across ** 2).sum(axis=-1))[..., np.newaxis]
# forward (3,), rotate around y-axis
forward_init = np.cross(np.array([[0, 1, 0]]), across, axis=-1)
# forward (3,)
forward_init = forward_init / np.sqrt((forward_init ** 2).sum(axis=-1))[..., np.newaxis]
# print(forward_init)
target = np.array([[0, 0, 1]])
root_quat_init = qbetween_np(forward_init, target)
root_quat_init = np.ones(positions.shape[:-1] + (4,)) * root_quat_init
positions_b = positions.copy()
positions = qrot_np(root_quat_init, positions)
# plot_3d_motion("./positions_2.mp4", kinematic_chain, positions, 'title', fps=20)
'''New ground truth positions'''
global_positions = positions.copy()
# plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*')
# plt.plot(positions[:, 0, 0], positions[:, 0, 2], marker='o', color='r')
# plt.xlabel('x')
# plt.ylabel('z')
# plt.axis('equal')
# plt.show()
""" Get Foot Contacts """
def foot_detect(positions, thres):
velfactor, heightfactor = np.array([thres, thres]), np.array([3.0, 2.0])
feet_l_x = (positions[1:, fid_l, 0] - positions[:-1, fid_l, 0]) ** 2
feet_l_y = (positions[1:, fid_l, 1] - positions[:-1, fid_l, 1]) ** 2
feet_l_z = (positions[1:, fid_l, 2] - positions[:-1, fid_l, 2]) ** 2
# feet_l_h = positions[:-1,fid_l,1]
# feet_l = (((feet_l_x + feet_l_y + feet_l_z) < velfactor) & (feet_l_h < heightfactor)).astype(np.float)
feet_l = ((feet_l_x + feet_l_y + feet_l_z) < velfactor).astype(float)  # np.float was removed in NumPy 1.24
feet_r_x = (positions[1:, fid_r, 0] - positions[:-1, fid_r, 0]) ** 2
feet_r_y = (positions[1:, fid_r, 1] - positions[:-1, fid_r, 1]) ** 2
feet_r_z = (positions[1:, fid_r, 2] - positions[:-1, fid_r, 2]) ** 2
# feet_r_h = positions[:-1,fid_r,1]
# feet_r = (((feet_r_x + feet_r_y + feet_r_z) < velfactor) & (feet_r_h < heightfactor)).astype(np.float)
feet_r = ((feet_r_x + feet_r_y + feet_r_z) < velfactor).astype(float)
return feet_l, feet_r
#
feet_l, feet_r = foot_detect(positions, feet_thre)
# feet_l, feet_r = foot_detect(positions, 0.002)
'''Quaternion and Cartesian representation'''
r_rot = None
def get_rifke(positions):
'''Local pose'''
positions[..., 0] -= positions[:, 0:1, 0]
positions[..., 2] -= positions[:, 0:1, 2]
'''All pose face Z+'''
positions = qrot_np(np.repeat(r_rot[:, None], positions.shape[1], axis=1), positions)
return positions
def get_quaternion(positions):
skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
# (seq_len, joints_num, 4)
quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=False)
'''Fix Quaternion Discontinuity'''
quat_params = qfix(quat_params)
# (seq_len, 4)
r_rot = quat_params[:, 0].copy()
# print(r_rot[0])
'''Root Linear Velocity'''
# (seq_len - 1, 3)
velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
# print(r_rot.shape, velocity.shape)
velocity = qrot_np(r_rot[1:], velocity)
'''Root Angular Velocity'''
# (seq_len - 1, 4)
r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
quat_params[1:, 0] = r_velocity
# (seq_len, joints_num, 4)
return quat_params, r_velocity, velocity, r_rot
def get_cont6d_params(positions):
skel = Skeleton(n_raw_offsets, kinematic_chain, "cpu")
# (seq_len, joints_num, 4)
quat_params = skel.inverse_kinematics_np(positions, face_joint_indx, smooth_forward=True)
'''Quaternion to continuous 6D'''
cont_6d_params = quaternion_to_cont6d_np(quat_params)
# (seq_len, 4)
r_rot = quat_params[:, 0].copy()
# print(r_rot[0])
'''Root Linear Velocity'''
# (seq_len - 1, 3)
velocity = (positions[1:, 0] - positions[:-1, 0]).copy()
# print(r_rot.shape, velocity.shape)
velocity = qrot_np(r_rot[1:], velocity)
'''Root Angular Velocity'''
# (seq_len - 1, 4)
r_velocity = qmul_np(r_rot[1:], qinv_np(r_rot[:-1]))
# (seq_len, joints_num, 4)
return cont_6d_params, r_velocity, velocity, r_rot
cont_6d_params, r_velocity, velocity, r_rot = get_cont6d_params(positions)
positions = get_rifke(positions)
# trejec = np.cumsum(np.concatenate([np.array([[0, 0, 0]]), velocity], axis=0), axis=0)
# r_rotations, r_pos = recover_ric_glo_np(r_velocity, velocity[:, [0, 2]])
# plt.plot(positions_b[:, 0, 0], positions_b[:, 0, 2], marker='*')
# plt.plot(ground_positions[:, 0, 0], ground_positions[:, 0, 2], marker='o', color='r')
# plt.plot(trejec[:, 0], trejec[:, 2], marker='^', color='g')
# plt.plot(r_pos[:, 0], r_pos[:, 2], marker='s', color='y')
# plt.xlabel('x')
# plt.ylabel('z')
# plt.axis('equal')
# plt.show()
'''Root height'''
root_y = positions[:, 0, 1:2]
'''Root rotation and linear velocity'''
# (seq_len-1, 1) rotation velocity along y-axis
# (seq_len-1, 2) linear velocity on xz plane
r_velocity = np.arcsin(r_velocity[:, 2:3])
l_velocity = velocity[:, [0, 2]]
# print(r_velocity.shape, l_velocity.shape, root_y.shape)
root_data = np.concatenate([r_velocity, l_velocity, root_y[:-1]], axis=-1)
'''Get Joint Rotation Representation'''
# (seq_len, (joints_num-1)*6) continuous 6D rotation for skeleton joints
rot_data = cont_6d_params[:, 1:].reshape(len(cont_6d_params), -1)
'''Get Joint Rotation Invariant Position Representation'''
# (seq_len, (joints_num-1)*3) local joint position
ric_data = positions[:, 1:].reshape(len(positions), -1)
'''Get Joint Velocity Representation'''
# (seq_len-1, joints_num*3)
local_vel = qrot_np(np.repeat(r_rot[:-1, None], global_positions.shape[1], axis=1),
global_positions[1:] - global_positions[:-1])
local_vel = local_vel.reshape(len(local_vel), -1)
data = root_data
data = np.concatenate([data, ric_data[:-1]], axis=-1)
data = np.concatenate([data, rot_data[:-1]], axis=-1)
# print(data.shape, local_vel.shape)
data = np.concatenate([data, local_vel], axis=-1)
data = np.concatenate([data, feet_l, feet_r], axis=-1)
return data, global_positions, positions, l_velocity | Uniform Skeleton | process_file | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def recover_root_rot_pos(data):
rot_vel = data[..., 0]
r_rot_ang = torch.zeros_like(rot_vel).to(data.device)
'''Get Y-axis rotation from rotation velocity'''
r_rot_ang[..., 1:] = rot_vel[..., :-1]
r_rot_ang = torch.cumsum(r_rot_ang, dim=-1)
r_rot_quat = torch.zeros(data.shape[:-1] + (4,)).to(data.device)
r_rot_quat[..., 0] = torch.cos(r_rot_ang)
r_rot_quat[..., 2] = torch.sin(r_rot_ang)
r_pos = torch.zeros(data.shape[:-1] + (3,)).to(data.device)
r_pos[..., 1:, [0, 2]] = data[..., :-1, 1:3]
'''Add Y-axis rotation to root position'''
r_pos = qrot(qinv(r_rot_quat), r_pos)
r_pos = torch.cumsum(r_pos, dim=-2)
r_pos[..., 1] = data[..., 3]
return r_rot_quat, r_pos | Get Y-axis rotation from rotation velocity | recover_root_rot_pos | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
def recover_from_ric(data, joints_num):
r_rot_quat, r_pos = recover_root_rot_pos(data)
positions = data[..., 4:(joints_num - 1) * 3 + 4]
positions = positions.view(positions.shape[:-1] + (-1, 3))
'''Add Y-axis rotation to local joints'''
positions = qrot(qinv(r_rot_quat[..., None, :]).expand(positions.shape[:-1] + (4,)), positions)
'''Add root XZ to joints'''
positions[..., 0] += r_pos[..., 0:1]
positions[..., 2] += r_pos[..., 2:3]
'''Concatenate root and joints'''
positions = torch.cat([r_pos.unsqueeze(-2), positions], dim=-2)
return positions | Add Y-axis rotation to local joints | recover_from_ric | python | EricGuo5513/momask-codes | utils/motion_process.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/motion_process.py | MIT |
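A shape-level sketch, assuming the standard 263-dim HumanML3D feature layout with 22 joints (4 root channels + 21*3 ric + 21*6 rot + 22*3 velocity + 4 foot contacts):

    import torch
    J = 22
    feats = torch.randn(2, 40, 4 + (J - 1) * 3 + (J - 1) * 6 + J * 3 + 4)  # (batch, frames, 263)
    print(recover_from_ric(feats, J).shape)   # torch.Size([2, 40, 22, 3])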
def calculate_mpjpe(gt_joints, pred_joints):
"""
gt_joints: num_poses x num_joints(22) x 3
pred_joints: num_poses x num_joints(22) x 3
(obtained from recover_from_ric())
"""
assert gt_joints.shape == pred_joints.shape, f"GT shape: {gt_joints.shape}, pred shape: {pred_joints.shape}"
# Align by root (pelvis)
pelvis = gt_joints[:, [0]].mean(1)
gt_joints = gt_joints - torch.unsqueeze(pelvis, dim=1)
pelvis = pred_joints[:, [0]].mean(1)
pred_joints = pred_joints - torch.unsqueeze(pelvis, dim=1)
# Compute MPJPE
mpjpe = torch.linalg.norm(pred_joints - gt_joints, dim=-1) # num_poses x num_joints=22
mpjpe_seq = mpjpe.mean(-1) # num_poses
return mpjpe_seq | gt_joints: num_poses x num_joints(22) x 3
pred_joints: num_poses x num_joints(22) x 3
(obtained from recover_from_ric()) | calculate_mpjpe | python | EricGuo5513/momask-codes | utils/metrics.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/metrics.py | MIT |
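Because both inputs are root-centered inside the function, a constant global offset between prediction and ground truth does not change the score. A quick check, assuming torch is imported:

import torch

gt = torch.randn(60, 22, 3)                 # 60 poses, 22 joints
pred = gt + torch.tensor([1.0, 0.0, 0.0])   # same motion, shifted along x
err = calculate_mpjpe(gt, pred)
print(err.shape)         # torch.Size([60]): one value per pose
print(err.max().item())  # ~0: root alignment cancels the global shift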
def euclidean_distance_matrix(matrix1, matrix2):
"""
Params:
-- matrix1: N1 x D
-- matrix2: N2 x D
Returns:
-- dist: N1 x N2
dist[i, j] == distance(matrix1[i], matrix2[j])
"""
assert matrix1.shape[1] == matrix2.shape[1]
d1 = -2 * np.dot(matrix1, matrix2.T) # shape (num_test, num_train)
d2 = np.sum(np.square(matrix1), axis=1, keepdims=True) # shape (num_test, 1)
d3 = np.sum(np.square(matrix2), axis=1) # shape (num_train, )
dists = np.sqrt(d1 + d2 + d3) # broadcasting
return dists | Params:
-- matrix1: N1 x D
-- matrix2: N2 x D
Returns:
-- dist: N1 x N2
dist[i, j] == distance(matrix1[i], matrix2[j]) | euclidean_distance_matrix | python | EricGuo5513/momask-codes | utils/metrics.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/metrics.py | MIT |
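The expansion used here is the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, broadcast over all pairs. A small cross-check against the direct definition:

import numpy as np

a = np.random.randn(5, 16)
b = np.random.randn(7, 16)
fast = euclidean_distance_matrix(a, b)                         # (5, 7)
slow = np.linalg.norm(a[:, None, :] - b[None, :, :], axis=-1)  # direct pairwise norms
print(np.allclose(fast, slow, atol=1e-6))                      # True up to round-off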
def calculate_activation_statistics(activations):
"""
Params:
-- activation: num_samples x dim_feat
Returns:
-- mu: dim_feat
-- sigma: dim_feat x dim_feat
"""
mu = np.mean(activations, axis=0)
cov = np.cov(activations, rowvar=False)
return mu, cov | Params:
-- activation: num_samples x dim_feat
Returns:
-- mu: dim_feat
-- sigma: dim_feat x dim_feat | calculate_activation_statistics | python | EricGuo5513/momask-codes | utils/metrics.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/metrics.py | MIT |
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, \
'Training and test mean vectors have different lengths'
assert sigma1.shape == sigma2.shape, \
'Training and test covariances have different dimensions'
diff = mu1 - mu2
# Product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = ('fid calculation produces singular product; '
'adding %s to diagonal of cov estimates') % eps
print(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# Numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError('Imaginary component {}'.format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return (diff.dot(diff) + np.trace(sigma1) +
np.trace(sigma2) - 2 * tr_covmean) | Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by Dougal J. Sutherland.
Params:
-- mu1 : Numpy array containing the activations of a layer of the
inception net (like returned by the function 'get_predictions')
for generated samples.
-- mu2 : The sample mean over activations, precalculated on an
representative data set.
-- sigma1: The covariance matrix over activations for generated samples.
-- sigma2: The covariance matrix over activations, precalculated on an
representative data set.
Returns:
-- : The Frechet Distance. | calculate_frechet_distance | python | EricGuo5513/momask-codes | utils/metrics.py | https://github.com/EricGuo5513/momask-codes/blob/master/utils/metrics.py | MIT |
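Combined with calculate_activation_statistics above, this yields the usual FID recipe: fit a Gaussian to each set of activations, then compare the two Gaussians. A minimal sketch on random stand-in features (a real evaluation would use encoder activations):

import numpy as np

gen_feats = np.random.randn(1000, 64)         # stand-in for generated-sample activations
real_feats = np.random.randn(1000, 64) + 0.5  # stand-in for real-data activations
mu1, sigma1 = calculate_activation_statistics(gen_feats)
mu2, sigma2 = calculate_activation_statistics(real_feats)
print(calculate_frechet_distance(mu1, sigma1, mu2, sigma2))  # grows as the sets diverge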
def fetch_requirements(paths) -> List[str]:
"""
This function reads the requirements file.
Args:
path (str): the path to the requirements file.
Returns:
The lines in the requirements file.
"""
if not isinstance(paths, list):
paths = [paths]
requirements = []
for path in paths:
with open(path, "r") as fd:
requirements += [r.strip() for r in fd.readlines()]
return requirements | This function reads the requirements file.
Args:
path (str): the path to the requirements file.
Returns:
The lines in the requirements file. | fetch_requirements | python | hpcaitech/Open-Sora | setup.py | https://github.com/hpcaitech/Open-Sora/blob/master/setup.py | Apache-2.0 |
def fetch_readme() -> str:
"""
This function reads the README.md file in the current directory.
Returns:
The lines in the README file.
"""
with open("README.md", encoding="utf-8") as f:
return f.read() | This function reads the README.md file in the current directory.
Returns:
The lines in the README file. | fetch_readme | python | hpcaitech/Open-Sora | setup.py | https://github.com/hpcaitech/Open-Sora/blob/master/setup.py | Apache-2.0 |
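In a setup.py these two helpers typically feed straight into setup(); a sketch with a hypothetical package name and requirements path:

from setuptools import find_packages, setup

setup(
    name="example-package",                                   # hypothetical name
    packages=find_packages(),
    long_description=fetch_readme(),
    long_description_content_type="text/markdown",
    install_requires=fetch_requirements("requirements.txt"),  # hypothetical path
)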
def build_module(module: dict | nn.Module, builder: Registry, **kwargs) -> nn.Module | None:
"""Build module from config or return the module itself.
Args:
module (dict | nn.Module): The module to build.
builder (Registry): The registry to build module.
*args, **kwargs: Arguments passed to build function.
Returns:
(None | nn.Module): The created model.
"""
if module is None:
return None
if isinstance(module, dict):
cfg = deepcopy(module)
for k, v in kwargs.items():
cfg[k] = v
return builder.build(cfg)
elif isinstance(module, nn.Module):
return module
else:
raise TypeError(f"Only support dict and nn.Module, but got {type(module)}.") | Build module from config or return the module itself.
Args:
module (dict | nn.Module): The module to build.
builder (Registry): The registry to build module.
*args, **kwargs: Arguments passed to build function.
Returns:
(None | nn.Module): The created model. | build_module | python | hpcaitech/Open-Sora | opensora/registry.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/registry.py | Apache-2.0 |
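The dict branch lets a config name a registered class plus its kwargs, while prebuilt modules pass through untouched. A self-contained sketch with a minimal stand-in registry (the real project's Registry type works analogously but is not reproduced here):

import torch.nn as nn

class ToyRegistry:
    # Minimal stand-in: cfg["type"] selects the class, the rest become kwargs.
    def __init__(self):
        self._classes = {"Linear": nn.Linear}

    def build(self, cfg):
        return self._classes[cfg.pop("type")](**cfg)

MODELS = ToyRegistry()
m1 = build_module({"type": "Linear", "in_features": 4, "out_features": 8}, MODELS)
m2 = build_module(nn.ReLU(), MODELS)         # already a module: returned unchanged
print(type(m1).__name__, type(m2).__name__)  # Linear ReLU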
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps | Construct a layernorm module in the T5 style. No bias and no subtraction of mean. | __init__ | python | hpcaitech/Open-Sora | opensora/acceleration/shardformer/modeling/t5.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/acceleration/shardformer/modeling/t5.py | Apache-2.0 |
def prepare_block_inputs(
self,
img: Tensor,
img_ids: Tensor,
txt: Tensor, # t5 encoded vec
txt_ids: Tensor,
timesteps: Tensor,
y_vec: Tensor, # clip encoded vec
cond: Tensor = None,
guidance: Tensor | None = None,
):
"""
obtain the processed:
img: projected noisy img latent,
txt: text context (from t5),
vec: clip encoded vector,
pe: the positional embeddings for concatenated img and txt
"""
if img.ndim != 3 or txt.ndim != 3:
raise ValueError("Input img and txt tensors must have 3 dimensions.")
# running on sequences img
img = self.img_in(img)
if self.config.cond_embed:
if cond is None:
raise ValueError("Didn't get conditional input for conditional model.")
img = img + self.cond_in(cond)
vec = self.time_in(timestep_embedding(timesteps, 256))
if self.config.guidance_embed:
if guidance is None:
raise ValueError(
"Didn't get guidance strength for guidance distilled model."
)
vec = vec + self.guidance_in(timestep_embedding(guidance, 256))
vec = vec + self.vector_in(y_vec)
txt = self.txt_in(txt)
# concat: 4096 + t*h*2/4
ids = torch.cat((txt_ids, img_ids), dim=1)
pe = self.pe_embedder(ids)
if self._input_requires_grad:
# we only apply lora to double/single blocks, thus we only need to enable grad for these inputs
img.requires_grad_()
txt.requires_grad_()
return img, txt, vec, pe | obtain the processed:
img: projected noisy img latent,
txt: text context (from t5),
vec: clip encoded vector,
pe: the positional embeddings for concatenated img and txt | prepare_block_inputs | python | hpcaitech/Open-Sora | opensora/models/mmdit/model.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/mmdit/model.py | Apache-2.0 |
def enable_input_require_grads(self):
"""Fit peft lora. This method should not be called manually."""
self._input_requires_grad = True | Fit peft lora. This method should not be called manually. | enable_input_require_grads | python | hpcaitech/Open-Sora | opensora/models/mmdit/model.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/mmdit/model.py | Apache-2.0 |
def forward(
ctx,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
sp_group: dist.ProcessGroup,
sp_stream: torch.cuda.Stream,
dropout_p: float = 0.0,
softmax_scale: Optional[float] = None,
deterministic: Optional[bool] = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Ring attention forward
Args:
ctx (_type_): self
q (torch.Tensor): shape [B, S, N, D]
k (torch.Tensor): shape [B, S, N, D]
v (torch.Tensor): shape [B, S, N, D]
sp_group (dist.ProcessGroup): sequence parallel group
sp_stream (torch.cuda.Stream): sequence parallel stream
dropout_p (float, optional): dropout prob. Defaults to 0.0.
softmax_scale (Optional[float], optional): softmax scale. Defaults to None.
deterministic (Optional[bool], optional): backward deterministic mode. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: output and log sum exp. Output's shape should be [B, S, N, D]. LSE's shape should be [B, N, S].
"""
if softmax_scale is None:
softmax_scale = q.shape[-1] ** (-0.5)
sp_size = dist.get_world_size(sp_group)
kv_comms: List[RingComm] = [RingComm(sp_group) for _ in range(2)]
# [B, S, N, D]
q, k, v = [x.contiguous() for x in [q, k, v]]
# Pre-allocate double buffer for overlapping and receiving next step's inputs
kv_buffers = [torch.stack((k, v))] # (2, B, S, N, D)
kv_buffers.append(torch.empty_like(kv_buffers[0]))
# outputs
out = None
block_out = [None, None]
softmax_lse = [None, None]
block_softmax_lse = [None, None] # log sum exp, the denominator of softmax in attention
rng_states = [None for _ in range(sp_size)]
sp_streams = [torch.cuda.current_stream(), sp_stream]
def _kv_comm(i):
# Avoid overwriting attn input when it shares mem with buffer
if not RingAttention.ATTN_DONE.query():
kv_buffers[(i + 1) % 2] = torch.empty_like(kv_buffers[i % 2])
if i < sp_size - 1:
kv_comms[i % 2].send_recv(kv_buffers[i % 2], kv_buffers[(i + 1) % 2])
for i in range(sp_size):
with torch.cuda.stream(sp_streams[i % 2]):
# Wait for current kv from prev rank
# NOTE: waiting outside the current stream will NOT correctly synchronize.
if i == 0:
_kv_comm(i)
else:
kv_comms[(i + 1) % 2].wait()
kv_block = kv_buffers[i % 2]
q_block = q
block_out[i % 2], block_softmax_lse[i % 2], rng_states[i] = _fa_forward(
q_block, kv_block[0], kv_block[1], dropout_p, softmax_scale
)
RingAttention.ATTN_DONE.record()
# Pipeline the next KV comm with output correction instead of the next flash attn
# to minimize idle time when comm takes longer than attn.
_kv_comm(i + 1)
block_softmax_lse[i % 2] = (
block_softmax_lse[i % 2].transpose(1, 2).unsqueeze(-1).contiguous().float()
) # [B, N, S] -> [B, S, N, 1]
assert block_out[i % 2].shape[:-1] == block_softmax_lse[i % 2].shape[:-1]
# Output and log sum exp correction. Ideally overlap this with the next flash attn kernel.
# In reality this always finishes before next flash attn; no need for extra sync.
if i == 0:
out = block_out[0]
softmax_lse = block_softmax_lse[0]
else:
out, softmax_lse = _rescale_out_lse(out, block_out[i % 2], softmax_lse, block_softmax_lse[i % 2])
torch.cuda.current_stream().wait_stream(sp_stream)
out = out.to(q.dtype)
softmax_lse = softmax_lse.squeeze(-1).transpose(1, 2).contiguous()
ctx.dropout_p = dropout_p
ctx.softmax_scale = softmax_scale
ctx.deterministic = deterministic
ctx.sp_group = sp_group
ctx.save_for_backward(q, k, v, out, softmax_lse, *rng_states) # lse [B, N, S]
return out, softmax_lse | Ring attention forward
Args:
ctx (_type_): self
q (torch.Tensor): shape [B, S, N, D]
k (torch.Tensor): shape [B, S, N, D]
v (torch.Tensor): shape [B, S, N, D]
sp_group (dist.ProcessGroup): sequence parallel group
sp_stream (torch.cuda.Stream): sequence parallel stream
dropout_p (float, optional): dropout prob. Defaults to 0.0.
softmax_scale (Optional[float], optional): softmax scale. Defaults to None.
deterministic (Optional[bool], optional): backward deterministic mode. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: output and log sum exp. Output's shape should be [B, S, N, D]. LSE's shape should be [B, N, S]. | forward | python | hpcaitech/Open-Sora | opensora/models/mmdit/distributed.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/mmdit/distributed.py | Apache-2.0 |
def attention(
q,
k,
v,
sp_group,
dropout_p: float = 0.0,
softmax_scale: Optional[float] = None,
deterministic: bool = False,
return_softmax: bool = False,
):
"""Ring attention
Args:
q (torch.Tensor): shape [B, S, N, D]
k (torch.Tensor): shape [B, S, N, D]
v (torch.Tensor): shape [B, S, N, D]
sp_group (dist.ProcessGroup): sequence parallel group
dropout_p (float, optional): dropout prob. Defaults to 0.0.
softmax_scale (Optional[float], optional): softmax scale. Defaults to None.
deterministic (Optional[bool], optional): backward deterministic mode. Defaults to False.
return_softmax (bool, optional): return softmax or not. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: output and log sum exp. Output's shape should be [B, S, N, D]. LSE's shape should be [B, N, S].
"""
if RingAttention.ATTN_DONE is None:
RingAttention.ATTN_DONE = torch.cuda.Event()
if RingAttention.SP_STREAM is None:
RingAttention.SP_STREAM = torch.cuda.Stream()
out, softmax_lse = RingAttention.apply(
q, k, v, sp_group, RingAttention.SP_STREAM, dropout_p, softmax_scale, deterministic
)
if return_softmax:
return out, softmax_lse
return out | Ring attention
Args:
q (torch.Tensor): shape [B, S, N, D]
k (torch.Tensor): shape [B, S, N, D]
v (torch.Tensor): shape [B, S, N, D]
sp_group (dist.ProcessGroup): sequence parallel group
dropout_p (float, optional): dropout prob. Defaults to 0.0.
softmax_scale (Optional[float], optional): softmax scale. Defaults to None.
deterministic (Optional[bool], optional): backward deterministic mode. Defaults to False.
return_softmax (bool, optional): return softmax or not. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: output and log sum exp. Output's shape should be [B, S, N, D]. LSE's shape should be [B, N, S]. | attention | python | hpcaitech/Open-Sora | opensora/models/mmdit/distributed.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/mmdit/distributed.py | Apache-2.0 |
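The correction step merges two partial attention results through their log-sum-exp statistics: the combined normalizer is logaddexp(lse1, lse2), and each block output is reweighted by exp(lse_i - lse). The helper _rescale_out_lse is not shown in this file; a hedged sketch of the math it plausibly implements, with out shaped [B, S, N, D] and lse shaped [B, S, N, 1]:

import torch

def rescale_out_lse_sketch(out, block_out, lse, block_lse):
    # Stable combined log-sum-exp of the two partial softmax denominators.
    new_lse = torch.logaddexp(lse, block_lse)
    # Reweight both partial outputs to the shared normalizer and sum them.
    new_out = torch.exp(lse - new_lse) * out + torch.exp(block_lse - new_lse) * block_out
    return new_out, new_lse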
def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
"""
Create sinusoidal timestep embeddings.
:param t: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an (N, D) Tensor of positional embeddings.
"""
t = time_factor * t
half = dim // 2
freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(t.device)
args = t[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
if torch.is_floating_point(t):
embedding = embedding.to(t)
return embedding | Create sinusoidal timestep embeddings.
:param t: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an (N, D) Tensor of positional embeddings. | timestep_embedding | python | hpcaitech/Open-Sora | opensora/models/mmdit/layers.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/mmdit/layers.py | Apache-2.0 |
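With the default time_factor of 1000, fractional diffusion times in [0, 1] are scaled into the integer-step range before the sinusoids are evaluated; the output concatenates a cosine half and a sine half. A quick shape check, assuming torch and math are imported:

import torch

t = torch.tensor([0.0, 0.25, 1.0])
emb = timestep_embedding(t, dim=8)
print(emb.shape)  # torch.Size([3, 8]): [cos(args) | sin(args)] halves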
def rearrange_tensor(tensor):
"""
Rearranges the last dimension (D) of the input tensor based on the specified mapping:
2d -> d, 2d+1 -> D/2 + d.
Args:
tensor (torch.Tensor): Input tensor of shape [B, H, L, D], where D is even.
Returns:
torch.Tensor: Tensor with rearranged last dimension, same shape as input.
"""
B, H, L, D = tensor.shape
if D % 2 != 0:
raise ValueError("The last dimension D must be even.")
half_D = D // 2
indices = torch.empty(D, dtype=torch.long, device=tensor.device)
# Fill the indices based on the mapping rule
indices[:half_D] = torch.arange(0, D, 2, device=tensor.device)
indices[half_D:] = torch.arange(1, D, 2, device=tensor.device)
# Rearrange the tensor based on the computed indices
return tensor.index_select(dim=-1, index=indices) | Rearranges the last dimension (D) of the input tensor based on the specified mapping:
2d -> d, 2d+1 -> D/2 + d.
Args:
tensor (torch.Tensor): Input tensor of shape [B, H, L, D], where D is even.
Returns:
torch.Tensor: Tensor with rearranged last dimension, same shape as input. | rearrange_tensor | python | hpcaitech/Open-Sora | opensora/models/mmdit/math.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/mmdit/math.py | Apache-2.0 |
def reverse_rearrange_tensor(tensor):
"""
Restores the original order of the last dimension (D) of the input tensor based on the reverse mapping:
d -> 2d, D/2 + d -> 2d + 1.
Args:
tensor (torch.Tensor): Input tensor of shape [B, H, L, D], where D is even.
Returns:
torch.Tensor: Tensor with restored original last dimension order, same shape as input.
"""
B, H, L, D = tensor.shape
if D % 2 != 0:
raise ValueError("The last dimension D must be even.")
half_D = D // 2
reverse_indices = torch.empty(D, dtype=torch.long, device=tensor.device)
# Fill the reverse indices to restore the original order
reverse_indices[::2] = torch.arange(half_D, device=tensor.device)
reverse_indices[1::2] = torch.arange(half_D, D, device=tensor.device)
# Rearrange the tensor based on the reverse indices
return tensor.index_select(dim=-1, index=reverse_indices) | Restores the original order of the last dimension (D) of the input tensor based on the reverse mapping:
d -> 2d, D/2 + d -> 2d + 1.
Args:
tensor (torch.Tensor): Input tensor of shape [B, H, L, D], where D is even.
Returns:
torch.Tensor: Tensor with restored original last dimension order, same shape as input. | reverse_rearrange_tensor | python | hpcaitech/Open-Sora | opensora/models/mmdit/math.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/mmdit/math.py | Apache-2.0 |
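The two permutations are exact inverses: one moves even channel indices into the first half and odd indices into the second, the other restores the interleaving. A quick round-trip check:

import torch

x = torch.randn(2, 4, 16, 64)        # [B, H, L, D] with even D
y = rearrange_tensor(x)              # 2d -> d, 2d+1 -> D/2 + d
x_back = reverse_rearrange_tensor(y)
print(torch.equal(x, x_back))        # True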
def build_downsample_block(
block_type: str,
in_channels: int,
out_channels: int,
shortcut: Optional[str],
is_video: bool,
temporal_downsample: bool = False,
) -> nn.Module:
"""
Spatial downsample is always performed. Temporal downsample is optional.
"""
if block_type == "Conv":
if is_video:
if temporal_downsample:
stride = (2, 2, 2)
else:
stride = (1, 2, 2)
else:
stride = 2
block = ConvLayer(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
use_bias=True,
norm=None,
act_func=None,
is_video=is_video,
)
elif block_type == "ConvPixelUnshuffle":
if is_video:
raise NotImplementedError("ConvPixelUnshuffle downsample is not supported for video")
block = ConvPixelUnshuffleDownSampleLayer(
in_channels=in_channels, out_channels=out_channels, kernel_size=3, factor=2
)
else:
raise ValueError(f"block_type {block_type} is not supported for downsampling")
if shortcut is None:
pass
elif shortcut == "averaging":
shortcut_block = PixelUnshuffleChannelAveragingDownSampleLayer(
in_channels=in_channels, out_channels=out_channels, factor=2, temporal_downsample=temporal_downsample
)
block = ResidualBlock(block, shortcut_block)
else:
raise ValueError(f"shortcut {shortcut} is not supported for downsample")
return block | Spatial downsample is always performed. Temporal downsample is optional. | build_downsample_block | python | hpcaitech/Open-Sora | opensora/models/dc_ae/models/dc_ae.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/dc_ae/models/dc_ae.py | Apache-2.0 |
def pixel_shuffle_3d(x, upscale_factor):
"""
3D pixel shuffle operation.
"""
B, C, T, H, W = x.shape
r = upscale_factor
assert C % (r * r * r) == 0, "The number of channels must be a multiple of the cube of the upscale factor"
C_new = C // (r * r * r)
x = x.view(B, C_new, r, r, r, T, H, W)
if VERBOSE:
print("x.view:")
print(x)
print("x.view.shape:")
print(x.shape)
x = x.permute(0, 1, 5, 2, 6, 3, 7, 4)
if VERBOSE:
print("x.permute:")
print(x)
print("x.permute.shape:")
print(x.shape)
y = x.reshape(B, C_new, T * r, H * r, W * r)
return y | 3D pixel shuffle operation. | pixel_shuffle_3d | python | hpcaitech/Open-Sora | opensora/models/dc_ae/models/nn/vo_ops.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/dc_ae/models/nn/vo_ops.py | Apache-2.0 |
def pixel_unshuffle_3d(x, downsample_factor):
"""
3D pixel unshuffle operation.
"""
B, C, T, H, W = x.shape
r = downsample_factor
assert T % r == 0, f"The temporal dimension must be divisible by the downsample factor, got shape {x.shape}"
assert H % r == 0, f"The height must be divisible by the downsample factor, got shape {x.shape}"
assert W % r == 0, f"The width must be divisible by the downsample factor, got shape {x.shape}"
T_new = T // r
H_new = H // r
W_new = W // r
C_new = C * (r * r * r)
x = x.view(B, C, T_new, r, H_new, r, W_new, r)
x = x.permute(0, 1, 3, 5, 7, 2, 4, 6)
y = x.reshape(B, C_new, T_new, H_new, W_new)
return y | 3D pixel unshuffle operation. | pixel_unshuffle_3d | python | hpcaitech/Open-Sora | opensora/models/dc_ae/models/nn/vo_ops.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/dc_ae/models/nn/vo_ops.py | Apache-2.0 |
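With matching factors the two ops are inverses, trading a factor of r^3 in channels for a factor of r along each of T, H, and W. A quick round-trip check (assuming the module-level VERBOSE flag is False so the debug prints stay silent):

import torch

x = torch.randn(1, 16, 4, 8, 8)            # B, C, T, H, W with C divisible by r^3
y = pixel_shuffle_3d(x, upscale_factor=2)  # -> (1, 2, 8, 16, 16)
x_back = pixel_unshuffle_3d(y, downsample_factor=2)
print(y.shape, torch.equal(x, x_back))     # torch.Size([1, 2, 8, 16, 16]) True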
def chunked_interpolate(x, scale_factor, mode="nearest"):
"""
Interpolate large tensors by chunking along the channel dimension. https://discuss.pytorch.org/t/error-using-f-interpolate-for-large-3d-input/207859
Only supports 'nearest' interpolation mode.
Args:
x (torch.Tensor): Input tensor (B, C, D, H, W)
scale_factor: Tuple of scaling factors (d, h, w)
Returns:
torch.Tensor: Interpolated tensor
"""
assert (
mode == "nearest"
), "Only the nearest mode is supported" # actually other modes are theoretically supported but not tested
if len(x.shape) != 5:
raise ValueError("Expected 5D input tensor (B, C, D, H, W)")
# Calculate max chunk size to avoid int32 overflow. num_elements < max_int32
# Max int32 is 2^31 - 1
max_elements_per_chunk = 2**31 - 1
# Calculate output spatial dimensions
out_d = math.ceil(x.shape[2] * scale_factor[0])
out_h = math.ceil(x.shape[3] * scale_factor[1])
out_w = math.ceil(x.shape[4] * scale_factor[2])
# Calculate max channels per chunk to stay under limit
elements_per_channel = out_d * out_h * out_w
max_channels = max_elements_per_chunk // (x.shape[0] * elements_per_channel)
# Use smaller of max channels or input channels
chunk_size = min(max_channels, x.shape[1])
# Ensure at least 1 channel per chunk
chunk_size = max(1, chunk_size)
if VERBOSE:
print(f"Input channels: {x.shape[1]}")
print(f"Chunk size: {chunk_size}")
print(f"max_channels: {max_channels}")
print(f"num_chunks: {math.ceil(x.shape[1] / chunk_size)}")
chunks = []
for i in range(0, x.shape[1], chunk_size):
start_idx = i
end_idx = min(i + chunk_size, x.shape[1])
chunk = x[:, start_idx:end_idx, :, :, :]
interpolated_chunk = F.interpolate(chunk, scale_factor=scale_factor, mode="nearest")
chunks.append(interpolated_chunk)
if not chunks:
raise ValueError(f"No chunks were generated. Input shape: {x.shape}")
# Concatenate chunks along channel dimension
return torch.cat(chunks, dim=1) | Interpolate large tensors by chunking along the channel dimension. https://discuss.pytorch.org/t/error-using-f-interpolate-for-large-3d-input/207859
Only supports 'nearest' interpolation mode.
Args:
x (torch.Tensor): Input tensor (B, C, D, H, W)
scale_factor: Tuple of scaling factors (d, h, w)
Returns:
torch.Tensor: Interpolated tensor | chunked_interpolate | python | hpcaitech/Open-Sora | opensora/models/dc_ae/models/nn/vo_ops.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/dc_ae/models/nn/vo_ops.py | Apache-2.0 |
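For inputs that fit in a single chunk the result matches F.interpolate exactly; chunking only changes how the work is split, not the values. A quick equivalence check (again assuming the module-level VERBOSE flag is defined and False):

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 4, 16, 16)
a = chunked_interpolate(x, scale_factor=(2, 2, 2))
b = F.interpolate(x, scale_factor=(2, 2, 2), mode="nearest")
print(torch.equal(a, b))  # True: nearest-neighbor is deterministic per channel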
def forward(
ctx,
q: torch.Tensor,
k: torch.Tensor,
v: torch.Tensor,
sp_group: dist.ProcessGroup,
sp_stream: torch.cuda.Stream,
softmax_scale: Optional[float] = None,
attn_mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Ring attention forward
Args:
ctx (_type_): self
q (torch.Tensor): shape [B, S/P, N, D]
k (torch.Tensor): shape [B, S/P, N, D]
v (torch.Tensor): shape [B, S/P, N, D]
sp_group (dist.ProcessGroup): sequence parallel group
sp_stream (torch.cuda.Stream): sequence parallel stream
softmax_scale (Optional[float], optional): softmax scale. Defaults to None.
attn_mask (Optional[torch.Tensor], optional): attention mask shape [B, N, S/P, S]. Defaults to None.
Returns:
Tuple[torch.Tensor, torch.Tensor]: output and log sum exp. Output's shape should be [B, S/P, N, D]. LSE's shape should be [B, N, S/P].
"""
if softmax_scale is None:
softmax_scale = q.shape[-1] ** (-0.5)
sp_size = dist.get_world_size(sp_group)
sp_rank = dist.get_rank(sp_group)
kv_comms: List[RingComm] = [RingComm(sp_group) for _ in range(2)]
block_attn_masks = [None] * sp_size
if attn_mask is not None:
# if attn_mask is splitted, uncomment the following line
# attn_mask = attn_mask.chunk(sp_size, dim=2)[sp_rank]
block_attn_masks = attn_mask.chunk(sp_size, dim=-1)
# [B, S, N, D]
q, k, v = [x.contiguous() for x in [q, k, v]]
# Pre-allocate double buffer for overlapping and receiving next step's inputs
kv_buffers = [torch.stack((k, v))] # (2, B, S, N, D)
kv_buffers.append(torch.empty_like(kv_buffers[0]))
# outputs
out = None
block_out = [None, None]
softmax_lse = [None, None]
block_softmax_lse = [None, None] # log sum exp, the denominator of softmax in attention
rng_states = [None for _ in range(sp_size)]
sp_streams = [torch.cuda.current_stream(), sp_stream]
def _kv_comm(i):
# Avoid overwriting attn input when it shares mem with buffer
if not MemEfficientRingAttention.ATTN_DONE.query():
kv_buffers[(i + 1) % 2] = torch.empty_like(kv_buffers[i % 2])
if i < sp_size - 1:
kv_comms[i % 2].send_recv(kv_buffers[i % 2], kv_buffers[(i + 1) % 2])
block_idx = sp_rank
for i in range(sp_size):
with torch.cuda.stream(sp_streams[i % 2]):
# Wait for current kv from prev rank
# NOTE: waiting outside the current stream will NOT correctly synchronize.
if i == 0:
_kv_comm(i)
else:
kv_comms[(i + 1) % 2].wait()
kv_block = kv_buffers[i % 2]
q_block = q
block_out[i % 2], block_softmax_lse[i % 2], rng_states[i] = _attn_fwd(
q_block, kv_block[0], kv_block[1], attn_bias=block_attn_masks[block_idx], scale=softmax_scale
)
MemEfficientRingAttention.ATTN_DONE.record()
# Pipeline the next KV comm with output correction instead of the next flash attn
# to minimize idle time when comm takes longer than attn.
_kv_comm(i + 1)
block_softmax_lse[i % 2] = (
block_softmax_lse[i % 2].transpose(1, 2).unsqueeze(-1).contiguous().float()
) # [B, N, S] -> [B, S, N, 1]
assert (
block_out[i % 2].shape[:-1] == block_softmax_lse[i % 2].shape[:-1]
), f"{block_out[i % 2].shape} != {block_softmax_lse[i % 2].shape}"
# Output and log sum exp correction. Ideally overlap this with the next flash attn kernel.
# In reality this always finishes before next flash attn; no need for extra sync.
if i == 0:
out = block_out[0]
softmax_lse = block_softmax_lse[0]
else:
out, softmax_lse = _rescale_out_lse(out, block_out[i % 2], softmax_lse, block_softmax_lse[i % 2])
block_idx = (block_idx - 1) % sp_size
torch.cuda.current_stream().wait_stream(sp_stream)
out = out.to(q.dtype)
softmax_lse = softmax_lse.squeeze(-1).transpose(1, 2).contiguous()
ctx.softmax_scale = softmax_scale
ctx.block_attn_masks = block_attn_masks
ctx.sp_group = sp_group
ctx.save_for_backward(q, k, v, out, softmax_lse, *rng_states) # lse [B, N, S]
return out, softmax_lse | Ring attention forward
Args:
ctx (_type_): self
q (torch.Tensor): shape [B, S/P, N, D]
k (torch.Tensor): shape [B, S/P, N, D]
v (torch.Tensor): shape [B, S/P, N, D]
sp_group (dist.ProcessGroup): sequence parallel group
sp_stream (torch.cuda.Stream): sequence parallel stream
softmax_scale (Optional[float], optional): softmax scale. Defaults to None.
attn_mask (Optional[torch.Tensor], optional): attention mask shape [B, N, S/P, S]. Defaults to None.
Returns:
Tuple[torch.Tensor, torch.Tensor]: output and log sum exp. Output's shape should be [B, S/P, N, D]. LSE's shape should be [B, N, S/P]. | forward | python | hpcaitech/Open-Sora | opensora/models/hunyuan_vae/distributed.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/hunyuan_vae/distributed.py | Apache-2.0 |
def attention(
q,
k,
v,
sp_group,
softmax_scale: Optional[float] = None,
attn_mask: Optional[torch.Tensor] = None,
return_softmax: bool = False,
):
"""Ring attention
Args:
q (torch.Tensor): shape [B, S, N, D]
k (torch.Tensor): shape [B, S, N, D]
v (torch.Tensor): shape [B, S, N, D]
sp_group (dist.ProcessGroup): sequence parallel group
softmax_scale (Optional[float], optional): softmax scale. Defaults to None.
attn_mask (Optional[torch.Tensor], optional): attention mask. Defaults to None.
return_softmax (bool, optional): return softmax or not. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: output and log sum exp. Output's shape should be [B, S, N, D]. LSE's shape should be [B, N, S].
"""
if MemEfficientRingAttention.ATTN_DONE is None:
MemEfficientRingAttention.ATTN_DONE = torch.cuda.Event()
if MemEfficientRingAttention.SP_STREAM is None:
MemEfficientRingAttention.SP_STREAM = torch.cuda.Stream()
out, softmax_lse = MemEfficientRingAttention.apply(
q, k, v, sp_group, MemEfficientRingAttention.SP_STREAM, softmax_scale, attn_mask
)
if return_softmax:
return out, softmax_lse
return out | Ring attention
Args:
q (torch.Tensor): shape [B, S, N, D]
k (torch.Tensor): shape [B, S, N, D]
v (torch.Tensor): shape [B, S, N, D]
sp_group (dist.ProcessGroup): sequence parallel group
softmax_scale (Optional[float], optional): softmax scale. Defaults to None.
attn_mask (Optional[torch.Tensor], optional): attention mask. Defaults to None.
return_softmax (bool, optional): return softmax or not. Defaults to False.
Returns:
Tuple[torch.Tensor, torch.Tensor]: output and log sum exp. Output's shape should be [B, S, N, D]. LSE's shape should be [B, N, S]. | attention | python | hpcaitech/Open-Sora | opensora/models/hunyuan_vae/distributed.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/hunyuan_vae/distributed.py | Apache-2.0 |
def from_native_module(module: Attention, process_group, *args, **kwargs) -> Attention:
"""
Convert a native Attention module to a sequence-parallel ring-attention module,
and mark its projection parameters for gradient aggregation.
Args:
module (Attention): The native Attention module to be converted.
process_group: The sequence-parallel process group.
Returns:
Attention: The Attention module with a MemEfficientRingAttnProcessor installed.
"""
# Since gradients are computed using only a subset of the data,
# aggregation of these gradients is necessary during backpropagation.
# Therefore, we annotate these parameters in advance to indicate the need for gradient aggregation.
SeqParallelUtils.marked_as_sp_partial_derived_param(module.to_q.weight)
SeqParallelUtils.marked_as_sp_partial_derived_param(module.to_k.weight)
SeqParallelUtils.marked_as_sp_partial_derived_param(module.to_v.weight)
if module.to_q.bias is not None:
SeqParallelUtils.marked_as_sp_partial_derived_param(module.to_q.bias)
SeqParallelUtils.marked_as_sp_partial_derived_param(module.to_k.bias)
SeqParallelUtils.marked_as_sp_partial_derived_param(module.to_v.bias)
module.set_processor(MemEfficientRingAttnProcessor(process_group))
return module | Convert a native Attention module to a sequence-parallel ring-attention module,
and mark its projection parameters for gradient aggregation.
Args:
module (Attention): The native Attention module to be converted.
process_group: The sequence-parallel process group.
Returns:
Attention: The Attention module with a MemEfficientRingAttnProcessor installed. | from_native_module | python | hpcaitech/Open-Sora | opensora/models/hunyuan_vae/distributed.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/hunyuan_vae/distributed.py | Apache-2.0 |
def set_default_attn_processor(self):
"""
Disables custom attention processors and sets the default attention implementation.
"""
if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnAddedKVProcessor()
elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
processor = AttnProcessor()
else:
raise ValueError(
f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
)
self.set_attn_processor(processor, _remove_lora=True) | Disables custom attention processors and sets the default attention implementation. | set_default_attn_processor | python | hpcaitech/Open-Sora | opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | Apache-2.0 |
def encode(
self,
x: torch.FloatTensor,
sample_posterior: bool = True,
return_posterior: bool = False,
generator: Optional[torch.Generator] = None,
) -> Union[torch.FloatTensor, Tuple[DiagonalGaussianDistribution]]:
"""
Encode a batch of images/videos into latents.
Args:
x (`torch.FloatTensor`): Input batch of images/videos.
sample_posterior (`bool`, *optional*, defaults to `True`):
Whether to sample from the posterior or take its mode.
return_posterior (`bool`, *optional*, defaults to `False`):
Whether to also return the posterior distribution.
Returns:
The scaled and shifted latents, plus the `DiagonalGaussianDistribution` posterior when `return_posterior` is True.
"""
assert len(x.shape) == 5, "The input tensor should have 5 dimensions."
if self.use_temporal_tiling and x.shape[2] > self.tile_sample_min_tsize:
posterior = self.temporal_tiled_encode(x)
elif self.use_spatial_tiling and (
x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size
):
posterior = self.spatial_tiled_encode(x)
else:
if self.use_slicing and x.shape[0] > 1:
encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
h = torch.cat(encoded_slices)
else:
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
if sample_posterior:
z = posterior.sample(generator=generator)
else:
z = posterior.mode()
z = self.scale_factor * (z - self.shift_factor) # shift & scale
if return_posterior:
return z, posterior
else:
return z | Encode a batch of images/videos into latents.
Args:
x (`torch.FloatTensor`): Input batch of images/videos.
sample_posterior (`bool`, *optional*, defaults to `True`):
Whether to sample from the posterior or take its mode.
return_posterior (`bool`, *optional*, defaults to `False`):
Whether to also return the posterior distribution.
Returns:
The scaled and shifted latents, plus the `DiagonalGaussianDistribution` posterior when `return_posterior` is True. | encode | python | hpcaitech/Open-Sora | opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | Apache-2.0 |
def decode(self, z: torch.FloatTensor) -> torch.FloatTensor:
"""
Decode a batch of images/videos.
Args:
z (`torch.FloatTensor`): Input batch of latent vectors.
Returns:
torch.FloatTensor: The decoded images/videos.
"""
z = z / self.scale_factor + self.shift_factor # scale & shift
if self.use_slicing and z.shape[0] > 1:
decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
decoded = torch.cat(decoded_slices)
else:
decoded = self._decode(z).sample
return decoded | Decode a batch of images/videos.
Args:
z (`torch.FloatTensor`): Input batch of latent vectors.
Returns:
torch.FloatTensor: The decoded images/videos. | decode | python | hpcaitech/Open-Sora | opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | Apache-2.0 |
def fuse_qkv_projections(self):
"""
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
self.original_attn_processors = None
for _, attn_processor in self.attn_processors.items():
if "Added" in str(attn_processor.__class__.__name__):
raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
self.original_attn_processors = self.attn_processors
for module in self.modules():
if isinstance(module, Attention):
module.fuse_projections(fuse=True) | Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip> | fuse_qkv_projections | python | hpcaitech/Open-Sora | opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | Apache-2.0 |
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip>
"""
if self.original_attn_processors is not None:
self.set_attn_processor(self.original_attn_processors) | Disables the fused QKV projection if enabled.
<Tip warning={true}>
This API is 🧪 experimental.
</Tip> | unfuse_qkv_projections | python | hpcaitech/Open-Sora | opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/hunyuan_vae/autoencoder_kl_causal_3d.py | Apache-2.0 |
def shardformer_t5(t5: T5EncoderModel) -> T5EncoderModel:
"""
Shardformer for T5 model
Args:
t5: T5 model to be optimized
Returns:
optimized T5 model
"""
dtype = t5.shared.weight.dtype
shard_config = ShardConfig(
enable_tensor_parallelism=False,
enable_jit_fused=True,
)
shard_former = ShardFormer(shard_config=shard_config)
optim_model, _ = shard_former.optimize(t5, policy=T5EncoderPolicy())
optim_model = optim_model.to(dtype).eval().requires_grad_(False)
return optim_model | Shardformer for T5 model
Args:
t5: T5 model to be optimized
Returns:
optimized T5 model | shardformer_t5 | python | hpcaitech/Open-Sora | opensora/models/text/conditioner.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/text/conditioner.py | Apache-2.0 |
def shard_channelwise(
tensor: torch.Tensor, group_or_device_mesh: Union[ProcessGroup, DeviceMesh] = None
) -> torch.Tensor:
"""
Shard the second dim of the given tensor.
Args:
tensor (torch.Tensor): The tensor to be sharded.
group_or_device_mesh (Union[ProcessGroup, DeviceMesh], optional): The group or device mesh to shard the tensor.
If None, the tensor will be sharded with respect to the global process group.
Defaults to None.
inplace (bool, optional): Whether to shard the tensor in-place. Defaults to False.
Returns:
torch.Tensor: The sharded tensor.
"""
# if the group_or_device_mesh is None, we shard the tensor with respect to the global process group
if group_or_device_mesh is None:
group_or_device_mesh = dist.GroupMember.WORLD
if isinstance(group_or_device_mesh, ProcessGroup):
device_mesh = DeviceMesh.from_process_group(group_or_device_mesh)
else:
assert len(group_or_device_mesh.shape) == 1, "Only 1D DeviceMesh is accepted for channel-wise sharding."
device_mesh = group_or_device_mesh
sharding_spec = ShardingSpec(dim_size=tensor.dim(), dim_partition_dict={1: [0]})
return distribute_tensor(tensor, device_mesh, sharding_spec) | Shard the second dim of the given tensor.
Args:
tensor (torch.Tensor): The tensor to be sharded.
group_or_device_mesh (Union[ProcessGroup, DeviceMesh], optional): The group or device mesh to shard the tensor.
If None, the tensor will be sharded with respect to the global process group.
Defaults to None.
inplace (bool, optional): Whether to shard the tensor in-place. Defaults to False.
Returns:
torch.Tensor: The sharded tensor. | shard_channelwise | python | hpcaitech/Open-Sora | opensora/models/vae/tensor_parallel.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/vae/tensor_parallel.py | Apache-2.0 |
def __init__(
self,
input_nc=1,
ndf=64,
n_layers=5,
norm_layer=nn.BatchNorm3d,
conv_cls="conv3d",
dropout=0.30,
):
"""
Construct a 3D PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input volumes
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer to use (defaults to nn.BatchNorm3d)
"""
super(NLayerDiscriminator3D, self).__init__()
assert conv_cls == "conv3d"
use_bias = False
kw = 3
padw = 1
sequence = [nn.Conv3d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv3d(
ndf * nf_mult_prev,
ndf * nf_mult,
kernel_size=(kw, kw, kw),
stride=2 if n == 1 else (1, 2, 2),
padding=padw,
bias=use_bias,
),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
nn.Dropout(dropout),
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv3d(
ndf * nf_mult_prev,
ndf * nf_mult,
kernel_size=(kw, kw, kw),
stride=1,
padding=padw,
bias=use_bias,
),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
nn.Dropout(dropout),
nn.Conv3d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw),
]
self.main = nn.Sequential(*sequence) | Construct a 3D PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input volumes
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer to use (defaults to nn.BatchNorm3d) | __init__ | python | hpcaitech/Open-Sora | opensora/models/vae/discriminator.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/vae/discriminator.py | Apache-2.0 |
def forward(self, x):
"""Standard forward."""
return self.main(x) | Standard forward. | forward | python | hpcaitech/Open-Sora | opensora/models/vae/discriminator.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/vae/discriminator.py | Apache-2.0 |
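As a PatchGAN, the discriminator emits a grid of per-patch logits rather than a single scalar. A smoke test on a dummy volume:

import torch

D = NLayerDiscriminator3D(input_nc=1, ndf=64, n_layers=3)
logits = D(torch.randn(2, 1, 16, 64, 64))  # B, C, T, H, W
print(logits.shape)  # torch.Size([2, 1, 4, 8, 8]): one logit per 3D patch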
def __init__(
self,
parameters,
deterministic=False,
):
"""Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models"""
self.parameters = parameters
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar)
self.var = torch.exp(self.logvar)
if self.deterministic:
self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device, dtype=self.mean.dtype) | Stripped version of https://github.com/richzhang/PerceptualSimilarity/tree/master/models | __init__ | python | hpcaitech/Open-Sora | opensora/models/vae/utils.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/models/vae/utils.py | Apache-2.0 |
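The split convention is channel-wise: the first half of dim 1 is the mean and the second half the log-variance (clamped for stability). Sampling follows the reparameterization trick; a minimal sketch (the full class in the repo also exposes sampling and KL helpers, not shown here):

import torch

params = torch.randn(2, 8, 4, 4)  # 2 * latent_channels along dim 1
dist = DiagonalGaussianDistribution(params)
z = dist.mean + dist.std * torch.randn_like(dist.mean)  # reparameterized sample
print(dist.mean.shape, z.shape)   # torch.Size([2, 4, 4, 4]) for both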
def collate_fn_batch(batch):
"""
Used only with BatchDistributedSampler
"""
# filter out None
batch = [x for x in batch if x is not None]
res = torch.utils.data.default_collate(batch)
# squeeze the first dimension, which is due to torch.stack() in default_collate()
if isinstance(res, collections.abc.Mapping):
for k, v in res.items():
if isinstance(v, torch.Tensor):
res[k] = v.squeeze(0)
elif isinstance(res, collections.abc.Sequence):
res = [x.squeeze(0) if isinstance(x, torch.Tensor) else x for x in res]
elif isinstance(res, torch.Tensor):
res = res.squeeze(0)
else:
raise TypeError
return res | Used only with BatchDistributedSampler | collate_fn_batch | python | hpcaitech/Open-Sora | opensora/datasets/dataloader.py | https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/dataloader.py | Apache-2.0 |
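With BatchDistributedSampler each sampler item is already a full batch, so the DataLoader's leading batch dimension of 1 is redundant and gets squeezed away. A quick illustration:

import torch

item = {"video": torch.randn(4, 3, 8, 8)}  # one item = a prebuilt batch of 4
res = collate_fn_batch([item])             # default_collate adds a leading dim of 1
print(res["video"].shape)                  # torch.Size([4, 3, 8, 8]) after squeeze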