code | docstring | func_name | language | repo | path | url | license |
---|---|---|---|---|---|---|---|
def construct_meta_info(frames_dir_path: Path) -> dict:
"""
Construct meta information for a given frames directory.
Args:
frames_dir_path (Path): The path to the frames directory.
Returns:
dict: A dictionary containing the meta information for the frames directory, or None if the required files do not exist.
"""
mask_path = str(frames_dir_path).replace("images", "face_mask") + ".png"
face_emb_path = str(frames_dir_path).replace("images", "face_emb") + ".pt"
if not os.path.exists(mask_path):
print(f"Mask path not found: {mask_path}")
return None
if torch.load(face_emb_path) is None:
print(f"Face emb is None: {face_emb_path}")
return None
return {
"image_path": str(frames_dir_path),
"mask_path": mask_path,
"face_emb": face_emb_path,
}
|
Construct meta information for a given frames directory.
Args:
frames_dir_path (Path): The path to the frames directory.
Returns:
dict: A dictionary containing the meta information for the frames directory, or None if the required files do not exist.
|
construct_meta_info
|
python
|
jdh-algo/JoyHallo
|
scripts/extract_meta_info_stage1.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/extract_meta_info_stage1.py
|
MIT
|
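A minimal sketch of the path convention construct_meta_info relies on: the mask and face-embedding files live in sibling directories obtained by swapping the "images" path segment. The dataset_root/clip_0001 names below are hypothetical.

```python
from pathlib import Path

# Hypothetical frames directory following the expected layout.
frames_dir = Path("dataset_root/images/clip_0001")

# Sibling artifacts are located by swapping the "images" segment, as in construct_meta_info.
mask_path = str(frames_dir).replace("images", "face_mask") + ".png"
face_emb_path = str(frames_dir).replace("images", "face_emb") + ".pt"

print(mask_path)      # dataset_root/face_mask/clip_0001.png (POSIX path separators)
print(face_emb_path)  # dataset_root/face_emb/clip_0001.pt
```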
def main():
"""
Main function to extract meta info for training.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--root_path", type=str,
required=True, help="Root path of the video directories")
parser.add_argument("-n", "--dataset_name", type=str,
required=True, help="Name of the dataset")
parser.add_argument("--meta_info_name", type=str,
help="Name of the meta information file")
args = parser.parse_args()
if args.meta_info_name is None:
args.meta_info_name = args.dataset_name
image_dir = Path(args.root_path) / "images"
output_dir = Path("./data")
output_dir.mkdir(exist_ok=True)
# Collect all video folder paths
frames_dir_paths = collect_video_folder_paths(image_dir)
meta_infos = []
for frames_dir_path in frames_dir_paths:
meta_info = construct_meta_info(frames_dir_path)
if meta_info:
meta_infos.append(meta_info)
output_file = output_dir / f"{args.meta_info_name}_stage1.json"
with output_file.open("w", encoding="utf-8") as f:
json.dump(meta_infos, f, indent=4)
print(f"Final data count: {len(meta_infos)}")
|
Main function to extract meta info for training.
|
main
|
python
|
jdh-algo/JoyHallo
|
scripts/extract_meta_info_stage1.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/extract_meta_info_stage1.py
|
MIT
|
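For orientation, a hedged sketch of one entry in the resulting data/{dataset_name}_stage1.json file; the paths are hypothetical, and a typical invocation might look like `python scripts/extract_meta_info_stage1.py -r <root_path> -n <dataset_name>`.

```python
import json

# One stage-1 meta-info entry as written by main(); paths are hypothetical examples.
example_entry = {
    "image_path": "dataset_root/images/clip_0001",
    "mask_path": "dataset_root/face_mask/clip_0001.png",
    "face_emb": "dataset_root/face_emb/clip_0001.pt",
}
print(json.dumps([example_entry], indent=4))
```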
def extract_meta_info(video_path: str) -> dict:
"""
Extract meta information for a given video file.
Args:
video_path (str): The path to the video file.
Returns:
dict: A dictionary containing the meta information for the video.
"""
mask_path = construct_paths(
video_path, "videos", "face_mask", ".png")
sep_mask_border = construct_paths(
video_path, "videos", "sep_pose_mask", ".png")
sep_mask_face = construct_paths(
video_path, "videos", "sep_face_mask", ".png")
sep_mask_lip = construct_paths(
video_path, "videos", "sep_lip_mask", ".png")
face_emb_path = construct_paths(
video_path, "videos", "face_emb", ".pt")
audio_path = construct_paths(video_path, "videos", "audios", ".wav")
vocal_emb_base_all = construct_paths(
video_path, "videos", "audio_emb", ".pt")
assert_flag = True
if not file_exists(mask_path):
print(f"Mask path not found: {mask_path}")
assert_flag = False
if not file_exists(sep_mask_border):
print(f"Separate mask border not found: {sep_mask_border}")
assert_flag = False
if not file_exists(sep_mask_face):
print(f"Separate mask face not found: {sep_mask_face}")
assert_flag = False
if not file_exists(sep_mask_lip):
print(f"Separate mask lip not found: {sep_mask_lip}")
assert_flag = False
if not file_exists(face_emb_path):
print(f"Face embedding path not found: {face_emb_path}")
assert_flag = False
if not file_exists(audio_path):
print(f"Audio path not found: {audio_path}")
assert_flag = False
if not file_exists(vocal_emb_base_all):
print(f"Vocal embedding base all not found: {vocal_emb_base_all}")
assert_flag = False
video_frames = VideoReader(video_path, ctx=cpu(0))
audio_emb = torch.load(vocal_emb_base_all)
if abs(len(video_frames) - audio_emb.shape[0]) > 3:
print(f"Frame count mismatch for video: {video_path}")
assert_flag = False
face_emb = torch.load(face_emb_path)
if face_emb is None:
print(f"Face embedding is None for video: {video_path}")
assert_flag = False
del video_frames, audio_emb
if assert_flag:
return {
"video_path": str(video_path),
"mask_path": mask_path,
"sep_mask_border": sep_mask_border,
"sep_mask_face": sep_mask_face,
"sep_mask_lip": sep_mask_lip,
"face_emb_path": face_emb_path,
"audio_path": audio_path,
"vocals_emb_base_all": vocal_emb_base_all,
}
return None
|
Extract meta information for a given video file.
Args:
video_path (str): The path to the video file.
Returns:
dict: A dictionary containing the meta information for the video.
|
extract_meta_info
|
python
|
jdh-algo/JoyHallo
|
scripts/extract_meta_info_stage2.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/extract_meta_info_stage2.py
|
MIT
|
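A self-contained toy of the frame/audio consistency check above; the counts and embedding shape are made up, and the real values come from decord's VideoReader and the saved audio embedding.

```python
import torch

# Toy stand-ins for len(VideoReader(video_path)) and torch.load(audio_emb_path).
num_video_frames = 150
audio_emb = torch.zeros(148, 768)  # hypothetical (num_frames, feature_dim)

# extract_meta_info tolerates a mismatch of at most 3 frames between video and audio embedding.
if abs(num_video_frames - audio_emb.shape[0]) > 3:
    print("Frame count mismatch: the entry would be skipped")
else:
    print("Entry accepted")
```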
def main():
"""
Main function to extract meta info for training.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--root_path", type=str,
required=True, help="Root path of the video files")
parser.add_argument("-n", "--dataset_name", type=str,
required=True, help="Name of the dataset")
parser.add_argument("--meta_info_name", type=str,
help="Name of the meta information file")
args = parser.parse_args()
if args.meta_info_name is None:
args.meta_info_name = args.dataset_name
video_dir = Path(args.root_path) / "videos"
video_paths = get_video_paths(video_dir, [".mp4"])
meta_infos = []
for video_path in tqdm(video_paths, desc="Extracting meta info"):
meta_info = extract_meta_info(video_path)
if meta_info:
meta_infos.append(meta_info)
print(f"Final data count: {len(meta_infos)}")
output_file = Path(f"./data/{args.meta_info_name}_stage2.json")
output_file.parent.mkdir(parents=True, exist_ok=True)
with output_file.open("w", encoding="utf-8") as f:
json.dump(meta_infos, f, indent=4)
|
Main function to extract meta info for training.
|
main
|
python
|
jdh-algo/JoyHallo
|
scripts/extract_meta_info_stage2.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/extract_meta_info_stage2.py
|
MIT
|
def forward(
self,
noisy_latents: torch.Tensor,
timesteps: torch.Tensor,
ref_image_latents: torch.Tensor,
face_emb: torch.Tensor,
audio_emb: torch.Tensor,
mask: torch.Tensor,
full_mask: torch.Tensor,
face_mask: torch.Tensor,
lip_mask: torch.Tensor,
uncond_img_fwd: bool = False,
uncond_audio_fwd: bool = False,
):
"""
simple docstring to prevent pylint error
"""
face_emb = self.imageproj(face_emb)
mask = mask.to(device="cuda")
mask_feature = self.face_locator(mask)
audio_emb = audio_emb.to(
device=self.audioproj.device, dtype=self.audioproj.dtype)
audio_emb = self.audioproj(audio_emb)
# condition forward
if not uncond_img_fwd:
ref_timesteps = torch.zeros_like(timesteps)
ref_timesteps = repeat(
ref_timesteps,
"b -> (repeat b)",
repeat=ref_image_latents.size(0) // ref_timesteps.size(0),
)
self.reference_unet(
ref_image_latents,
ref_timesteps,
encoder_hidden_states=face_emb,
return_dict=False,
)
self.reference_control_reader.update(self.reference_control_writer)
if uncond_audio_fwd:
audio_emb = torch.zeros_like(audio_emb).to(
device=audio_emb.device, dtype=audio_emb.dtype
)
model_pred = self.denoising_unet(
noisy_latents,
timesteps,
mask_cond_fea=mask_feature,
encoder_hidden_states=face_emb,
audio_embedding=audio_emb,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask
).sample
return model_pred
|
simple docstring to prevent pylint error
|
forward
|
python
|
jdh-algo/JoyHallo
|
scripts/inference.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/inference.py
|
MIT
|
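The einops repeat call above tiles the zero reference timestep so the reference UNet receives one timestep per reference latent. A toy example with made-up shapes:

```python
import torch
from einops import repeat

timesteps = torch.tensor([10, 20])             # batch of 2
ref_image_latents = torch.zeros(4, 4, 32, 32)  # 4 reference latents (hypothetically 2 per sample)

ref_timesteps = torch.zeros_like(timesteps)    # the reference UNet always runs at timestep 0
ref_timesteps = repeat(
    ref_timesteps,
    "b -> (repeat b)",
    repeat=ref_image_latents.size(0) // ref_timesteps.size(0),
)
print(ref_timesteps.shape)  # torch.Size([4])
```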
def get_attention_mask(mask: torch.Tensor, weight_dtype: torch.dtype) -> torch.Tensor:
"""
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor): The input mask tensor.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
"""
if isinstance(mask, List):
_mask = []
for m in mask:
_mask.append(
rearrange(m, "b f 1 h w -> (b f) (h w)").to(weight_dtype))
return _mask
mask = rearrange(mask, "b f 1 h w -> (b f) (h w)").to(weight_dtype)
return mask
|
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor): The input mask tensor.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
|
get_attention_mask
|
python
|
jdh-algo/JoyHallo
|
scripts/inference.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/inference.py
|
MIT
|
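A toy run of the rearrange pattern used above, flattening (batch, frames) and (height, width) into attention-mask shape; the shapes are arbitrary.

```python
import torch
from einops import rearrange

mask = torch.ones(2, 4, 1, 8, 8)  # (batch, frames, 1, height, width)
flat = rearrange(mask, "b f 1 h w -> (b f) (h w)").to(torch.float16)
print(flat.shape)  # torch.Size([8, 64]) -- (batch*frames, height*width)
```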
def get_noise_scheduler(cfg: argparse.Namespace) -> Tuple[DDIMScheduler, DDIMScheduler]:
"""
Create noise scheduler for training.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
"""
sched_kwargs = OmegaConf.to_container(cfg.noise_scheduler_kwargs)
if cfg.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
train_noise_scheduler = DDIMScheduler(**sched_kwargs)
return train_noise_scheduler, val_noise_scheduler
|
Create noise scheduler for training.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
|
get_noise_scheduler
|
python
|
jdh-algo/JoyHallo
|
scripts/inference.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/inference.py
|
MIT
|
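A sketch of what the two schedulers end up looking like when enable_zero_snr is set, assuming a diffusers version that supports rescale_betas_zero_snr and timestep_spacing; the base noise_scheduler_kwargs below are hypothetical.

```python
from diffusers import DDIMScheduler

# Hypothetical base kwargs; in the repo they come from cfg.noise_scheduler_kwargs.
sched_kwargs = {
    "num_train_timesteps": 1000,
    "beta_start": 0.00085,
    "beta_end": 0.012,
    "beta_schedule": "linear",
    "clip_sample": False,
    "steps_offset": 1,
}
# Zero-terminal-SNR settings shared by both schedulers.
sched_kwargs.update(
    rescale_betas_zero_snr=True,
    timestep_spacing="trailing",
    prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
# Training additionally switches to the scaled_linear beta schedule.
train_noise_scheduler = DDIMScheduler(**{**sched_kwargs, "beta_schedule": "scaled_linear"})
print(val_noise_scheduler.config.beta_schedule, train_noise_scheduler.config.beta_schedule)
# linear scaled_linear
```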
def process_audio_emb(audio_emb: torch.Tensor) -> torch.Tensor:
"""
Build a sliding window of audio embeddings: for each frame index i, stack the embeddings at offsets i-2..i+2, clamping indices to the valid range.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor of shape (num_frames, ...).
Returns:
torch.Tensor: A tensor of shape (num_frames, 5, ...) containing the windowed audio embeddings.
"""
concatenated_tensors = []
for i in range(audio_emb.shape[0]):
vectors_to_concat = [
audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)]for j in range(-2, 3)]
concatenated_tensors.append(torch.stack(vectors_to_concat, dim=0))
audio_emb = torch.stack(concatenated_tensors, dim=0)
return audio_emb
|
Build a sliding window of audio embeddings: for each frame index i, stack the embeddings at offsets i-2..i+2, clamping indices to the valid range.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor of shape (num_frames, ...).
Returns:
torch.Tensor: A tensor of shape (num_frames, 5, ...) containing the windowed audio embeddings.
|
process_audio_emb
|
python
|
jdh-algo/JoyHallo
|
scripts/inference.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/inference.py
|
MIT
|
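A self-contained toy run of the same sliding-window logic, to make the output shape concrete; the values are made up.

```python
import torch

audio_emb = torch.arange(24, dtype=torch.float32).reshape(6, 4)  # 6 frames, 4 features

# For each frame i, stack the embeddings at i-2..i+2, clamping indices at the boundaries.
windows = []
for i in range(audio_emb.shape[0]):
    idx = [max(min(i + j, audio_emb.shape[0] - 1), 0) for j in range(-2, 3)]
    windows.append(audio_emb[idx])
out = torch.stack(windows, dim=0)

print(out.shape)     # torch.Size([6, 5, 4])
print(out[0, :, 0])  # tensor([0., 0., 0., 4., 8.]) -- frame 0 is reused for the i-2 and i-1 slots
```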
def log_validation(
accelerator: Accelerator,
vae: AutoencoderKL,
net: Net,
scheduler: DDIMScheduler,
width: int,
height: int,
clip_length: int = 24,
generator: torch.Generator = None,
cfg: dict = None,
save_dir: str = None,
global_step: int = 0,
times: int = None,
face_analysis_model_path: str = "",
) -> None:
"""
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
None: The generated validation videos are saved to save_dir; nothing is returned.
"""
ori_net = accelerator.unwrap_model(net)
reference_unet = ori_net.reference_unet
denoising_unet = ori_net.denoising_unet
face_locator = ori_net.face_locator
imageproj = ori_net.imageproj
audioproj = ori_net.audioproj
# generator = torch.manual_seed(42)
generator = torch.cuda.manual_seed_all(42) # use cuda random seed
tmp_denoising_unet = copy.deepcopy(denoising_unet)
pipeline = FaceAnimatePipeline(
vae=vae,
reference_unet=reference_unet,
denoising_unet=tmp_denoising_unet,
face_locator=face_locator,
image_proj=imageproj,
scheduler=scheduler,
)
pipeline = pipeline.to("cuda")
image_processor = ImageProcessor((width, height), face_analysis_model_path)
audio_processor = AudioProcessor(
cfg.data.sample_rate,
cfg.data.fps,
cfg.wav2vec_config.model_path,
cfg.wav2vec_config.features == "last",
os.path.dirname(cfg.audio_separator.model_path),
os.path.basename(cfg.audio_separator.model_path),
os.path.join(save_dir, '.cache', "audio_preprocess")
)
for idx, ref_img_path in enumerate(cfg.ref_img_path):
audio_path = cfg.audio_path[idx]
source_image_pixels, \
source_image_face_region, \
source_image_face_emb, \
source_image_full_mask, \
source_image_face_mask, \
source_image_lip_mask = image_processor.preprocess(
ref_img_path, os.path.join(save_dir, '.cache'), cfg.face_expand_ratio)
audio_emb, audio_length = audio_processor.preprocess(
audio_path, clip_length)
audio_emb = process_audio_emb(audio_emb)
source_image_pixels = source_image_pixels.unsqueeze(0)
source_image_face_region = source_image_face_region.unsqueeze(0)
source_image_face_emb = source_image_face_emb.reshape(1, -1)
source_image_face_emb = torch.tensor(source_image_face_emb)
source_image_full_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_full_mask
]
source_image_face_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_face_mask
]
source_image_lip_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_lip_mask
]
times = audio_emb.shape[0] // clip_length
tensor_result = []
for t in range(times):
print(f"[{t+1}/{times}]")
if len(tensor_result) == 0:
# The first iteration
motion_zeros = source_image_pixels.repeat(
cfg.data.n_motion_frames, 1, 1, 1)
motion_zeros = motion_zeros.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_zeros], dim=0) # concat the ref image and the first motion frames
else:
motion_frames = tensor_result[-1][0]
motion_frames = motion_frames.permute(1, 0, 2, 3)
motion_frames = motion_frames[0 - cfg.data.n_motion_frames:]
motion_frames = motion_frames * 2.0 - 1.0
motion_frames = motion_frames.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_frames], dim=0) # concat the ref image and the motion frames
pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
audio_tensor = audio_emb[
t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])
]
audio_tensor = audio_tensor.unsqueeze(0)
audio_tensor = audio_tensor.to(
device=audioproj.device, dtype=audioproj.dtype)
audio_tensor = audioproj(audio_tensor)
pipeline_output = pipeline(
ref_image=pixel_values_ref_img,
audio_tensor=audio_tensor,
face_emb=source_image_face_emb,
face_mask=source_image_face_region,
pixel_values_full_mask=source_image_full_mask,
pixel_values_face_mask=source_image_face_mask,
pixel_values_lip_mask=source_image_lip_mask,
width=cfg.data.train_width,
height=cfg.data.train_height,
video_length=clip_length,
num_inference_steps=cfg.inference_steps,
guidance_scale=cfg.cfg_scale,
generator=generator,
)
tensor_result.append(pipeline_output.videos)
tensor_result = torch.cat(tensor_result, dim=2)
tensor_result = tensor_result.squeeze(0)
tensor_result = tensor_result[:, :audio_length]
audio_name = os.path.basename(audio_path).split('.')[0]
ref_name = os.path.basename(ref_img_path).split('.')[0]
output_file = os.path.join(save_dir,f"{global_step}_{ref_name}_{audio_name}.mp4")
# save the result after all iterations
tensor_to_video(tensor_result, output_file, audio_path)
# clean up
del tmp_denoising_unet
del pipeline
del image_processor
del audio_processor
torch.cuda.empty_cache()
|
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
None: The generated validation videos are saved to save_dir; nothing is returned.
|
log_validation
|
python
|
jdh-algo/JoyHallo
|
scripts/inference.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/inference.py
|
MIT
|
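Two small pieces of the loop above, shown with toy tensors: how the windowed audio embedding is split into clip_length chunks, and how the previous clip's tail becomes the next clip's motion frames. All shapes and the n_motion_frames value are hypothetical.

```python
import torch

clip_length = 24
n_motion_frames = 2                    # hypothetical cfg.data.n_motion_frames
audio_emb = torch.zeros(100, 5, 768)   # hypothetical windowed audio embedding

times = audio_emb.shape[0] // clip_length  # only full clips are generated (4 here)
for t in range(times):
    chunk = audio_emb[t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])]
    assert chunk.shape[0] == clip_length

# Motion frames for the next clip: the last n_motion_frames of the previous output,
# rescaled with * 2.0 - 1.0 exactly as in log_validation above.
prev_clip = torch.rand(3, clip_length, 64, 64)  # hypothetical (channels, frames, H, W)
motion_frames = prev_clip.permute(1, 0, 2, 3)[-n_motion_frames:] * 2.0 - 1.0
print(motion_frames.shape)  # torch.Size([2, 3, 64, 64])
```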
def inference_process(cfg: argparse.Namespace) -> None:
"""
Runs inference using the given configuration (cfg).
Args:
cfg: The configuration object containing the parameters for inference.
Notes:
- This function initializes the components needed for inference, such as the models, pipeline, and noise scheduler.
- Progress is logged and tracked using the accelerator.
- The generated validation videos are saved under the configured output directory.
"""
kwargs = DistributedDataParallelKwargs(find_unused_parameters=False)
accelerator = Accelerator(
gradient_accumulation_steps=cfg.solver.gradient_accumulation_steps,
mixed_precision=cfg.solver.mixed_precision,
log_with="mlflow",
project_dir="./mlruns",
kwargs_handlers=[kwargs],
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if cfg.seed is not None:
seed_everything(cfg.seed)
# create output dir for training
exp_name = cfg.exp_name
save_dir = f"{cfg.output_dir}/{exp_name}"
validation_dir = save_dir
if accelerator.is_main_process:
init_output_dir([save_dir])
accelerator.wait_for_everyone()
if cfg.weight_dtype == "fp16":
weight_dtype = torch.float16
elif cfg.weight_dtype == "bf16":
weight_dtype = torch.bfloat16
elif cfg.weight_dtype == "fp32":
weight_dtype = torch.float32
else:
raise ValueError(
f"Do not support weight dtype: {cfg.weight_dtype} during training"
)
# Create Models
vae = AutoencoderKL.from_pretrained(cfg.vae_model_path).to(
"cuda", dtype=weight_dtype
)
reference_unet = UNet2DConditionModel.from_pretrained(
cfg.base_model_path,
subfolder="unet",
).to(device="cuda", dtype=weight_dtype)
denoising_unet = UNet3DConditionModel.from_pretrained_2d(
cfg.base_model_path,
cfg.mm_path,
subfolder="unet",
unet_additional_kwargs=OmegaConf.to_container(
cfg.unet_additional_kwargs),
use_landmark=False
).to(device="cuda", dtype=weight_dtype)
imageproj = ImageProjModel(
cross_attention_dim=denoising_unet.config.cross_attention_dim,
clip_embeddings_dim=512,
clip_extra_context_tokens=4,
).to(device="cuda", dtype=weight_dtype)
face_locator = FaceLocator(
conditioning_embedding_channels=320,
).to(device="cuda", dtype=weight_dtype)
audioproj = AudioProjModel(
seq_len=5,
blocks=12,
channels=768,
intermediate_dim=512,
output_dim=768,
context_tokens=32,
).to(device="cuda", dtype=weight_dtype)
# Freeze
vae.requires_grad_(False)
imageproj.requires_grad_(False)
reference_unet.requires_grad_(False)
denoising_unet.requires_grad_(False)
face_locator.requires_grad_(False)
audioproj.requires_grad_(True)
# Set motion module learnable
trainable_modules = cfg.trainable_para
for name, module in denoising_unet.named_modules():
if any(trainable_mod in name for trainable_mod in trainable_modules):
for params in module.parameters():
params.requires_grad_(True)
reference_control_writer = ReferenceAttentionControl(
reference_unet,
do_classifier_free_guidance=False,
mode="write",
fusion_blocks="full",
)
reference_control_reader = ReferenceAttentionControl(
denoising_unet,
do_classifier_free_guidance=False,
mode="read",
fusion_blocks="full",
)
net = Net(
reference_unet,
denoising_unet,
face_locator,
reference_control_writer,
reference_control_reader,
imageproj,
audioproj,
).to(dtype=weight_dtype)
m,u = net.load_state_dict(
torch.load(
cfg.audio_ckpt_dir,
map_location="cpu",
),
)
assert len(m) == 0 and len(u) == 0, "Fail to load correct checkpoint."
print("loaded weight from ", os.path.join(cfg.audio_ckpt_dir))
# get noise scheduler
_, val_noise_scheduler = get_noise_scheduler(cfg)
if cfg.solver.enable_xformers_memory_efficient_attention:
if is_xformers_available():
reference_unet.enable_xformers_memory_efficient_attention()
denoising_unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError(
"xformers is not available. Make sure it is installed correctly"
)
if cfg.solver.gradient_checkpointing:
reference_unet.enable_gradient_checkpointing()
denoising_unet.enable_gradient_checkpointing()
if cfg.solver.scale_lr:
learning_rate = (
cfg.solver.learning_rate
* cfg.solver.gradient_accumulation_steps
* cfg.data.train_bs
* accelerator.num_processes
)
else:
learning_rate = cfg.solver.learning_rate
# Initialize the optimizer
if cfg.solver.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError as exc:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
) from exc
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
trainable_params = list(
filter(lambda p: p.requires_grad, net.parameters()))
optimizer = optimizer_cls(
trainable_params,
lr=learning_rate,
betas=(cfg.solver.adam_beta1, cfg.solver.adam_beta2),
weight_decay=cfg.solver.adam_weight_decay,
eps=cfg.solver.adam_epsilon,
)
# Scheduler
lr_scheduler = get_scheduler(
cfg.solver.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=cfg.solver.lr_warmup_steps
* cfg.solver.gradient_accumulation_steps,
num_training_steps=cfg.solver.max_train_steps
* cfg.solver.gradient_accumulation_steps,
)
# get data loader
train_dataset = TalkingVideoDataset(
img_size=(cfg.data.train_width, cfg.data.train_height),
sample_rate=cfg.data.sample_rate,
n_sample_frames=cfg.data.n_sample_frames,
n_motion_frames=cfg.data.n_motion_frames,
audio_margin=cfg.data.audio_margin,
data_meta_paths=cfg.data.train_meta_paths,
wav2vec_cfg=cfg.wav2vec_config,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=cfg.data.train_bs, shuffle=True, num_workers=16
)
# Prepare everything with our `accelerator`.
(
net,
optimizer,
train_dataloader,
lr_scheduler,
) = accelerator.prepare(
net,
optimizer,
train_dataloader,
lr_scheduler,
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
run_time = datetime.now().strftime("%Y%m%d-%H%M")
accelerator.init_trackers(
exp_name,
init_kwargs={"mlflow": {"run_name": run_time}},
)
logger.info("***** Running Inferencing *****")
# Inference
log_validation(
accelerator=accelerator,
vae=vae,
net=net,
scheduler=val_noise_scheduler,
width=cfg.data.train_width,
height=cfg.data.train_height,
clip_length=cfg.data.n_sample_frames,
cfg=cfg,
save_dir=validation_dir,
global_step=0,
times=cfg.single_inference_times if cfg.single_inference_times is not None else None,
face_analysis_model_path=cfg.face_analysis_model_path
)
# Create the pipeline using the trained modules and save it.
accelerator.wait_for_everyone()
accelerator.end_training()
|
Runs inference using the given configuration (cfg).
Args:
cfg: The configuration object containing the parameters for inference.
Notes:
- This function initializes the components needed for inference, such as the models, pipeline, and noise scheduler.
- Progress is logged and tracked using the accelerator.
- The generated validation videos are saved under the configured output directory.
|
inference_process
|
python
|
jdh-algo/JoyHallo
|
scripts/inference.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/inference.py
|
MIT
|
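The trainable-module selection in the script matches module names by substring. A toy illustration with a stand-in model; the module and config names below are hypothetical.

```python
from torch import nn

model = nn.ModuleDict({
    "down_block": nn.Linear(4, 4),
    "motion_modules_0": nn.Linear(4, 4),
    "audio_modules_0": nn.Linear(4, 4),
})
model.requires_grad_(False)

trainable_modules = ["motion_modules", "audio_modules"]  # hypothetical cfg.trainable_para
for name, module in model.named_modules():
    if any(key in name for key in trainable_modules):
        for params in module.parameters():
            params.requires_grad_(True)

print(sorted(n for n, p in model.named_parameters() if p.requires_grad))
# ['audio_modules_0.bias', 'audio_modules_0.weight', 'motion_modules_0.bias', 'motion_modules_0.weight']
```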
def load_config(config_path: str) -> dict:
"""
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
"""
if config_path.endswith(".yaml"):
return OmegaConf.load(config_path)
if config_path.endswith(".py"):
return import_filename(config_path).cfg
raise ValueError("Unsupported format for config file")
|
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
|
load_config
|
python
|
jdh-algo/JoyHallo
|
scripts/inference.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/inference.py
|
MIT
|
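For .yaml files load_config returns an OmegaConf config, which is why the scripts read settings attribute-style (cfg.data.train_width, cfg.solver.learning_rate, ...). A tiny sketch with a hypothetical config:

```python
import tempfile
from omegaconf import OmegaConf

# Write a tiny hypothetical config and load it the same way load_config does for ".yaml".
with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
    f.write("weight_dtype: fp16\ndata:\n  train_width: 512\n  train_height: 512\n")
    config_path = f.name

cfg = OmegaConf.load(config_path)
print(cfg.weight_dtype, cfg.data.train_width)  # fp16 512
```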
def forward(
self,
noisy_latents,
timesteps,
ref_image_latents,
face_emb,
face_mask,
uncond_fwd: bool = False,
):
"""
Forward pass of the model.
Args:
self (Net): The model instance.
noisy_latents (torch.Tensor): Noisy latents.
timesteps (torch.Tensor): Timesteps.
ref_image_latents (torch.Tensor): Reference image latents.
face_emb (torch.Tensor): Face embedding.
face_mask (torch.Tensor): Face mask.
uncond_fwd (bool, optional): Unconditional forward pass. Defaults to False.
Returns:
torch.Tensor: Model prediction.
"""
face_emb = self.imageproj(face_emb)
face_mask = face_mask.to(device="cuda")
face_mask_feature = self.face_locator(face_mask)
if not uncond_fwd:
ref_timesteps = torch.zeros_like(timesteps)
self.reference_unet(
ref_image_latents,
ref_timesteps,
encoder_hidden_states=face_emb,
return_dict=False,
)
self.reference_control_reader.update(self.reference_control_writer)
model_pred = self.denoising_unet(
noisy_latents,
timesteps,
mask_cond_fea=face_mask_feature,
encoder_hidden_states=face_emb,
).sample
return model_pred
|
Forward pass of the model.
Args:
self (Net): The model instance.
noisy_latents (torch.Tensor): Noisy latents.
timesteps (torch.Tensor): Timesteps.
ref_image_latents (torch.Tensor): Reference image latents.
face_emb (torch.Tensor): Face embedding.
face_mask (torch.Tensor): Face mask.
uncond_fwd (bool, optional): Unconditional forward pass. Defaults to False.
Returns:
torch.Tensor: Model prediction.
|
forward
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage1_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage1_alltrain.py
|
MIT
|
def get_noise_scheduler(cfg: argparse.Namespace):
"""
Create the noise schedulers for training and validation.
Args:
cfg (omegaconf.dictconfig.DictConfig): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
"""
sched_kwargs = OmegaConf.to_container(cfg.noise_scheduler_kwargs)
if cfg.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
train_noise_scheduler = DDIMScheduler(**sched_kwargs)
return train_noise_scheduler, val_noise_scheduler
|
Create the noise schedulers for training and validation.
Args:
cfg (omegaconf.dictconfig.DictConfig): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
|
get_noise_scheduler
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage1_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage1_alltrain.py
|
MIT
|
def log_validation(
vae,
net,
scheduler,
accelerator,
width,
height,
imageproj,
cfg,
save_dir,
global_step,
face_analysis_model_path,
):
"""
Log validation generation image.
Args:
vae (nn.Module): Variational Autoencoder model.
net (Net): Main model.
scheduler (diffusers.SchedulerMixin): Noise scheduler.
accelerator (accelerate.Accelerator): Accelerator for training.
width (int): Width of the input images.
height (int): Height of the input images.
imageproj (nn.Module): Image projection model.
cfg (omegaconf.dictconfig.DictConfig): Configuration object.
save_dir (str): directory path to save log result.
global_step (int): Global step number.
Returns:
None
"""
logger.info("Running validation... ")
ori_net = accelerator.unwrap_model(net)
ori_net = copy.deepcopy(ori_net)
reference_unet = ori_net.reference_unet
denoising_unet = ori_net.denoising_unet
face_locator = ori_net.face_locator
generator = torch.manual_seed(42)
image_enc = FaceAnalysis(
name="",
root=face_analysis_model_path,
providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
)
image_enc.prepare(ctx_id=0, det_size=(640, 640))
pipe = StaticPipeline(
vae=vae,
reference_unet=reference_unet,
denoising_unet=denoising_unet,
face_locator=face_locator,
scheduler=scheduler,
imageproj=imageproj,
)
pil_images = []
for ref_image_path, mask_image_path in zip(cfg.ref_image_paths, cfg.mask_image_paths):
# for mask_image_path in mask_image_paths:
mask_name = os.path.splitext(
os.path.basename(mask_image_path))[0]
ref_name = os.path.splitext(
os.path.basename(ref_image_path))[0]
ref_image_pil = Image.open(ref_image_path).convert("RGB")
mask_image_pil = Image.open(mask_image_path).convert("RGB")
# Prepare face embeds
face_info = image_enc.get(
cv2.cvtColor(np.array(ref_image_pil), cv2.COLOR_RGB2BGR))
face_info = sorted(face_info, key=lambda x: (x['bbox'][2] - x['bbox'][0]) * (
x['bbox'][3] - x['bbox'][1]))[-1] # only use the maximum face
face_emb = torch.tensor(face_info['embedding'])
face_emb = face_emb.to(
imageproj.device, imageproj.dtype)
image = pipe(
ref_image_pil,
mask_image_pil,
width,
height,
20,
3.5,
face_emb,
generator=generator,
).images
image = image[0, :, 0].permute(1, 2, 0).cpu().numpy() # (3, 512, 512)
res_image_pil = Image.fromarray((image * 255).astype(np.uint8))
# Save ref_image, src_image and the generated_image
w, h = res_image_pil.size
canvas = Image.new("RGB", (w * 3, h), "white")
ref_image_pil = ref_image_pil.resize((w, h))
mask_image_pil = mask_image_pil.resize((w, h))
canvas.paste(ref_image_pil, (0, 0))
canvas.paste(mask_image_pil, (w, 0))
canvas.paste(res_image_pil, (w * 2, 0))
out_file = os.path.join(
save_dir, f"{global_step:06d}-{ref_name}_{mask_name}.jpg"
)
canvas.save(out_file)
del pipe
del ori_net
torch.cuda.empty_cache()
return pil_images
|
Log validation generation image.
Args:
vae (nn.Module): Variational Autoencoder model.
net (Net): Main model.
scheduler (diffusers.SchedulerMixin): Noise scheduler.
accelerator (accelerate.Accelerator): Accelerator for training.
width (int): Width of the input images.
height (int): Height of the input images.
imageproj (nn.Module): Image projection model.
cfg (omegaconf.dictconfig.DictConfig): Configuration object.
save_dir (str): directory path to save log result.
global_step (int): Global step number.
Returns:
None
|
log_validation
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage1_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage1_alltrain.py
|
MIT
|
def train_stage1_process(cfg: argparse.Namespace) -> None:
"""
Trains the model using the given configuration (cfg).
Args:
cfg (dict): The configuration dictionary containing the parameters for training.
Notes:
- This function trains the model using the given configuration.
- It initializes the necessary components for training, such as the pipeline, optimizer, and scheduler.
- The training progress is logged and tracked using the accelerator.
- The trained model is saved after the training is completed.
"""
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(
gradient_accumulation_steps=cfg.solver.gradient_accumulation_steps,
mixed_precision=cfg.solver.mixed_precision,
log_with="mlflow",
project_dir="./mlruns",
kwargs_handlers=[kwargs],
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
transformers.utils.logging.set_verbosity_warning()
diffusers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
diffusers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if cfg.seed is not None:
seed_everything(cfg.seed)
# create output dir for training
exp_name = cfg.exp_name
save_dir = f"{cfg.output_dir}/{exp_name}"
checkpoint_dir = os.path.join(save_dir, "checkpoints")
module_dir = os.path.join(save_dir, "modules")
validation_dir = os.path.join(save_dir, "validation")
if accelerator.is_main_process:
init_output_dir([save_dir, checkpoint_dir, module_dir, validation_dir])
accelerator.wait_for_everyone()
# create model
if cfg.weight_dtype == "fp16":
weight_dtype = torch.float16
elif cfg.weight_dtype == "bf16":
weight_dtype = torch.bfloat16
elif cfg.weight_dtype == "fp32":
weight_dtype = torch.float32
else:
raise ValueError(
f"Do not support weight dtype: {cfg.weight_dtype} during training"
)
# create model
vae = AutoencoderKL.from_pretrained(cfg.vae_model_path).to(
"cuda", dtype=weight_dtype
)
reference_unet = UNet2DConditionModel.from_pretrained(
cfg.base_model_path,
subfolder="unet",
).to(device="cuda", dtype=weight_dtype)
denoising_unet = UNet3DConditionModel.from_pretrained_2d(
cfg.base_model_path,
"",
subfolder="unet",
unet_additional_kwargs={
"use_motion_module": False,
"unet_use_temporal_attention": False,
},
use_landmark=False
).to(device="cuda", dtype=weight_dtype)
imageproj = ImageProjModel(
cross_attention_dim=denoising_unet.config.cross_attention_dim,
clip_embeddings_dim=512,
clip_extra_context_tokens=4,
).to(device="cuda", dtype=weight_dtype)
if cfg.face_locator_pretrained:
face_locator = FaceLocator(
conditioning_embedding_channels=320, block_out_channels=(16, 32, 96, 256)
).to(device="cuda", dtype=weight_dtype)
miss, _ = face_locator.load_state_dict(
cfg.face_state_dict_path, strict=False)
logger.info(f"Missing key for face locator: {len(miss)}")
else:
face_locator = FaceLocator(
conditioning_embedding_channels=320,
).to(device="cuda", dtype=weight_dtype)
# Freeze
vae.requires_grad_(False)
denoising_unet.requires_grad_(True)
reference_unet.requires_grad_(True)
imageproj.requires_grad_(True)
face_locator.requires_grad_(True)
reference_control_writer = ReferenceAttentionControl(
reference_unet,
do_classifier_free_guidance=False,
mode="write",
fusion_blocks="full",
)
reference_control_reader = ReferenceAttentionControl(
denoising_unet,
do_classifier_free_guidance=False,
mode="read",
fusion_blocks="full",
)
net = Net(
reference_unet,
denoising_unet,
face_locator,
reference_control_writer,
reference_control_reader,
imageproj,
).to(dtype=weight_dtype)
# load weight from pretrain model
net.load_state_dict(
torch.load(
os.path.join(config.audio_ckpt_dir, "net.pth"),
map_location="cpu",
),
strict=False,
)
if accelerator.is_main_process:
logger.info(f"loaded weight from {os.path.join(config.audio_ckpt_dir, 'net.pth')}")
# get noise scheduler
train_noise_scheduler, val_noise_scheduler = get_noise_scheduler(cfg)
# init optimizer
if cfg.solver.enable_xformers_memory_efficient_attention:
if is_xformers_available():
reference_unet.enable_xformers_memory_efficient_attention()
denoising_unet.enable_xformers_memory_efficient_attention()
else:
raise ValueError(
"xformers is not available. Make sure it is installed correctly"
)
if cfg.solver.gradient_checkpointing:
reference_unet.enable_gradient_checkpointing()
denoising_unet.enable_gradient_checkpointing()
if cfg.solver.scale_lr:
learning_rate = (
cfg.solver.learning_rate
* cfg.solver.gradient_accumulation_steps
* cfg.data.train_bs
* accelerator.num_processes
)
else:
learning_rate = cfg.solver.learning_rate
# Initialize the optimizer
if cfg.solver.use_8bit_adam:
try:
import bitsandbytes as bnb
except ImportError as exc:
raise ImportError(
"Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
) from exc
optimizer_cls = bnb.optim.AdamW8bit
else:
optimizer_cls = torch.optim.AdamW
trainable_params = list(
filter(lambda p: p.requires_grad, net.parameters()))
optimizer = optimizer_cls(
trainable_params,
lr=learning_rate,
betas=(cfg.solver.adam_beta1, cfg.solver.adam_beta2),
weight_decay=cfg.solver.adam_weight_decay,
eps=cfg.solver.adam_epsilon,
)
# init scheduler
lr_scheduler = get_scheduler(
cfg.solver.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=cfg.solver.lr_warmup_steps
* cfg.solver.gradient_accumulation_steps,
num_training_steps=cfg.solver.max_train_steps
* cfg.solver.gradient_accumulation_steps,
)
# get data loader
train_dataset = FaceMaskDataset(
img_size=(cfg.data.train_width, cfg.data.train_height),
data_meta_paths=cfg.data.meta_paths,
sample_margin=cfg.data.sample_margin,
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=cfg.data.train_bs, shuffle=True, num_workers=4
)
# Prepare everything with our `accelerator`.
(
net,
optimizer,
train_dataloader,
lr_scheduler,
) = accelerator.prepare(
net,
optimizer,
train_dataloader,
lr_scheduler,
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(
len(train_dataloader) / cfg.solver.gradient_accumulation_steps
)
# Afterwards we recalculate our number of training epochs
num_train_epochs = math.ceil(
cfg.solver.max_train_steps / num_update_steps_per_epoch
)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if accelerator.is_main_process:
run_time = datetime.now().strftime("%Y%m%d-%H%M")
accelerator.init_trackers(
cfg.exp_name,
init_kwargs={"mlflow": {"run_name": run_time}},
)
# dump config file
mlflow.log_dict(OmegaConf.to_container(cfg), "config.yaml")
logger.info(f"save config to {save_dir}")
OmegaConf.save(
cfg, os.path.join(save_dir, "config.yaml")
)
# Train!
total_batch_size = (
cfg.data.train_bs
* accelerator.num_processes
* cfg.solver.gradient_accumulation_steps
)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {cfg.data.train_bs}")
logger.info(
f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}"
)
logger.info(
f" Gradient Accumulation steps = {cfg.solver.gradient_accumulation_steps}"
)
logger.info(f" Total optimization steps = {cfg.solver.max_train_steps}")
global_step = 0
first_epoch = 0
# load checkpoint
# Potentially load in the weights and states from a previous save
if cfg.resume_from_checkpoint:
logger.info(f"Loading checkpoint from {checkpoint_dir}")
global_step = load_checkpoint(cfg, checkpoint_dir, accelerator)
first_epoch = global_step // num_update_steps_per_epoch
# Only show the progress bar once on each machine.
progress_bar = tqdm(
range(global_step, cfg.solver.max_train_steps),
disable=not accelerator.is_main_process,
)
progress_bar.set_description("Steps")
net.train()
for _ in range(first_epoch, num_train_epochs):
train_loss = 0.0
for _, batch in enumerate(train_dataloader):
with accelerator.accumulate(net):
# Convert videos to latent space
pixel_values = batch["img"].to(weight_dtype)
with torch.no_grad():
latents = vae.encode(pixel_values).latent_dist.sample()
latents = latents.unsqueeze(2) # (b, c, 1, h, w)
latents = latents * 0.18215
noise = torch.randn_like(latents)
if cfg.noise_offset > 0.0:
noise += cfg.noise_offset * torch.randn(
(noise.shape[0], noise.shape[1], 1, 1, 1),
device=noise.device,
)
bsz = latents.shape[0]
# Sample a random timestep for each video
timesteps = torch.randint(
0,
train_noise_scheduler.num_train_timesteps,
(bsz,),
device=latents.device,
)
timesteps = timesteps.long()
face_mask_img = batch["tgt_mask"]
face_mask_img = face_mask_img.unsqueeze(
2)
face_mask_img = face_mask_img.to(weight_dtype)
uncond_fwd = random.random() < cfg.uncond_ratio
face_emb_list = []
ref_image_list = []
for _, (ref_img, face_emb) in enumerate(
zip(batch["ref_img"], batch["face_emb"])
):
if uncond_fwd:
face_emb_list.append(torch.zeros_like(face_emb))
else:
face_emb_list.append(face_emb)
ref_image_list.append(ref_img)
with torch.no_grad():
ref_img = torch.stack(ref_image_list, dim=0).to(
dtype=vae.dtype, device=vae.device
)
ref_image_latents = vae.encode(
ref_img
).latent_dist.sample()
ref_image_latents = ref_image_latents * 0.18215
face_emb = torch.stack(face_emb_list, dim=0).to(
dtype=imageproj.dtype, device=imageproj.device
)
# add noise
noisy_latents = train_noise_scheduler.add_noise(
latents, noise, timesteps
)
# Get the target for loss depending on the prediction type
if train_noise_scheduler.prediction_type == "epsilon":
target = noise
elif train_noise_scheduler.prediction_type == "v_prediction":
target = train_noise_scheduler.get_velocity(
latents, noise, timesteps
)
else:
raise ValueError(
f"Unknown prediction type {train_noise_scheduler.prediction_type}"
)
model_pred = net(
noisy_latents,
timesteps,
ref_image_latents,
face_emb,
face_mask_img,
uncond_fwd,
)
if cfg.snr_gamma == 0:
loss = F.mse_loss(
model_pred.float(), target.float(), reduction="mean"
)
else:
snr = compute_snr(train_noise_scheduler, timesteps)
if train_noise_scheduler.config.prediction_type == "v_prediction":
# Velocity objective requires that we add one to SNR values before we divide by them.
snr = snr + 1
mse_loss_weights = (
torch.stack(
[snr, cfg.snr_gamma * torch.ones_like(timesteps)], dim=1
).min(dim=1)[0]
/ snr
)
loss = F.mse_loss(
model_pred.float(), target.float(), reduction="none"
)
loss = (
loss.mean(dim=list(range(1, len(loss.shape))))
* mse_loss_weights
)
loss = loss.mean()
# Gather the losses across all processes for logging (if we use distributed training).
avg_loss = accelerator.gather(
loss.repeat(cfg.data.train_bs)).mean()
train_loss += avg_loss.item() / cfg.solver.gradient_accumulation_steps
# Backpropagate
accelerator.backward(loss)
if accelerator.sync_gradients:
accelerator.clip_grad_norm_(
trainable_params,
cfg.solver.max_grad_norm,
)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
if accelerator.sync_gradients:
reference_control_reader.clear()
reference_control_writer.clear()
progress_bar.update(1)
global_step += 1
accelerator.log({"train_loss": train_loss}, step=global_step)
train_loss = 0.0
if global_step % cfg.checkpointing_steps == 0 or global_step == cfg.solver.max_train_steps:
accelerator.wait_for_everyone()
save_path = os.path.join(
checkpoint_dir, f"checkpoint-{global_step}")
if accelerator.is_main_process:
delete_additional_ckpt(checkpoint_dir, 3)
accelerator.save_state(save_path)
accelerator.wait_for_everyone()
unwrap_net = accelerator.unwrap_model(net)
if accelerator.is_main_process:
save_checkpoint(
unwrap_net.reference_unet,
module_dir,
"reference_unet",
global_step,
total_limit=3,
)
save_checkpoint(
unwrap_net.imageproj,
module_dir,
"imageproj",
global_step,
total_limit=3,
)
save_checkpoint(
unwrap_net.denoising_unet,
module_dir,
"denoising_unet",
global_step,
total_limit=3,
)
save_checkpoint(
unwrap_net.face_locator,
module_dir,
"face_locator",
global_step,
total_limit=3,
)
if global_step % cfg.val.validation_steps == 0 or global_step == 1:
if accelerator.is_main_process:
generator = torch.Generator(device=accelerator.device)
generator.manual_seed(cfg.seed)
log_validation(
vae=vae,
net=net,
scheduler=val_noise_scheduler,
accelerator=accelerator,
width=cfg.data.train_width,
height=cfg.data.train_height,
imageproj=imageproj,
cfg=cfg,
save_dir=validation_dir,
global_step=global_step,
face_analysis_model_path=cfg.face_analysis_model_path
)
logs = {
"step_loss": loss.detach().item(),
"lr": lr_scheduler.get_last_lr()[0],
}
progress_bar.set_postfix(**logs)
if global_step >= cfg.solver.max_train_steps:
# process final module weight for stage2
if accelerator.is_main_process:
move_final_checkpoint(save_dir, module_dir, "reference_unet")
move_final_checkpoint(save_dir, module_dir, "imageproj")
move_final_checkpoint(save_dir, module_dir, "denoising_unet")
move_final_checkpoint(save_dir, module_dir, "face_locator")
break
accelerator.wait_for_everyone()
accelerator.end_training()
|
Trains the model using the given configuration (cfg).
Args:
cfg (dict): The configuration dictionary containing the parameters for training.
Notes:
- This function trains the model using the given configuration.
- It initializes the necessary components for training, such as the pipeline, optimizer, and scheduler.
- The training progress is logged and tracked using the accelerator.
- The trained model is saved after the training is completed.
|
train_stage1_process
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage1_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage1_alltrain.py
|
MIT
|
def load_config(config_path: str) -> dict:
"""
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
"""
if config_path.endswith(".yaml"):
return OmegaConf.load(config_path)
if config_path.endswith(".py"):
return import_filename(config_path).cfg
raise ValueError("Unsupported format for config file")
|
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
|
load_config
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage1_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage1_alltrain.py
|
MIT
|
def forward(
self,
noisy_latents: torch.Tensor,
timesteps: torch.Tensor,
ref_image_latents: torch.Tensor,
face_emb: torch.Tensor,
audio_emb: torch.Tensor,
mask: torch.Tensor,
full_mask: torch.Tensor,
face_mask: torch.Tensor,
lip_mask: torch.Tensor,
uncond_img_fwd: bool = False,
uncond_audio_fwd: bool = False,
):
"""
simple docstring to prevent pylint error
"""
face_emb = self.imageproj(face_emb)
mask = mask.to(device="cuda")
mask_feature = self.face_locator(mask)
audio_emb = audio_emb.to(
device=self.audioproj.device, dtype=self.audioproj.dtype)
audio_emb = self.audioproj(audio_emb)
# condition forward
if not uncond_img_fwd:
ref_timesteps = torch.zeros_like(timesteps)
ref_timesteps = repeat(
ref_timesteps,
"b -> (repeat b)",
repeat=ref_image_latents.size(0) // ref_timesteps.size(0),
)
self.reference_unet(
ref_image_latents,
ref_timesteps,
encoder_hidden_states=face_emb,
return_dict=False,
)
self.reference_control_reader.update(self.reference_control_writer)
if uncond_audio_fwd:
audio_emb = torch.zeros_like(audio_emb).to(
device=audio_emb.device, dtype=audio_emb.dtype
)
model_pred = self.denoising_unet(
noisy_latents,
timesteps,
mask_cond_fea=mask_feature,
encoder_hidden_states=face_emb,
audio_embedding=audio_emb,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask
).sample
return model_pred
|
simple docstring to prevent pylint error
|
forward
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2.py
|
MIT
|
def get_attention_mask(mask: torch.Tensor, weight_dtype: torch.dtype) -> torch.Tensor:
"""
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor): The input mask tensor.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
"""
if isinstance(mask, List):
_mask = []
for m in mask:
_mask.append(
rearrange(m, "b f 1 h w -> (b f) (h w)").to(weight_dtype))
return _mask
mask = rearrange(mask, "b f 1 h w -> (b f) (h w)").to(weight_dtype)
return mask
|
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor): The input mask tensor.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
|
get_attention_mask
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2.py
|
MIT
|
def get_noise_scheduler(cfg: argparse.Namespace) -> Tuple[DDIMScheduler, DDIMScheduler]:
"""
Create noise scheduler for training.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
"""
sched_kwargs = OmegaConf.to_container(cfg.noise_scheduler_kwargs)
if cfg.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
train_noise_scheduler = DDIMScheduler(**sched_kwargs)
return train_noise_scheduler, val_noise_scheduler
|
Create noise scheduler for training.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
|
get_noise_scheduler
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2.py
|
MIT
|
def process_audio_emb(audio_emb: torch.Tensor) -> torch.Tensor:
"""
Build a sliding window of audio embeddings: for each frame index i, stack the embeddings at offsets i-2..i+2, clamping indices to the valid range.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor of shape (num_frames, ...).
Returns:
torch.Tensor: A tensor of shape (num_frames, 5, ...) containing the windowed audio embeddings.
"""
concatenated_tensors = []
for i in range(audio_emb.shape[0]):
vectors_to_concat = [
audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)]for j in range(-2, 3)]
concatenated_tensors.append(torch.stack(vectors_to_concat, dim=0))
audio_emb = torch.stack(concatenated_tensors, dim=0)
return audio_emb
|
Build a sliding window of audio embeddings: for each frame index i, stack the embeddings at offsets i-2..i+2, clamping indices to the valid range.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor of shape (num_frames, ...).
Returns:
torch.Tensor: A tensor of shape (num_frames, 5, ...) containing the windowed audio embeddings.
|
process_audio_emb
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2.py
|
MIT
|
def log_validation(
accelerator: Accelerator,
vae: AutoencoderKL,
net: Net,
scheduler: DDIMScheduler,
width: int,
height: int,
clip_length: int = 24,
generator: torch.Generator = None,
cfg: dict = None,
save_dir: str = None,
global_step: int = 0,
times: int = None,
face_analysis_model_path: str = "",
) -> torch.Tensor:
"""
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
torch.Tensor: The tensor result of the validation.
"""
ori_net = accelerator.unwrap_model(net)
reference_unet = ori_net.reference_unet
denoising_unet = ori_net.denoising_unet
face_locator = ori_net.face_locator
imageproj = ori_net.imageproj
audioproj = ori_net.audioproj
generator = torch.manual_seed(42)
tmp_denoising_unet = copy.deepcopy(denoising_unet)
pipeline = FaceAnimatePipeline(
vae=vae,
reference_unet=reference_unet,
denoising_unet=tmp_denoising_unet,
face_locator=face_locator,
image_proj=imageproj,
scheduler=scheduler,
)
pipeline = pipeline.to("cuda")
image_processor = ImageProcessor((width, height), face_analysis_model_path)
audio_processor = AudioProcessor(
cfg.data.sample_rate,
cfg.data.fps,
cfg.wav2vec_config.model_path,
cfg.wav2vec_config.features == "last",
os.path.dirname(cfg.audio_separator.model_path),
os.path.basename(cfg.audio_separator.model_path),
os.path.join(save_dir, '.cache', "audio_preprocess")
)
for idx, ref_img_path in enumerate(cfg.ref_img_path):
audio_path = cfg.audio_path[idx]
source_image_pixels, \
source_image_face_region, \
source_image_face_emb, \
source_image_full_mask, \
source_image_face_mask, \
source_image_lip_mask = image_processor.preprocess(
ref_img_path, os.path.join(save_dir, '.cache'), cfg.face_expand_ratio)
audio_emb, audio_length = audio_processor.preprocess(
audio_path, clip_length)
audio_emb = process_audio_emb(audio_emb)
source_image_pixels = source_image_pixels.unsqueeze(0)
source_image_face_region = source_image_face_region.unsqueeze(0)
source_image_face_emb = source_image_face_emb.reshape(1, -1)
source_image_face_emb = torch.tensor(source_image_face_emb)
source_image_full_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_full_mask
]
source_image_face_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_face_mask
]
source_image_lip_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_lip_mask
]
times = audio_emb.shape[0] // clip_length
tensor_result = []
generator = torch.manual_seed(42)
for t in range(times):
print(f"[{t+1}/{times}]")
if len(tensor_result) == 0:
# The first iteration
motion_zeros = source_image_pixels.repeat(
cfg.data.n_motion_frames, 1, 1, 1)
motion_zeros = motion_zeros.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_zeros], dim=0) # concat the ref image and the first motion frames
else:
motion_frames = tensor_result[-1][0]
motion_frames = motion_frames.permute(1, 0, 2, 3)
motion_frames = motion_frames[0 - cfg.data.n_motion_frames:]
motion_frames = motion_frames * 2.0 - 1.0
motion_frames = motion_frames.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_frames], dim=0) # concat the ref image and the motion frames
pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
audio_tensor = audio_emb[
t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])
]
audio_tensor = audio_tensor.unsqueeze(0)
audio_tensor = audio_tensor.to(
device=audioproj.device, dtype=audioproj.dtype)
audio_tensor = audioproj(audio_tensor)
pipeline_output = pipeline(
ref_image=pixel_values_ref_img,
audio_tensor=audio_tensor,
face_emb=source_image_face_emb,
face_mask=source_image_face_region,
pixel_values_full_mask=source_image_full_mask,
pixel_values_face_mask=source_image_face_mask,
pixel_values_lip_mask=source_image_lip_mask,
width=cfg.data.train_width,
height=cfg.data.train_height,
video_length=clip_length,
num_inference_steps=cfg.inference_steps,
guidance_scale=cfg.cfg_scale,
generator=generator,
)
tensor_result.append(pipeline_output.videos)
tensor_result = torch.cat(tensor_result, dim=2)
tensor_result = tensor_result.squeeze(0)
tensor_result = tensor_result[:, :audio_length]
audio_name = os.path.basename(audio_path).split('.')[0]
ref_name = os.path.basename(ref_img_path).split('.')[0]
output_file = os.path.join(save_dir,f"{global_step}_{ref_name}_{audio_name}.mp4")
# save the result after all iterations
tensor_to_video(tensor_result, output_file, audio_path)
# clean up
del tmp_denoising_unet
del pipeline
del image_processor
del audio_processor
torch.cuda.empty_cache()
return tensor_result
|
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
torch.Tensor: The tensor result of the validation.
|
log_validation
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2.py
|
MIT
|
def load_config(config_path: str) -> dict:
"""
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
"""
if config_path.endswith(".yaml"):
return OmegaConf.load(config_path)
if config_path.endswith(".py"):
return import_filename(config_path).cfg
raise ValueError("Unsupported format for config file")
|
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
|
load_config
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2.py
|
MIT
|
def forward(
self,
noisy_latents: torch.Tensor,
timesteps: torch.Tensor,
ref_image_latents: torch.Tensor,
face_emb: torch.Tensor,
audio_emb: torch.Tensor,
mask: torch.Tensor,
full_mask: torch.Tensor,
face_mask: torch.Tensor,
lip_mask: torch.Tensor,
uncond_img_fwd: bool = False,
uncond_audio_fwd: bool = False,
):
"""
Forward pass of the Net wrapper: projects the face embedding with imageproj, encodes the face mask with the face locator, projects the audio embedding with audioproj (zeroed when uncond_audio_fwd is set), optionally runs the reference UNet to populate the reference-attention states, and returns the denoising UNet's prediction.
"""
face_emb = self.imageproj(face_emb)
mask = mask.to(device="cuda")
mask_feature = self.face_locator(mask)
audio_emb = audio_emb.to(
device=self.audioproj.device, dtype=self.audioproj.dtype)
audio_emb = self.audioproj(audio_emb)
# condition forward
if not uncond_img_fwd:
ref_timesteps = torch.zeros_like(timesteps)
ref_timesteps = repeat(
ref_timesteps,
"b -> (repeat b)",
repeat=ref_image_latents.size(0) // ref_timesteps.size(0),
)
self.reference_unet(
ref_image_latents,
ref_timesteps,
encoder_hidden_states=face_emb,
return_dict=False,
)
self.reference_control_reader.update(self.reference_control_writer)
if uncond_audio_fwd:
audio_emb = torch.zeros_like(audio_emb).to(
device=audio_emb.device, dtype=audio_emb.dtype
)
model_pred = self.denoising_unet(
noisy_latents,
timesteps,
mask_cond_fea=mask_feature,
encoder_hidden_states=face_emb,
audio_embedding=audio_emb,
full_mask=full_mask,
face_mask=face_mask,
lip_mask=lip_mask
).sample
return model_pred
|
Forward pass of the Net wrapper: projects the face embedding with imageproj, encodes the face mask with the face locator, projects the audio embedding with audioproj (zeroed when uncond_audio_fwd is set), optionally runs the reference UNet to populate the reference-attention states, and returns the denoising UNet's prediction.
|
forward
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2_alltrain.py
|
MIT
|
def get_attention_mask(mask: torch.Tensor, weight_dtype: torch.dtype) -> torch.Tensor:
"""
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor): The input mask tensor.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
"""
if isinstance(mask, List):
_mask = []
for m in mask:
_mask.append(
rearrange(m, "b f 1 h w -> (b f) (h w)").to(weight_dtype))
return _mask
mask = rearrange(mask, "b f 1 h w -> (b f) (h w)").to(weight_dtype)
return mask
|
Rearrange the mask tensors to the required format.
Args:
mask (torch.Tensor): The input mask tensor.
weight_dtype (torch.dtype): The data type for the mask tensor.
Returns:
torch.Tensor: The rearranged mask tensor.
|
get_attention_mask
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2_alltrain.py
|
MIT
|
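A minimal shape sketch for get_attention_mask, assuming the helper above is in scope and torch/einops are installed; the mask dimensions below are illustrative, not taken from the repo's configs.

import torch

# toy mask shaped like the real input: (batch, frames, 1, height, width)
mask = torch.rand(2, 14, 1, 64, 64)

flat = get_attention_mask(mask, torch.float16)
print(flat.shape)  # torch.Size([28, 4096]) -> (batch*frames, height*width)

# a list of masks is handled element-wise and returned as a list
flat_list = get_attention_mask([mask, mask], torch.float16)
print(len(flat_list), flat_list[0].shape)  # 2 torch.Size([28, 4096])
|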
def get_noise_scheduler(cfg: argparse.Namespace) -> Tuple[DDIMScheduler, DDIMScheduler]:
"""
Create noise scheduler for training.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
"""
sched_kwargs = OmegaConf.to_container(cfg.noise_scheduler_kwargs)
if cfg.enable_zero_snr:
sched_kwargs.update(
rescale_betas_zero_snr=True,
timestep_spacing="trailing",
prediction_type="v_prediction",
)
val_noise_scheduler = DDIMScheduler(**sched_kwargs)
sched_kwargs.update({"beta_schedule": "scaled_linear"})
train_noise_scheduler = DDIMScheduler(**sched_kwargs)
return train_noise_scheduler, val_noise_scheduler
|
Create noise scheduler for training.
Args:
cfg (argparse.Namespace): Configuration object.
Returns:
Tuple[DDIMScheduler, DDIMScheduler]: Train noise scheduler and validation noise scheduler.
|
get_noise_scheduler
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2_alltrain.py
|
MIT
|
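A usage sketch for get_noise_scheduler, assuming the function above is in scope and a recent diffusers release is installed; the scheduler kwargs are illustrative values, not the repo's actual config.

from omegaconf import OmegaConf

# hypothetical config mirroring the fields the function reads
cfg = OmegaConf.create({
    "enable_zero_snr": True,
    "noise_scheduler_kwargs": {
        "num_train_timesteps": 1000,
        "beta_start": 0.00085,
        "beta_end": 0.012,
        "beta_schedule": "linear",
        "steps_offset": 1,
        "clip_sample": False,
    },
})

train_sched, val_sched = get_noise_scheduler(cfg)
# the validation scheduler keeps the original beta schedule (plus zero-SNR rescaling),
# the training scheduler additionally switches to "scaled_linear" betas
print(train_sched.config.beta_schedule, val_sched.config.beta_schedule)  # scaled_linear linear
|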
def process_audio_emb(audio_emb: torch.Tensor) -> torch.Tensor:
"""
Process the audio embedding to concatenate with other tensors.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor to process.
Returns:
concatenated_tensors (List[torch.Tensor]): The concatenated tensor list.
"""
concatenated_tensors = []
for i in range(audio_emb.shape[0]):
vectors_to_concat = [
audio_emb[max(min(i + j, audio_emb.shape[0] - 1), 0)] for j in range(-2, 3)]
concatenated_tensors.append(torch.stack(vectors_to_concat, dim=0))
audio_emb = torch.stack(concatenated_tensors, dim=0)
return audio_emb
|
Process the audio embedding to concatenate with other tensors.
Parameters:
audio_emb (torch.Tensor): The audio embedding tensor to process.
Returns:
concatenated_tensors (List[torch.Tensor]): The concatenated tensor list.
|
process_audio_emb
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2_alltrain.py
|
MIT
|
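A shape-only sketch of process_audio_emb, assuming the function above is importable; it shows how each frame's embedding is stacked with its two neighbours on either side, clamped at the clip boundaries (the feature size is a toy value).

import torch

audio_emb = torch.randn(10, 768)      # 10 frames of per-frame audio features (toy dims)
windowed = process_audio_emb(audio_emb)
print(windowed.shape)                 # torch.Size([10, 5, 768]): frame i carries frames i-2 .. i+2
|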
def log_validation(
accelerator: Accelerator,
vae: AutoencoderKL,
net: Net,
scheduler: DDIMScheduler,
width: int,
height: int,
clip_length: int = 24,
generator: torch.Generator = None,
cfg: dict = None,
save_dir: str = None,
global_step: int = 0,
times: int = None,
face_analysis_model_path: str = "",
) -> torch.Tensor:
"""
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
torch.Tensor: The tensor result of the validation.
"""
ori_net = accelerator.unwrap_model(net)
reference_unet = ori_net.reference_unet
denoising_unet = ori_net.denoising_unet
face_locator = ori_net.face_locator
imageproj = ori_net.imageproj
audioproj = ori_net.audioproj
generator = torch.manual_seed(42)
tmp_denoising_unet = copy.deepcopy(denoising_unet)
pipeline = FaceAnimatePipeline(
vae=vae,
reference_unet=reference_unet,
denoising_unet=tmp_denoising_unet,
face_locator=face_locator,
image_proj=imageproj,
scheduler=scheduler,
)
pipeline = pipeline.to("cuda")
image_processor = ImageProcessor((width, height), face_analysis_model_path)
audio_processor = AudioProcessor(
cfg.data.sample_rate,
cfg.data.fps,
cfg.wav2vec_config.model_path,
cfg.wav2vec_config.features == "last",
os.path.dirname(cfg.audio_separator.model_path),
os.path.basename(cfg.audio_separator.model_path),
os.path.join(save_dir, '.cache', "audio_preprocess")
)
for idx, ref_img_path in enumerate(cfg.ref_img_path):
audio_path = cfg.audio_path[idx]
source_image_pixels, \
source_image_face_region, \
source_image_face_emb, \
source_image_full_mask, \
source_image_face_mask, \
source_image_lip_mask = image_processor.preprocess(
ref_img_path, os.path.join(save_dir, '.cache'), cfg.face_expand_ratio)
audio_emb, audio_length = audio_processor.preprocess(
audio_path, clip_length)
audio_emb = process_audio_emb(audio_emb)
source_image_pixels = source_image_pixels.unsqueeze(0)
source_image_face_region = source_image_face_region.unsqueeze(0)
source_image_face_emb = source_image_face_emb.reshape(1, -1)
source_image_face_emb = torch.tensor(source_image_face_emb)
source_image_full_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_full_mask
]
source_image_face_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_face_mask
]
source_image_lip_mask = [
(mask.repeat(clip_length, 1))
for mask in source_image_lip_mask
]
times = audio_emb.shape[0] // clip_length
tensor_result = []
generator = torch.manual_seed(42)
for t in range(times):
print(f"[{t+1}/{times}]")
if len(tensor_result) == 0:
# The first iteration
motion_zeros = source_image_pixels.repeat(
cfg.data.n_motion_frames, 1, 1, 1)
motion_zeros = motion_zeros.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_zeros], dim=0) # concat the ref image and the first motion frames
else:
motion_frames = tensor_result[-1][0]
motion_frames = motion_frames.permute(1, 0, 2, 3)
motion_frames = motion_frames[0 - cfg.data.n_motion_frames:]
motion_frames = motion_frames * 2.0 - 1.0
motion_frames = motion_frames.to(
dtype=source_image_pixels.dtype, device=source_image_pixels.device)
pixel_values_ref_img = torch.cat(
[source_image_pixels, motion_frames], dim=0) # concat the ref image and the motion frames
pixel_values_ref_img = pixel_values_ref_img.unsqueeze(0)
audio_tensor = audio_emb[
t * clip_length: min((t + 1) * clip_length, audio_emb.shape[0])
]
audio_tensor = audio_tensor.unsqueeze(0)
audio_tensor = audio_tensor.to(
device=audioproj.device, dtype=audioproj.dtype)
audio_tensor = audioproj(audio_tensor)
pipeline_output = pipeline(
ref_image=pixel_values_ref_img,
audio_tensor=audio_tensor,
face_emb=source_image_face_emb,
face_mask=source_image_face_region,
pixel_values_full_mask=source_image_full_mask,
pixel_values_face_mask=source_image_face_mask,
pixel_values_lip_mask=source_image_lip_mask,
width=cfg.data.train_width,
height=cfg.data.train_height,
video_length=clip_length,
num_inference_steps=cfg.inference_steps,
guidance_scale=cfg.cfg_scale,
generator=generator,
)
tensor_result.append(pipeline_output.videos)
tensor_result = torch.cat(tensor_result, dim=2)
tensor_result = tensor_result.squeeze(0)
tensor_result = tensor_result[:, :audio_length]
audio_name = os.path.basename(audio_path).split('.')[0]
ref_name = os.path.basename(ref_img_path).split('.')[0]
output_file = os.path.join(save_dir, f"{global_step}_{ref_name}_{audio_name}.mp4")
# save the result after all iterations
tensor_to_video(tensor_result, output_file, audio_path)
# clean up
del tmp_denoising_unet
del pipeline
del image_processor
del audio_processor
torch.cuda.empty_cache()
return tensor_result
|
Log validation video during the training process.
Args:
accelerator (Accelerator): The accelerator for distributed training.
vae (AutoencoderKL): The autoencoder model.
net (Net): The main neural network model.
scheduler (DDIMScheduler): The scheduler for noise.
width (int): The width of the input images.
height (int): The height of the input images.
clip_length (int): The length of the video clips. Defaults to 24.
generator (torch.Generator): The random number generator. Defaults to None.
cfg (dict): The configuration dictionary. Defaults to None.
save_dir (str): The directory to save validation results. Defaults to None.
global_step (int): The current global step in training. Defaults to 0.
times (int): The number of inference times. Defaults to None.
face_analysis_model_path (str): The path to the face analysis model. Defaults to "".
Returns:
torch.Tensor: The tensor result of the validation.
|
log_validation
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2_alltrain.py
|
MIT
|
def load_config(config_path: str) -> dict:
"""
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
"""
if config_path.endswith(".yaml"):
return OmegaConf.load(config_path)
if config_path.endswith(".py"):
return import_filename(config_path).cfg
raise ValueError("Unsupported format for config file")
|
Loads the configuration file.
Args:
config_path (str): Path to the configuration file.
Returns:
dict: The configuration dictionary.
|
load_config
|
python
|
jdh-algo/JoyHallo
|
scripts/train_stage2_alltrain.py
|
https://github.com/jdh-algo/JoyHallo/blob/master/scripts/train_stage2_alltrain.py
|
MIT
|
def preprocess(
self, video_path: Path | None, image_path: Path | None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Loads and preprocesses a video and an image.
If either path is None, no preprocessing will be done for that input.
Args:
video_path: Path to the video file to load
image_path: Path to the image file to load
Returns:
A tuple containing:
- video(torch.Tensor) of shape [F, C, H, W] where F is number of frames,
C is number of channels, H is height and W is width
- image(torch.Tensor) of shape [C, H, W]
"""
raise NotImplementedError("Subclass must implement this method")
|
Loads and preprocesses a video and an image.
If either path is None, no preprocessing will be done for that input.
Args:
video_path: Path to the video file to load
image_path: Path to the image file to load
Returns:
A tuple containing:
- video(torch.Tensor) of shape [F, C, H, W] where F is number of frames,
C is number of channels, H is height and W is width
- image(torch.Tensor) of shape [C, H, W]
|
preprocess
|
python
|
THUDM/CogVideo
|
finetune/datasets/i2v_dataset.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/datasets/i2v_dataset.py
|
Apache-2.0
|
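A hypothetical subclass sketch showing one way the abstract preprocess above could be implemented with the resize helpers from finetune/datasets/utils.py (preprocess_video_with_resize and preprocess_image_with_resize appear in the following records and are assumed to be in scope); the class name and target sizes are placeholders, not the repo's actual dataset class.

from pathlib import Path
from typing import Tuple
import torch

class ResizingI2VDataset:  # hypothetical stand-in, for illustration only
    def __init__(self, max_num_frames: int = 49, height: int = 480, width: int = 720):
        self.max_num_frames = max_num_frames
        self.height = height
        self.width = width

    def preprocess(self, video_path: Path | None, image_path: Path | None) -> Tuple[torch.Tensor, torch.Tensor]:
        video = image = None
        if video_path is not None:
            video = preprocess_video_with_resize(video_path, self.max_num_frames, self.height, self.width)
        if image_path is not None:
            image = preprocess_image_with_resize(image_path, self.height, self.width)
        return video, image
|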
def preprocess_image_with_resize(
image_path: Path | str,
height: int,
width: int,
) -> torch.Tensor:
"""
Loads and resizes a single image.
Args:
image_path: Path to the image file.
height: Target height for resizing.
width: Target width for resizing.
Returns:
torch.Tensor: Image tensor with shape [C, H, W] where:
C = number of channels (3 for RGB)
H = height
W = width
"""
if isinstance(image_path, str):
image_path = Path(image_path)
image = cv2.imread(image_path.as_posix())
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, (width, height))
image = torch.from_numpy(image).float()
image = image.permute(2, 0, 1).contiguous()
return image
|
Loads and resizes a single image.
Args:
image_path: Path to the image file.
height: Target height for resizing.
width: Target width for resizing.
Returns:
torch.Tensor: Image tensor with shape [C, H, W] where:
C = number of channels (3 for RGB)
H = height
W = width
|
preprocess_image_with_resize
|
python
|
THUDM/CogVideo
|
finetune/datasets/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/datasets/utils.py
|
Apache-2.0
|
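A usage sketch for preprocess_image_with_resize, assuming the function is in scope and opencv-python is installed; "example.jpg" is a placeholder path for any image OpenCV can read.

img = preprocess_image_with_resize("example.jpg", height=480, width=720)
print(img.shape, img.dtype)  # torch.Size([3, 480, 720]) torch.float32
|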
def preprocess_video_with_resize(
video_path: Path | str,
max_num_frames: int,
height: int,
width: int,
) -> torch.Tensor:
"""
Loads and resizes a single video.
The function processes the video through these steps:
1. If video frame count < max_num_frames, pad by repeating the last frame
2. If video frame count > max_num_frames, downsample frames evenly
3. If video dimensions don't match (height, width), resize frames
Args:
video_path: Path to the video file.
max_num_frames: Maximum number of frames to keep.
height: Target height for resizing.
width: Target width for resizing.
Returns:
A torch.Tensor with shape [F, C, H, W] where:
F = number of frames
C = number of channels (3 for RGB)
H = height
W = width
"""
if isinstance(video_path, str):
video_path = Path(video_path)
video_reader = decord.VideoReader(uri=video_path.as_posix(), width=width, height=height)
video_num_frames = len(video_reader)
if video_num_frames < max_num_frames:
# Get all frames first
frames = video_reader.get_batch(list(range(video_num_frames)))
# Repeat the last frame until we reach max_num_frames
last_frame = frames[-1:]
num_repeats = max_num_frames - video_num_frames
repeated_frames = last_frame.repeat(num_repeats, 1, 1, 1)
frames = torch.cat([frames, repeated_frames], dim=0)
return frames.float().permute(0, 3, 1, 2).contiguous()
else:
indices = list(range(0, video_num_frames, video_num_frames // max_num_frames))
frames = video_reader.get_batch(indices)
frames = frames[:max_num_frames].float()
frames = frames.permute(0, 3, 1, 2).contiguous()
return frames
|
Loads and resizes a single video.
The function processes the video through these steps:
1. If video frame count < max_num_frames, pad by repeating the last frame
2. If video frame count > max_num_frames, downsample frames evenly
3. If video dimensions don't match (height, width), resize frames
Args:
video_path: Path to the video file.
max_num_frames: Maximum number of frames to keep.
height: Target height for resizing.
width: Target width for resizing.
Returns:
A torch.Tensor with shape [F, C, H, W] where:
F = number of frames
C = number of channels (3 for RGB)
H = height
W = width
|
preprocess_video_with_resize
|
python
|
THUDM/CogVideo
|
finetune/datasets/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/datasets/utils.py
|
Apache-2.0
|
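The even-downsampling branch above reduces to a stride computed by integer division; a tiny pure-Python check of that index arithmetic (no video I/O needed, toy numbers only). The function reads frames at these indices and keeps the first max_num_frames of them.

video_num_frames = 100   # toy frame count
max_num_frames = 16

indices = list(range(0, video_num_frames, video_num_frames // max_num_frames))[:max_num_frames]
print(len(indices), indices[:4])   # 16 [0, 6, 12, 18]
|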
def preprocess_video_with_buckets(
video_path: Path,
resolution_buckets: List[Tuple[int, int, int]],
) -> torch.Tensor:
"""
Args:
video_path: Path to the video file.
resolution_buckets: List of tuples (num_frames, height, width) representing
available resolution buckets.
Returns:
torch.Tensor: Video tensor with shape [F, C, H, W] where:
F = number of frames
C = number of channels (3 for RGB)
H = height
W = width
The function processes the video through these steps:
1. Finds nearest frame bucket <= video frame count
2. Downsamples frames evenly to match bucket size
3. Finds nearest resolution bucket based on dimensions
4. Resizes frames to match bucket resolution
"""
video_reader = decord.VideoReader(uri=video_path.as_posix())
video_num_frames = len(video_reader)
resolution_buckets = [bucket for bucket in resolution_buckets if bucket[0] <= video_num_frames]
if len(resolution_buckets) == 0:
raise ValueError(
f"video frame count in {video_path} is less than all frame buckets {resolution_buckets}"
)
nearest_frame_bucket = min(
resolution_buckets,
key=lambda bucket: video_num_frames - bucket[0],
default=1,
)[0]
frame_indices = list(range(0, video_num_frames, video_num_frames // nearest_frame_bucket))
frames = video_reader.get_batch(frame_indices)
frames = frames[:nearest_frame_bucket].float()
frames = frames.permute(0, 3, 1, 2).contiguous()
nearest_res = min(
resolution_buckets, key=lambda x: abs(x[1] - frames.shape[2]) + abs(x[2] - frames.shape[3])
)
nearest_res = (nearest_res[1], nearest_res[2])
frames = torch.stack([resize(f, nearest_res) for f in frames], dim=0)
return frames
|
Args:
video_path: Path to the video file.
resolution_buckets: List of tuples (num_frames, height, width) representing
available resolution buckets.
Returns:
torch.Tensor: Video tensor with shape [F, C, H, W] where:
F = number of frames
C = number of channels (3 for RGB)
H = height
W = width
The function processes the video through these steps:
1. Finds nearest frame bucket <= video frame count
2. Downsamples frames evenly to match bucket size
3. Finds nearest resolution bucket based on dimensions
4. Resizes frames to match bucket resolution
|
preprocess_video_with_buckets
|
python
|
THUDM/CogVideo
|
finetune/datasets/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/datasets/utils.py
|
Apache-2.0
|
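A pure-Python sketch of the bucket-selection logic used above, with made-up buckets; it mirrors how the nearest frame bucket not exceeding the frame count and the nearest spatial resolution (by L1 distance) are chosen.

video_num_frames, height, width = 60, 500, 700   # toy measured properties
resolution_buckets = [(17, 480, 720), (49, 480, 720), (49, 720, 1280)]  # (frames, H, W), illustrative

# keep only buckets whose frame count fits, then take the largest one (closest from below)
fitting = [b for b in resolution_buckets if b[0] <= video_num_frames]
nearest_frames = min(fitting, key=lambda b: video_num_frames - b[0])[0]

# nearest spatial bucket by L1 distance on (H, W)
nearest_res = min(resolution_buckets, key=lambda b: abs(b[1] - height) + abs(b[2] - width))[1:]
print(nearest_frames, nearest_res)   # 49 (480, 720)
|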
def register(model_name: str, training_type: Literal["lora", "sft"], trainer_cls: Trainer):
"""Register a model and its associated functions for a specific training type.
Args:
model_name (str): Name of the model to register (e.g. "cogvideox-5b")
training_type (Literal["lora", "sft"]): Type of training - either "lora" or "sft"
trainer_cls (Trainer): Trainer class to register.
"""
# Check if model_name and training_type exists in SUPPORTED_MODELS
if model_name not in SUPPORTED_MODELS:
SUPPORTED_MODELS[model_name] = {}
else:
if training_type in SUPPORTED_MODELS[model_name]:
raise ValueError(f"Training type {training_type} already exists for model {model_name}")
SUPPORTED_MODELS[model_name][training_type] = trainer_cls
|
Register a model and its associated functions for a specific training type.
Args:
model_name (str): Name of the model to register (e.g. "cogvideox-5b")
training_type (Literal["lora", "sft"]): Type of training - either "lora" or "sft"
trainer_cls (Trainer): Trainer class to register.
|
register
|
python
|
THUDM/CogVideo
|
finetune/models/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/models/utils.py
|
Apache-2.0
|
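A usage sketch for register, assuming it (and its module-level SUPPORTED_MODELS registry) is importable from finetune/models/utils.py; MyLoraTrainer is a hypothetical placeholder for a real Trainer subclass.

class MyLoraTrainer:  # hypothetical stand-in for a real Trainer subclass
    pass

register("cogvideox-5b", "lora", MyLoraTrainer)

# registering the same (model, training_type) pair twice raises ValueError
try:
    register("cogvideox-5b", "lora", MyLoraTrainer)
except ValueError as err:
    print(err)
|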
def validation_step(
self, eval_data: Dict[str, Any], pipe: CogVideoXImageToVideoPipeline
) -> List[Tuple[str, Image.Image | List[Image.Image]]]:
"""
Return the data that needs to be saved. For videos, the data format is List[PIL],
and for images, the data format is PIL
"""
prompt, image, video = eval_data["prompt"], eval_data["image"], eval_data["video"]
video_generate = pipe(
num_frames=self.state.train_frames,
height=self.state.train_height,
width=self.state.train_width,
prompt=prompt,
image=image,
generator=self.state.generator,
).frames[0]
return [("video", video_generate)]
|
Return the data that needs to be saved. For videos, the data format is List[PIL],
and for images, the data format is PIL
|
validation_step
|
python
|
THUDM/CogVideo
|
finetune/models/cogvideox_i2v/lora_trainer.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/models/cogvideox_i2v/lora_trainer.py
|
Apache-2.0
|
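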
def validation_step(
self, eval_data: Dict[str, Any], pipe: CogVideoXPipeline
) -> List[Tuple[str, Image.Image | List[Image.Image]]]:
"""
Return the data that needs to be saved. For videos, the data format is List[PIL],
and for images, the data format is PIL
"""
prompt, image, video = eval_data["prompt"], eval_data["image"], eval_data["video"]
video_generate = pipe(
num_frames=self.state.train_frames,
height=self.state.train_height,
width=self.state.train_width,
prompt=prompt,
generator=self.state.generator,
).frames[0]
return [("video", video_generate)]
|
Return the data that needs to be saved. For videos, the data format is List[PIL],
and for images, the data format is PIL
|
validation_step
|
python
|
THUDM/CogVideo
|
finetune/models/cogvideox_t2v/lora_trainer.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/models/cogvideox_t2v/lora_trainer.py
|
Apache-2.0
|
def parse_args(cls):
"""Parse command line arguments and return Args instance"""
parser = argparse.ArgumentParser()
# Required arguments
parser.add_argument("--model_path", type=str, required=True)
parser.add_argument("--model_name", type=str, required=True)
parser.add_argument("--model_type", type=str, required=True)
parser.add_argument("--training_type", type=str, required=True)
parser.add_argument("--output_dir", type=str, required=True)
parser.add_argument("--data_root", type=str, required=True)
parser.add_argument("--caption_column", type=str, required=True)
parser.add_argument("--video_column", type=str, required=True)
parser.add_argument("--train_resolution", type=str, required=True)
parser.add_argument("--report_to", type=str, required=True)
# Training hyperparameters
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--train_epochs", type=int, default=10)
parser.add_argument("--train_steps", type=int, default=None)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument("--learning_rate", type=float, default=2e-5)
parser.add_argument("--optimizer", type=str, default="adamw")
parser.add_argument("--beta1", type=float, default=0.9)
parser.add_argument("--beta2", type=float, default=0.95)
parser.add_argument("--beta3", type=float, default=0.98)
parser.add_argument("--epsilon", type=float, default=1e-8)
parser.add_argument("--weight_decay", type=float, default=1e-4)
parser.add_argument("--max_grad_norm", type=float, default=1.0)
# Learning rate scheduler
parser.add_argument("--lr_scheduler", type=str, default="constant_with_warmup")
parser.add_argument("--lr_warmup_steps", type=int, default=100)
parser.add_argument("--lr_num_cycles", type=int, default=1)
parser.add_argument("--lr_power", type=float, default=1.0)
# Data loading
parser.add_argument("--num_workers", type=int, default=8)
parser.add_argument("--pin_memory", type=bool, default=True)
parser.add_argument("--image_column", type=str, default=None)
# Model configuration
parser.add_argument("--mixed_precision", type=str, default="no")
parser.add_argument("--gradient_checkpointing", type=bool, default=True)
parser.add_argument("--enable_slicing", type=bool, default=True)
parser.add_argument("--enable_tiling", type=bool, default=True)
parser.add_argument("--nccl_timeout", type=int, default=1800)
# LoRA parameters
parser.add_argument("--rank", type=int, default=128)
parser.add_argument("--lora_alpha", type=int, default=64)
parser.add_argument(
"--target_modules", type=str, nargs="+", default=["to_q", "to_k", "to_v", "to_out.0"]
)
# Checkpointing
parser.add_argument("--checkpointing_steps", type=int, default=200)
parser.add_argument("--checkpointing_limit", type=int, default=10)
parser.add_argument("--resume_from_checkpoint", type=str, default=None)
# Validation
parser.add_argument("--do_validation", type=lambda x: x.lower() == 'true', default=False)
parser.add_argument("--validation_steps", type=int, default=None)
parser.add_argument("--validation_dir", type=str, default=None)
parser.add_argument("--validation_prompts", type=str, default=None)
parser.add_argument("--validation_images", type=str, default=None)
parser.add_argument("--validation_videos", type=str, default=None)
parser.add_argument("--gen_fps", type=int, default=15)
args = parser.parse_args()
# Convert video_resolution_buckets string to list of tuples
frames, height, width = args.train_resolution.split("x")
args.train_resolution = (int(frames), int(height), int(width))
return cls(**vars(args))
|
Parse command line arguments and return Args instance
|
parse_args
|
python
|
THUDM/CogVideo
|
finetune/schemas/args.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/schemas/args.py
|
Apache-2.0
|
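parse_args expects --train_resolution in the form "FRAMESxHEIGHTxWIDTH" and converts it into a (frames, height, width) tuple; a minimal sketch of that conversion, independent of argparse (the value shown is illustrative).

train_resolution = "49x480x720"   # hypothetical CLI value

frames, height, width = train_resolution.split("x")
train_resolution = (int(frames), int(height), int(width))
print(train_resolution)   # (49, 480, 720)
|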
def cast_training_params(model: Union[torch.nn.Module, List[torch.nn.Module]], dtype=torch.float32):
"""
Casts the training parameters of the model to the specified data type.
Args:
model: The PyTorch model whose parameters will be cast.
dtype: The data type to which the model parameters will be cast.
"""
if not isinstance(model, list):
model = [model]
for m in model:
for param in m.parameters():
# only upcast trainable parameters into fp32
if param.requires_grad:
param.data = param.to(dtype)
|
Casts the training parameters of the model to the specified data type.
Args:
model: The PyTorch model whose parameters will be cast.
dtype: The data type to which the model parameters will be cast.
|
cast_training_params
|
python
|
THUDM/CogVideo
|
finetune/utils/torch_utils.py
|
https://github.com/THUDM/CogVideo/blob/master/finetune/utils/torch_utils.py
|
Apache-2.0
|
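A small sketch of cast_training_params, assuming the function above is in scope; it upcasts only the trainable parameters of a toy module and leaves frozen ones in their original dtype.

import torch
import torch.nn as nn

model = nn.Linear(4, 4).to(torch.float16)   # toy module stored in fp16
model.bias.requires_grad_(False)            # pretend the bias is frozen

cast_training_params(model, dtype=torch.float32)
print(model.weight.dtype, model.bias.dtype)  # torch.float32 torch.float16
|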
def generate_video(
prompt: str,
model_path: str,
output_path: str = "./output.mp4",
num_inference_steps: int = 50,
guidance_scale: float = 6.0,
num_videos_per_prompt: int = 1,
quantization_scheme: str = "fp8",
dtype: torch.dtype = torch.bfloat16,
num_frames: int = 81,
fps: int = 8,
seed: int = 42,
):
"""
Generates a video based on the given prompt and saves it to the specified path.
Parameters:
- prompt (str): The description of the video to be generated.
- model_path (str): The path of the pre-trained model to be used.
- output_path (str): The path where the generated video will be saved.
- num_inference_steps (int): Number of steps for the inference process. More steps can result in better quality.
- guidance_scale (float): The scale for classifier-free guidance. Higher values can lead to better alignment with the prompt.
- num_videos_per_prompt (int): Number of videos to generate per prompt.
- quantization_scheme (str): The quantization scheme to use ('int8', 'fp8').
- dtype (torch.dtype): The data type for computation (default is torch.bfloat16).
"""
text_encoder = T5EncoderModel.from_pretrained(
model_path, subfolder="text_encoder", torch_dtype=dtype
)
text_encoder = quantize_model(part=text_encoder, quantization_scheme=quantization_scheme)
transformer = CogVideoXTransformer3DModel.from_pretrained(
model_path, subfolder="transformer", torch_dtype=dtype
)
transformer = quantize_model(part=transformer, quantization_scheme=quantization_scheme)
vae = AutoencoderKLCogVideoX.from_pretrained(model_path, subfolder="vae", torch_dtype=dtype)
vae = quantize_model(part=vae, quantization_scheme=quantization_scheme)
pipe = CogVideoXPipeline.from_pretrained(
model_path,
text_encoder=text_encoder,
transformer=transformer,
vae=vae,
torch_dtype=dtype,
)
pipe.scheduler = CogVideoXDPMScheduler.from_config(
pipe.scheduler.config, timestep_spacing="trailing"
)
pipe.enable_model_cpu_offload()
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()
video = pipe(
prompt=prompt,
num_videos_per_prompt=num_videos_per_prompt,
num_inference_steps=num_inference_steps,
num_frames=num_frames,
use_dynamic_cfg=True,
guidance_scale=guidance_scale,
generator=torch.Generator(device="cuda").manual_seed(seed),
).frames[0]
export_to_video(video, output_path, fps=fps)
|
Generates a video based on the given prompt and saves it to the specified path.
Parameters:
- prompt (str): The description of the video to be generated.
- model_path (str): The path of the pre-trained model to be used.
- output_path (str): The path where the generated video will be saved.
- num_inference_steps (int): Number of steps for the inference process. More steps can result in better quality.
- guidance_scale (float): The scale for classifier-free guidance. Higher values can lead to better alignment with the prompt.
- num_videos_per_prompt (int): Number of videos to generate per prompt.
- quantization_scheme (str): The quantization scheme to use ('int8', 'fp8').
- dtype (torch.dtype): The data type for computation (default is torch.bfloat16).
|
generate_video
|
python
|
THUDM/CogVideo
|
inference/cli_demo_quantization.py
|
https://github.com/THUDM/CogVideo/blob/master/inference/cli_demo_quantization.py
|
Apache-2.0
|
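A usage sketch for generate_video, assuming the script's dependencies (diffusers, the quantization helpers, a CUDA GPU) are available; the model path and prompt are placeholder values, not a recommendation.

import torch

generate_video(
    prompt="A panda playing a guitar in a bamboo forest",  # placeholder prompt
    model_path="THUDM/CogVideoX-2b",                       # placeholder model path
    output_path="./output.mp4",
    num_inference_steps=50,
    guidance_scale=6.0,
    quantization_scheme="fp8",
    dtype=torch.bfloat16,
    num_frames=49,
    fps=8,
    seed=42,
)
|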
def encode_video(model_path, video_path, dtype, device):
"""
Loads a pre-trained AutoencoderKLCogVideoX model and encodes the video frames.
Parameters:
- model_path (str): The path to the pre-trained model.
- video_path (str): The path to the video file.
- dtype (torch.dtype): The data type for computation.
- device (str): The device to use for computation (e.g., "cuda" or "cpu").
Returns:
- torch.Tensor: The encoded video frames.
"""
model = AutoencoderKLCogVideoX.from_pretrained(model_path, torch_dtype=dtype).to(device)
model.enable_slicing()
model.enable_tiling()
video_reader = imageio.get_reader(video_path, "ffmpeg")
frames = [transforms.ToTensor()(frame) for frame in video_reader]
video_reader.close()
frames_tensor = torch.stack(frames).to(device).permute(1, 0, 2, 3).unsqueeze(0).to(dtype)
with torch.no_grad():
encoded_frames = model.encode(frames_tensor)[0].sample()
return encoded_frames
|
Loads a pre-trained AutoencoderKLCogVideoX model and encodes the video frames.
Parameters:
- model_path (str): The path to the pre-trained model.
- video_path (str): The path to the video file.
- dtype (torch.dtype): The data type for computation.
- device (str): The device to use for computation (e.g., "cuda" or "cpu").
Returns:
- torch.Tensor: The encoded video frames.
|
encode_video
|
python
|
THUDM/CogVideo
|
inference/cli_vae_demo.py
|
https://github.com/THUDM/CogVideo/blob/master/inference/cli_vae_demo.py
|
Apache-2.0
|
def decode_video(model_path, encoded_tensor_path, dtype, device):
"""
Loads a pre-trained AutoencoderKLCogVideoX model and decodes the encoded video frames.
Parameters:
- model_path (str): The path to the pre-trained model.
- encoded_tensor_path (str): The path to the encoded tensor file.
- dtype (torch.dtype): The data type for computation.
- device (str): The device to use for computation (e.g., "cuda" or "cpu").
Returns:
- torch.Tensor: The decoded video frames.
"""
model = AutoencoderKLCogVideoX.from_pretrained(model_path, torch_dtype=dtype).to(device)
encoded_frames = torch.load(encoded_tensor_path, weights_only=True).to(device).to(dtype)
with torch.no_grad():
decoded_frames = model.decode(encoded_frames).sample
return decoded_frames
|
Loads a pre-trained AutoencoderKLCogVideoX model and decodes the encoded video frames.
Parameters:
- model_path (str): The path to the pre-trained model.
- encoded_tensor_path (str): The path to the encoded tensor file.
- dtype (torch.dtype): The data type for computation.
- device (str): The device to use for computation (e.g., "cuda" or "cpu").
Returns:
- torch.Tensor: The decoded video frames.
|
decode_video
|
python
|
THUDM/CogVideo
|
inference/cli_vae_demo.py
|
https://github.com/THUDM/CogVideo/blob/master/inference/cli_vae_demo.py
|
Apache-2.0
|
def save_video(tensor, output_path):
"""
Saves the video frames to a video file.
Parameters:
- tensor (torch.Tensor): The video frames' tensor.
- output_path (str): The path to save the output video.
"""
tensor = tensor.to(dtype=torch.float32)
frames = tensor[0].squeeze(0).permute(1, 2, 3, 0).cpu().numpy()
frames = np.clip(frames, 0, 1) * 255
frames = frames.astype(np.uint8)
writer = imageio.get_writer(output_path + "/output.mp4", fps=8)
for frame in frames:
writer.append_data(frame)
writer.close()
|
Saves the video frames to a video file.
Parameters:
- tensor (torch.Tensor): The video frames' tensor.
- output_path (str): The path to save the output video.
|
save_video
|
python
|
THUDM/CogVideo
|
inference/cli_vae_demo.py
|
https://github.com/THUDM/CogVideo/blob/master/inference/cli_vae_demo.py
|
Apache-2.0
|
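save_video expects a float tensor shaped [batch, channels, frames, height, width] with values in [0, 1] and writes <output_path>/output.mp4 at 8 fps; a toy call, assuming the function is in scope and imageio-ffmpeg is installed (the output directory is a placeholder).

import os
import torch

os.makedirs("./demo_out", exist_ok=True)
fake_frames = torch.rand(1, 3, 16, 64, 64)   # toy batch of 16 random RGB frames
save_video(fake_frames, "./demo_out")        # writes ./demo_out/output.mp4
|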
def convert_prompt(prompt: str, retry_times: int = 3, type: str = "t2v", image_path: str = None):
"""
Convert a prompt to a format that can be used by the model for inference
"""
client = OpenAI()
## If you are using Azure OpenAI, uncomment the lines below and comment out the line above
# client = AzureOpenAI(
# api_key="",
# api_version="",
# azure_endpoint=""
# )
text = prompt.strip()
for i in range(retry_times):
if type == "t2v":
response = client.chat.completions.create(
messages=[
{"role": "system", "content": f"{sys_prompt_t2v}"},
{
"role": "user",
"content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : " a girl is on the beach"',
},
{
"role": "assistant",
"content": "A radiant woman stands on a deserted beach, arms outstretched, wearing a beige trench coat, white blouse, light blue jeans, and chic boots, against a backdrop of soft sky and sea. Moments later, she is seen mid-twirl, arms exuberant, with the lighting suggesting dawn or dusk. Then, she runs along the beach, her attire complemented by an off-white scarf and black ankle boots, the tranquil sea behind her. Finally, she holds a paper airplane, her pose reflecting joy and freedom, with the ocean's gentle waves and the sky's soft pastel hues enhancing the serene ambiance.",
},
{
"role": "user",
"content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : " A man jogging on a football field"',
},
{
"role": "assistant",
"content": "A determined man in athletic attire, including a blue long-sleeve shirt, black shorts, and blue socks, jogs around a snow-covered soccer field, showcasing his solitary exercise in a quiet, overcast setting. His long dreadlocks, focused expression, and the serene winter backdrop highlight his dedication to fitness. As he moves, his attire, consisting of a blue sports sweatshirt, black athletic pants, gloves, and sneakers, grips the snowy ground. He is seen running past a chain-link fence enclosing the playground area, with a basketball hoop and children's slide, suggesting a moment of solitary exercise amidst the empty field.",
},
{
"role": "user",
"content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : " A woman is dancing, HD footage, close-up"',
},
{
"role": "assistant",
"content": "A young woman with her hair in an updo and wearing a teal hoodie stands against a light backdrop, initially looking over her shoulder with a contemplative expression. She then confidently makes a subtle dance move, suggesting rhythm and movement. Next, she appears poised and focused, looking directly at the camera. Her expression shifts to one of introspection as she gazes downward slightly. Finally, she dances with confidence, her left hand over her heart, symbolizing a poignant moment, all while dressed in the same teal hoodie against a plain, light-colored background.",
},
{
"role": "user",
"content": f'Create an imaginative video descriptive caption or modify an earlier caption in ENGLISH for the user input: " {text} "',
},
],
model="glm-4-plus", # glm-4-plus and gpt-4o have be tested
temperature=0.01,
top_p=0.7,
stream=False,
max_tokens=250,
)
else:
response = client.chat.completions.create(
model="gpt-4o",
messages=[
{"role": "system", "content": f"{sys_prompt_i2v}"},
{
"role": "user",
"content": [
{"type": "text", "text": prompt},
{
"type": "image_url",
"image_url": {
"url": image_to_url(image_path),
},
},
],
},
],
temperature=0.01,
top_p=0.7,
stream=False,
max_tokens=250,
)
if response.choices:
return response.choices[0].message.content
return prompt
|
Convert a prompt to a format that can be used by the model for inference
|
convert_prompt
|
python
|
THUDM/CogVideo
|
inference/convert_demo.py
|
https://github.com/THUDM/CogVideo/blob/master/inference/convert_demo.py
|
Apache-2.0
|
def read_video(
filename: str,
start_pts: Union[float, Fraction] = 0,
end_pts: Optional[Union[float, Fraction]] = None,
pts_unit: str = "pts",
output_format: str = "THWC",
) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]:
"""
Reads a video from a file, returning both the video frames and the audio frames
Args:
filename (str): path to the video file
start_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional):
The start presentation time of the video
end_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional):
The end presentation time
pts_unit (str, optional): unit in which start_pts and end_pts values will be interpreted,
either 'pts' or 'sec'. Defaults to 'pts'.
output_format (str, optional): The format of the output video tensors. Can be either "THWC" (default) or "TCHW".
Returns:
vframes (Tensor[T, H, W, C] or Tensor[T, C, H, W]): the `T` video frames
aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points
info (Dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int)
"""
output_format = output_format.upper()
if output_format not in ("THWC", "TCHW"):
raise ValueError(f"output_format should be either 'THWC' or 'TCHW', got {output_format}.")
_check_av_available()
if end_pts is None:
end_pts = float("inf")
if end_pts < start_pts:
raise ValueError(
f"end_pts should be larger than start_pts, got start_pts={start_pts} and end_pts={end_pts}"
)
info = {}
audio_frames = []
audio_timebase = _video_opt.default_timebase
with av.open(filename, metadata_errors="ignore") as container:
if container.streams.audio:
audio_timebase = container.streams.audio[0].time_base
if container.streams.video:
video_frames = _read_from_stream(
container,
start_pts,
end_pts,
pts_unit,
container.streams.video[0],
{"video": 0},
)
video_fps = container.streams.video[0].average_rate
# guard against potentially corrupted files
if video_fps is not None:
info["video_fps"] = float(video_fps)
if container.streams.audio:
audio_frames = _read_from_stream(
container,
start_pts,
end_pts,
pts_unit,
container.streams.audio[0],
{"audio": 0},
)
info["audio_fps"] = container.streams.audio[0].rate
aframes_list = [frame.to_ndarray() for frame in audio_frames]
vframes = torch.empty((0, 1, 1, 3), dtype=torch.uint8)
if aframes_list:
aframes = np.concatenate(aframes_list, 1)
aframes = torch.as_tensor(aframes)
if pts_unit == "sec":
start_pts = int(math.floor(start_pts * (1 / audio_timebase)))
if end_pts != float("inf"):
end_pts = int(math.ceil(end_pts * (1 / audio_timebase)))
aframes = _align_audio_frames(aframes, audio_frames, start_pts, end_pts)
else:
aframes = torch.empty((1, 0), dtype=torch.float32)
if output_format == "TCHW":
# [T,H,W,C] --> [T,C,H,W]
vframes = vframes.permute(0, 3, 1, 2)
return vframes, aframes, info
|
Reads a video from a file, returning both the video frames and the audio frames
Args:
filename (str): path to the video file
start_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional):
The start presentation time of the video
end_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional):
The end presentation time
pts_unit (str, optional): unit in which start_pts and end_pts values will be interpreted,
either 'pts' or 'sec'. Defaults to 'pts'.
output_format (str, optional): The format of the output video tensors. Can be either "THWC" (default) or "TCHW".
Returns:
vframes (Tensor[T, H, W, C] or Tensor[T, C, H, W]): the `T` video frames
aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points
info (Dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int)
|
read_video
|
python
|
THUDM/CogVideo
|
sat/data_video.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/data_video.py
|
Apache-2.0
|
def process_video(
video_path,
image_size=None,
duration=None,
num_frames=4,
wanted_fps=None,
actual_fps=None,
skip_frms_num=0.0,
nb_read_frames=None,
):
"""
video_path: str or io.BytesIO
image_size: .
duration: duration known in advance, used to speed up loading by seeking to the sampled start. TODO: bypass if unknown.
num_frames: wanted num_frames.
wanted_fps: .
skip_frms_num: ignore the first and the last xx frames, avoiding transitions.
"""
video = load_video_with_timeout(
video_path,
duration=duration,
num_frames=num_frames,
wanted_fps=wanted_fps,
actual_fps=actual_fps,
skip_frms_num=skip_frms_num,
nb_read_frames=nb_read_frames,
)
# --- copy and modify the image process ---
video = video.permute(0, 3, 1, 2) # [T, C, H, W]
# resize
if image_size is not None:
video = resize_for_rectangle_crop(video, image_size, reshape_mode="center")
return video
|
video_path: str or io.BytesIO
image_size: .
duration: duration known in advance, used to speed up loading by seeking to the sampled start. TODO: bypass if unknown.
num_frames: wanted num_frames.
wanted_fps: .
skip_frms_num: ignore the first and the last xx frames, avoiding transitions.
|
process_video
|
python
|
THUDM/CogVideo
|
sat/data_video.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/data_video.py
|
Apache-2.0
|
def __init__(self, data_dir, video_size, fps, max_num_frames, skip_frms_num=3):
"""
skip_frms_num: ignore the first and the last xx frames, avoiding transitions.
"""
super(SFTDataset, self).__init__()
self.video_size = video_size
self.fps = fps
self.max_num_frames = max_num_frames
self.skip_frms_num = skip_frms_num
self.video_paths = []
self.captions = []
for root, dirnames, filenames in os.walk(data_dir):
for filename in filenames:
if filename.endswith(".mp4"):
video_path = os.path.join(root, filename)
self.video_paths.append(video_path)
caption_path = video_path.replace(".mp4", ".txt").replace("videos", "labels")
if os.path.exists(caption_path):
caption = open(caption_path, "r").read().splitlines()[0]
else:
caption = ""
self.captions.append(caption)
|
skip_frms_num: ignore the first and the last xx frames, avoiding transitions.
|
__init__
|
python
|
THUDM/CogVideo
|
sat/data_video.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/data_video.py
|
Apache-2.0
|
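The constructor above walks data_dir for .mp4 files and looks for a caption .txt with the same stem under a sibling "labels" tree (via the "videos" -> "labels" path replacement); a sketch of the assumed layout and a hypothetical instantiation, assuming SFTDataset is importable from sat/data_video.py and the paths are illustrative.

# expected layout (illustrative paths):
#   data_dir/videos/clip_0001.mp4
#   data_dir/labels/clip_0001.txt      <- first line is used as the caption

dataset = SFTDataset(
    data_dir="data_dir",
    video_size=(480, 720),
    fps=8,
    max_num_frames=49,
    skip_frms_num=3,
)
print(len(dataset.video_paths), len(dataset.captions))  # equal counts; missing captions become ""
|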
def log_conditionings(self, batch: Dict, n: int) -> Dict:
"""
Defines heuristics to log different conditionings.
These can be lists of strings (text-to-image), tensors, ints, ...
"""
image_h, image_w = batch[self.input_key].shape[3:]
log = dict()
for embedder in self.conditioner.embedders:
if (
(self.log_keys is None) or (embedder.input_key in self.log_keys)
) and not self.no_cond_log:
x = batch[embedder.input_key][:n]
if isinstance(x, torch.Tensor):
if x.dim() == 1:
# class-conditional, convert integer to string
x = [str(x[i].item()) for i in range(x.shape[0])]
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 4)
elif x.dim() == 2:
# size and crop cond and the like
x = ["x".join([str(xx) for xx in x[i].tolist()]) for i in range(x.shape[0])]
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20)
else:
raise NotImplementedError()
elif isinstance(x, (List, ListConfig)):
if isinstance(x[0], str):
xc = log_txt_as_img((image_h, image_w), x, size=image_h // 20)
else:
raise NotImplementedError()
else:
raise NotImplementedError()
log[embedder.input_key] = xc
return log
|
Defines heuristics to log different conditionings.
These can be lists of strings (text-to-image), tensors, ints, ...
|
log_conditionings
|
python
|
THUDM/CogVideo
|
sat/diffusion_video.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/diffusion_video.py
|
Apache-2.0
|
def get_3d_sincos_pos_embed(
embed_dim,
grid_height,
grid_width,
t_size,
cls_token=False,
height_interpolation=1.0,
width_interpolation=1.0,
time_interpolation=1.0,
):
"""
grid_size: int of the grid height and width
t_size: int of the temporal size
return:
pos_embed: [t_size*grid_size * grid_size, embed_dim] or [1+t_size*grid_size * grid_size, embed_dim]
(w/ or w/o cls_token)
"""
assert embed_dim % 4 == 0
embed_dim_spatial = embed_dim // 4 * 3
embed_dim_temporal = embed_dim // 4
# spatial
grid_h = np.arange(grid_height, dtype=np.float32) / height_interpolation
grid_w = np.arange(grid_width, dtype=np.float32) / width_interpolation
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_height, grid_width])
pos_embed_spatial = get_2d_sincos_pos_embed_from_grid(embed_dim_spatial, grid)
# temporal
grid_t = np.arange(t_size, dtype=np.float32) / time_interpolation
pos_embed_temporal = get_1d_sincos_pos_embed_from_grid(embed_dim_temporal, grid_t)
# concate: [T, H, W] order
pos_embed_temporal = pos_embed_temporal[:, np.newaxis, :]
pos_embed_temporal = np.repeat(
pos_embed_temporal, grid_height * grid_width, axis=1
) # [T, H*W, D // 4]
pos_embed_spatial = pos_embed_spatial[np.newaxis, :, :]
pos_embed_spatial = np.repeat(pos_embed_spatial, t_size, axis=0) # [T, H*W, D // 4 * 3]
pos_embed = np.concatenate([pos_embed_temporal, pos_embed_spatial], axis=-1)
return pos_embed # [T, H*W, D]
|
grid_size: int of the grid height and width
t_size: int of the temporal size
return:
pos_embed: [t_size*grid_size * grid_size, embed_dim] or [1+t_size*grid_size * grid_size, embed_dim]
(w/ or w/o cls_token)
|
get_3d_sincos_pos_embed
|
python
|
THUDM/CogVideo
|
sat/dit_video_concat.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/dit_video_concat.py
|
Apache-2.0
|
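A shape check for get_3d_sincos_pos_embed, assuming the function above is in scope; embed_dim must be divisible by 4 (one quarter temporal, three quarters spatial), and the grid sizes here are toy values.

pos = get_3d_sincos_pos_embed(embed_dim=64, grid_height=4, grid_width=6, t_size=2)
print(pos.shape)  # (2, 24, 64): [T, H*W, D], 16 temporal dims + 48 spatial dims per token
|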
def get_2d_sincos_pos_embed(embed_dim, grid_height, grid_width, cls_token=False, extra_tokens=0):
"""
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_height, dtype=np.float32)
grid_w = np.arange(grid_width, dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_height, grid_width])
pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid)
if cls_token and extra_tokens > 0:
pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0)
return pos_embed
|
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
|
get_2d_sincos_pos_embed
|
python
|
THUDM/CogVideo
|
sat/dit_video_concat.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/dit_video_concat.py
|
Apache-2.0
|
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
"""
assert embed_dim % 2 == 0
omega = np.arange(embed_dim // 2, dtype=np.float64)
omega /= embed_dim / 2.0
omega = 1.0 / 10000**omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
|
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
|
get_1d_sincos_pos_embed_from_grid
|
python
|
THUDM/CogVideo
|
sat/dit_video_concat.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/dit_video_concat.py
|
Apache-2.0
|
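The 1-D helper implements the standard transformer sinusoid: for position p and frequency index d, omega_d = 1 / 10000^(2d/D), and the embedding is the concatenation of sin(p * omega) and cos(p * omega). A quick numeric check, assuming the function is in scope.

import numpy as np

pos = np.array([0.0, 1.0, 2.0])
emb = get_1d_sincos_pos_embed_from_grid(embed_dim=8, pos=pos)
print(emb.shape)  # (3, 8): 4 sine dims followed by 4 cosine dims
print(emb[0])     # position 0 -> all sines are 0, all cosines are 1
|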
def is_power_of_two(n):
"""
chat.openai.com/chat
Return True if n is a power of 2, otherwise return False.
The function is_power_of_two takes an integer n as input and returns True if n is a power of 2, otherwise it returns False.
The function works by first checking if n is less than or equal to 0. If n is less than or equal to 0, it can't be a power of 2, so the function returns False.
If n is greater than 0, the function checks whether n is a power of 2 by using a bitwise AND operation between n and n-1. If n is a power of 2, then it will have only one bit set to 1 in its binary representation. When we subtract 1 from a power of 2, all the bits to the right of that bit become 1, and the bit itself becomes 0. So, when we perform a bitwise AND between n and n-1, we get 0 if n is a power of 2, and a non-zero value otherwise.
Thus, if the result of the bitwise AND operation is 0, then n is a power of 2 and the function returns True. Otherwise, the function returns False.
"""
if n <= 0:
return False
return (n & (n - 1)) == 0
|
chat.openai.com/chat
Return True if n is a power of 2, otherwise return False.
The function is_power_of_two takes an integer n as input and returns True if n is a power of 2, otherwise it returns False.
The function works by first checking if n is less than or equal to 0. If n is less than or equal to 0, it can't be a power of 2, so the function returns False.
If n is greater than 0, the function checks whether n is a power of 2 by using a bitwise AND operation between n and n-1. If n is a power of 2, then it will have only one bit set to 1 in its binary representation. When we subtract 1 from a power of 2, all the bits to the right of that bit become 1, and the bit itself becomes 0. So, when we perform a bitwise AND between n and n-1, we get 0 if n is a power of 2, and a non-zero value otherwise.
Thus, if the result of the bitwise AND operation is 0, then n is a power of 2 and the function returns True. Otherwise, the function returns False.
|
is_power_of_two
|
python
|
THUDM/CogVideo
|
sat/sgm/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/util.py
|
Apache-2.0
|
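A few spot checks of the bit trick described above (n & (n - 1) clears the lowest set bit, so the result is zero exactly when n is a power of two), assuming is_power_of_two is in scope.

for n in (0, 1, 2, 3, 16, 24, 1024):
    print(n, is_power_of_two(n))
# 0 False, 1 True, 2 True, 3 False, 16 True, 24 False, 1024 True
|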
def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
return x[(...,) + (None,) * dims_to_append]
|
Appends dimensions to the end of a tensor until it has target_dims dimensions.
|
append_dims
|
python
|
THUDM/CogVideo
|
sat/sgm/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/util.py
|
Apache-2.0
|
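append_dims is typically used to make per-sample scalars broadcast against image- or video-shaped tensors; a minimal sketch, assuming the function is in scope and the shapes are toy values.

import torch

sigmas = torch.tensor([0.5, 1.0])          # one scalar per batch element
x = torch.randn(2, 4, 8, 8)                # toy latent batch

scaled = x * append_dims(sigmas, x.ndim)   # sigmas broadcasts as shape (2, 1, 1, 1)
print(append_dims(sigmas, x.ndim).shape)   # torch.Size([2, 1, 1, 1])
|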
def get_configs_path() -> str:
"""
Get the `configs` directory.
For a working copy, this is the one in the root of the repository,
but for an installed copy, it's in the `sgm` package (see pyproject.toml).
"""
this_dir = os.path.dirname(__file__)
candidates = (
os.path.join(this_dir, "configs"),
os.path.join(this_dir, "..", "configs"),
)
for candidate in candidates:
candidate = os.path.abspath(candidate)
if os.path.isdir(candidate):
return candidate
raise FileNotFoundError(f"Could not find SGM configs in {candidates}")
|
Get the `configs` directory.
For a working copy, this is the one in the root of the repository,
but for an installed copy, it's in the `sgm` package (see pyproject.toml).
|
get_configs_path
|
python
|
THUDM/CogVideo
|
sat/sgm/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/util.py
|
Apache-2.0
|
def get_nested_attribute(obj, attribute_path, depth=None, return_key=False):
"""
Will return the result of a recursive get attribute call.
E.g.:
a.b.c
= getattr(getattr(a, "b"), "c")
= get_nested_attribute(a, "b.c")
If any part of the attribute call is an integer x with current obj a, will
try to call a[x] instead of a.x first.
"""
attributes = attribute_path.split(".")
if depth is not None and depth > 0:
attributes = attributes[:depth]
assert len(attributes) > 0, "At least one attribute should be selected"
current_attribute = obj
current_key = None
for level, attribute in enumerate(attributes):
current_key = ".".join(attributes[: level + 1])
try:
id_ = int(attribute)
current_attribute = current_attribute[id_]
except ValueError:
current_attribute = getattr(current_attribute, attribute)
return (current_attribute, current_key) if return_key else current_attribute
|
Will return the result of a recursive get attribute call.
E.g.:
a.b.c
= getattr(getattr(a, "b"), "c")
= get_nested_attribute(a, "b.c")
If any part of the attribute call is an integer x with current obj a, will
try to call a[x] instead of a.x first.
|
get_nested_attribute
|
python
|
THUDM/CogVideo
|
sat/sgm/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/util.py
|
Apache-2.0
|
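A small example of get_nested_attribute, assuming it is in scope; the dotted path mixes attribute access with an integer index, which falls back to item lookup as described above, and the object is a toy stand-in.

from types import SimpleNamespace

model = SimpleNamespace(blocks=[SimpleNamespace(norm=SimpleNamespace(eps=1e-5))])

value = get_nested_attribute(model, "blocks.0.norm.eps")
print(value)  # 1e-05

_, key = get_nested_attribute(model, "blocks.0.norm.eps", depth=2, return_key=True)
print(key)    # "blocks.0" -> only the first two path components are resolved
|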
def pytorch_worker_info(group=None): # sourcery skip: use-contextlib-suppress
"""Return node and worker info for PyTorch and some distributed environments."""
rank = 0
world_size = 1
worker = 0
num_workers = 1
try:
import torch.distributed
if torch.distributed.is_available() and torch.distributed.is_initialized():
group = group or torch.distributed.group.WORLD
rank = torch.distributed.get_rank(group=group)
world_size = torch.distributed.get_world_size(group=group)
except ModuleNotFoundError:
pass
try:
import torch.utils.data
worker_info = torch.utils.data.get_worker_info()
if worker_info is not None:
worker = worker_info.id
num_workers = worker_info.num_workers
except ModuleNotFoundError:
pass
return rank, world_size, worker, num_workers
|
Return node and worker info for PyTorch and some distributed environments.
|
pytorch_worker_info
|
python
|
THUDM/CogVideo
|
sat/sgm/webds.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/webds.py
|
Apache-2.0
|
def pytorch_worker_seed(group=None):
"""Compute a distinct, deterministic RNG seed for each worker and node."""
rank, world_size, worker, num_workers = pytorch_worker_info(group=group)
return rank * 1000 + worker
|
Compute a distinct, deterministic RNG seed for each worker and node.
|
pytorch_worker_seed
|
python
|
THUDM/CogVideo
|
sat/sgm/webds.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/webds.py
|
Apache-2.0
|
def tar_file_iterator_with_meta(
fileobj,
meta_names,
skip_meta=r"__[^/]*__($|/)",
suffix=None,
handler=reraise_exception,
meta_stream=None,
):
"""Iterate over tar file, yielding filename, content pairs for the given tar stream.
:param fileobj: byte stream suitable for tarfile
:param meta_names: key of different items in meta file
:param skip_meta: regexp for keys that are skipped entirely (Default value = r"__[^/]*__($|/)")
"""
stream = tarfile.open(fileobj=fileobj, mode="r|*")
data_dir, filename = fileobj.name.rsplit("/", 1)
meta_data = {} # {id: {meta_name: meta_value, meta_name2: meta_value2, ...}}
if meta_stream is None:
meta_file_name = filename.split(".")[0] + ".meta.jsonl"
meta_path = os.path.join(data_dir, meta_file_name)
if os.path.exists(meta_path):
meta_stream = open(meta_path, "r")
else:
meta_file_name = meta_stream.name
if meta_stream is not None:
for lineno, line in enumerate(meta_stream):
meta_list = []
try:
meta_list.append(json.loads(line))
except Exception as exn:
from sat.helpers import print_rank0
print_rank0(
f"Error in loading jsonl {meta_file_name}, lineno {lineno}: {line}",
level="DEBUG",
)
continue
for item in meta_list:
if item["key"] not in meta_data:
meta_data[item["key"]] = {}
for meta_name in meta_names:
if meta_name in item:
meta_data[item["key"]][meta_name] = item[meta_name]
meta_stream.close()
try:
for tarinfo in stream:
fname = tarinfo.name
try:
if not tarinfo.isreg():
continue
if fname is None:
continue
if "/" not in fname and fname.startswith("__") and fname.endswith("__"):
# skipping metadata for now
continue
if skip_meta is not None and re.match(skip_meta, fname):
continue
if fname.endswith(".txt") and suffix is not None:
data = (stream.extractfile(tarinfo).read().decode() + suffix).encode()
else:
data = stream.extractfile(tarinfo).read()
result = dict(fname=fname, data=data)
yield result
if fname.endswith(".id"):
fid = fname.split(".")[0]
if "-$#%@&" in fid:
sfid = fid.split("-$#%@&")[0]
else:
sfid = fid
meta_data_fid = meta_data.get(sfid, {})
for meta_name in meta_names:
meta_fname = fid + "." + meta_name
meta = meta_data_fid.get(meta_name, None)
yield dict(fname=meta_fname, data=meta)
stream.members = []
except Exception as exn:
if hasattr(exn, "args") and len(exn.args) > 0:
exn.args = (exn.args[0] + " @ " + str(fileobj),) + exn.args[1:]
if handler(exn):
continue
else:
break
except Exception as exn:
print(exn)
del stream
|
Iterate over tar file, yielding filename, content pairs for the given tar stream.
:param fileobj: byte stream suitable for tarfile
:param meta_names: key of different items in meta file
:param skip_meta: regexp for keys that are skipped entirely (Default value = r"__[^/]*__($|/)")
|
tar_file_iterator_with_meta
|
python
|
THUDM/CogVideo
|
sat/sgm/webds.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/webds.py
|
Apache-2.0
|
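A hedged usage sketch for `tar_file_iterator_with_meta` above, assuming the CogVideo `sat` directory is on `sys.path` so `sgm.webds` imports; the shard path and the `txt` meta key are placeholders.

from sgm.webds import tar_file_iterator_with_meta  # assumes sat/ is on sys.path

# Iterate a local WebDataset-style shard; if data/shard-000.meta.jsonl exists
# next to it, the requested meta fields are yielded as extra <id>.txt entries.
with open("data/shard-000.tar", "rb") as fileobj:  # placeholder shard path
    for sample in tar_file_iterator_with_meta(fileobj, meta_names=["txt"]):
        data = sample["data"]
        print(sample["fname"], 0 if data is None else len(data))
|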
def tar_file_expander_with_meta(data, meta_names, handler=reraise_exception):
"""Expand a stream of open tar files into a stream of tar file contents.
This returns an iterator over (filename, file_contents).
"""
for source in data:
url = source["url"]
try:
assert isinstance(source, dict)
assert "stream" in source
for sample in tar_file_iterator_with_meta(
source["stream"], meta_names, meta_stream=source["meta_stream"]
):
assert isinstance(sample, dict) and "data" in sample and "fname" in sample
sample["__url__"] = url
yield sample
except Exception as exn:
exn.args = exn.args + (source.get("stream"), source.get("url"))
if handler(exn):
continue
else:
break
|
Expand a stream of open tar files into a stream of tar file contents.
This returns an iterator over (filename, file_contents).
|
tar_file_expander_with_meta
|
python
|
THUDM/CogVideo
|
sat/sgm/webds.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/webds.py
|
Apache-2.0
|
def url_opener(
data,
handler,
**kw,
):
"""Open URLs and yield a stream of url+stream pairs.
Args:
data: iterator over dict(url=...)
handler: exception handler.
kw: keyword arguments for gopen.gopen.
Yields:
a stream of url+stream pairs.
"""
for sample in data:
assert isinstance(sample, dict), sample
assert "url" in sample
url = sample["url"]
try:
stream = gopen(url, **kw)
if hasattr(stream, "meta_stream"):
meta_stream = stream.meta_stream
del stream.meta_stream
else:
meta_stream = None
sample.update(stream=stream, meta_stream=meta_stream)
yield sample
except Exception as exn:
exn.args = exn.args + (url,)
if handler(exn):
continue
else:
break
|
Open URLs and yield a stream of url+stream pairs.
Args:
data: iterator over dict(url=...)
handler: exception handler.
kw: keyword arguments for gopen.gopen.
Yields:
a stream of url+stream pairs.
|
url_opener
|
python
|
THUDM/CogVideo
|
sat/sgm/webds.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/webds.py
|
Apache-2.0
|
def gopen_rclone(url, mode="rb", bufsize=1024 * 1024 * 32):
"""Open a URL with `curl`.
:param url: rclone url, e.g. data:bucket1/foo.tar. data should be configured.
:param mode: file mode
:param bufsize: buffer size
"""
url = url.replace("rclone://", "")
if mode[0] == "r":
cmd = f"rclone cat '{url}'"
return Pipe(
cmd,
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 23],
) # skipcq: BAN-B604
elif mode[0] == "w":
cmd = f"rclone cp - '{url}'"
return Pipe(
cmd,
mode=mode,
shell=True,
bufsize=bufsize,
ignore_status=[141, 26],
) # skipcq: BAN-B604
else:
raise ValueError(f"{mode}: unknown mode")
|
Open a URL with `rclone`.
:param url: rclone url, e.g. data:bucket1/foo.tar. data should be configured.
:param mode: file mode
:param bufsize: buffer size
|
gopen_rclone
|
python
|
THUDM/CogVideo
|
sat/sgm/webds.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/webds.py
|
Apache-2.0
|
def gopen_boto3(url, mode="rb", bufsize=8192 * 2):
"""Open a URL with boto3 API.
:param url: boto3 url, e.g. boto3://bucket1/foo.tar. data should be configured.
:param mode: file mode
:param bufsize: buffer size
"""
import boto3
# boto3.set_stream_logger('botocore', level='DEBUG')
if url.startswith("boto3://"):
url = url.replace("boto3://", "")
need_meta = False
else:
url = url.replace("metaboto3://", "")
need_meta = True
endpoint_url = os.environ.get("S3_ENDPOINT_URL", None)
access_key = os.environ.get("S3_ACCESS_KEY_ID", None)
secret_key = os.environ.get("S3_SECRET_ACCESS_KEY", None)
if mode[0] == "r":
s3_client = boto3.client(
"s3",
endpoint_url=endpoint_url,
aws_access_key_id=access_key,
aws_secret_access_key=secret_key,
)
bucket, key = url.split("/", 1)
if need_meta:
# download a meta json
meta_file_key = key.split(".")[0] + ".meta.jsonl"
meta_stream = io.BytesIO()
s3_client.download_fileobj(bucket, meta_file_key, meta_stream)
meta_stream.seek(0)
meta_stream.name = meta_file_key
else:
meta_stream = None
# data tar stream
response = s3_client.get_object(Bucket=bucket, Key=key) # Range optional
response["Body"].name = key # actually not used
response["Body"].meta_stream = meta_stream
return response["Body"]
else:
raise ValueError(f"{mode}: unknown mode")
|
Open a URL with boto3 API.
:param url: boto3 url, e.g. boto3://bucket1/foo.tar. data should be configured.
:param mode: file mode
:param bufsize: buffer size
|
gopen_boto3
|
python
|
THUDM/CogVideo
|
sat/sgm/webds.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/webds.py
|
Apache-2.0
|
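A hedged sketch of streaming a shard through `gopen_boto3` above. The bucket, key, credential values, and the `sgm.webds` import path are placeholders/assumptions; only the documented `S3_*` environment variables and the `boto3://` prefix come from the source.

import os
import tarfile

from sgm.webds import gopen_boto3  # assumes sat/ is on sys.path

os.environ.setdefault("S3_ENDPOINT_URL", "https://s3.example.com")  # placeholder
os.environ.setdefault("S3_ACCESS_KEY_ID", "my-access-key")          # placeholder
os.environ.setdefault("S3_SECRET_ACCESS_KEY", "my-secret-key")      # placeholder

body = gopen_boto3("boto3://my-bucket/shards/train-000.tar")  # streaming S3 body
with tarfile.open(fileobj=body, mode="r|*") as tf:            # sequential tar read
    for member in tf:
        print(member.name)
|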
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
|
Zero out the parameters of a module and return it.
|
zero_module
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/attention.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/attention.py
|
Apache-2.0
|
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
|
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
|
get_timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/cp_enc_dec.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/cp_enc_dec.py
|
Apache-2.0
|
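A quick shape check for the sinusoidal `get_timestep_embedding` above, assuming the function (and its `math`/`torch` imports) is in scope; the batch of timesteps is illustrative.

import torch

timesteps = torch.tensor([0, 10, 100, 1000])
emb = get_timestep_embedding(timesteps, embedding_dim=128)
print(emb.shape)        # torch.Size([4, 128])
print(emb.abs().max())  # <= 1, since every entry is a sine or cosine
|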
def forward(self, fmap, cond: Tensor):
"""
notation
b - batch
n - convs
o - output
i - input
k - kernel
"""
b = fmap.shape[0]
# prepare weights for modulation
weights = self.weights
# do the modulation, demodulation, as done in stylegan2
cond = rearrange(cond, "b i -> b 1 i 1 1 1")
weights = weights * (cond + 1)
if self.demod:
inv_norm = (
reduce(weights**2, "b o i k0 k1 k2 -> b o 1 1 1 1", "sum")
.clamp(min=self.eps)
.rsqrt()
)
weights = weights * inv_norm
fmap = rearrange(fmap, "b c t h w -> 1 (b c) t h w")
weights = rearrange(weights, "b o ... -> (b o) ...")
fmap = F.pad(fmap, self.padding, mode=self.pad_mode)
fmap = F.conv3d(fmap, weights, groups=b)
return rearrange(fmap, "1 (b o) ... -> b o ...", b=b)
|
notation
b - batch
n - convs
o - output
i - input
k - kernel
|
forward
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/magvit2_pytorch.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/magvit2_pytorch.py
|
Apache-2.0
|
def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if not use_actnorm:
norm_layer = nn.BatchNorm2d
else:
norm_layer = ActNorm
if (
type(norm_layer) == functools.partial
): # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True),
]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2**n, 8)
sequence += [
nn.Conv2d(
ndf * nf_mult_prev,
ndf * nf_mult,
kernel_size=kw,
stride=2,
padding=padw,
bias=use_bias,
),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
]
nf_mult_prev = nf_mult
nf_mult = min(2**n_layers, 8)
sequence += [
nn.Conv2d(
ndf * nf_mult_prev,
ndf * nf_mult,
kernel_size=kw,
stride=1,
padding=padw,
bias=use_bias,
),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True),
]
sequence += [
nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
] # output 1 channel prediction map
self.main = nn.Sequential(*sequence)
|
Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
|
__init__
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/lpips/model/model.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/lpips/model/model.py
|
Apache-2.0
|
def quantize(self, z: Tensor) -> Tensor:
"""Quantizes z, returns quantized zhat, same shape as z."""
quantized = round_ste(self.bound(z))
half_width = self._levels // 2 # Renormalize to [-1, 1].
return quantized / half_width
|
Quantizes z, returns quantized zhat, same shape as z.
|
quantize
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/regularizers/finite_scalar_quantization.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/regularizers/finite_scalar_quantization.py
|
Apache-2.0
|
def codes_to_indices(self, zhat: Tensor) -> Tensor:
"""Converts a `code` to an index in the codebook."""
assert zhat.shape[-1] == self.codebook_dim
zhat = self._scale_and_shift(zhat)
return (zhat * self._basis).sum(dim=-1).to(int32)
|
Converts a `code` to an index in the codebook.
|
codes_to_indices
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/regularizers/finite_scalar_quantization.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/regularizers/finite_scalar_quantization.py
|
Apache-2.0
|
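A self-contained sketch of the finite-scalar-quantization arithmetic behind `quantize` and `codes_to_indices` above, using illustrative levels [8, 5, 5, 5]. The local `bound` helper is an assumption about what `self.bound` does (squash each dimension so rounding yields exactly `levels` integers per dimension), not the class method itself, and the straight-through estimator is omitted.

import torch

levels = torch.tensor([8, 5, 5, 5])            # illustrative FSQ levels
half_width = levels // 2
basis = torch.cumprod(
    torch.cat([torch.ones(1, dtype=torch.long), levels[:-1]]), dim=0
)

def bound(z, eps=1e-3):
    # Assumed stand-in for self.bound: squash each dimension so rounding gives
    # exactly `levels` distinct integers per dimension.
    half_l = (levels - 1) * (1 + eps) / 2
    offset = torch.where(levels % 2 == 0, torch.tensor(0.5), torch.tensor(0.0))
    shift = (offset / half_l).atanh()
    return (z + shift).tanh() * half_l - offset

z = torch.randn(2, 4)                          # (batch, codebook_dim)
quantized = torch.round(bound(z))              # straight-through trick omitted
zhat = quantized / half_width                  # codes renormalized to ~[-1, 1]
indices = ((zhat * half_width + half_width) * basis).sum(dim=-1).long()
print(zhat)                                    # same shape as z
print(indices)                                 # one index per sample in [0, 8*5*5*5)
|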
def forward(self, z: Tensor) -> Tensor:
"""
einstein notation
b - batch
n - sequence (or flattened spatial dimensions)
d - feature dimension
c - number of codebook dim
"""
is_img_or_video = z.ndim >= 4
# standardize image or video into (batch, seq, dimension)
if is_img_or_video:
z = rearrange(z, "b d ... -> b ... d")
z, ps = pack_one(z, "b * d")
assert (
z.shape[-1] == self.dim
), f"expected dimension of {self.dim} but found dimension of {z.shape[-1]}"
z = self.project_in(z)
z = rearrange(z, "b n (c d) -> b n c d", c=self.num_codebooks)
codes = self.quantize(z)
indices = self.codes_to_indices(codes)
codes = rearrange(codes, "b n c d -> b n (c d)")
out = self.project_out(codes)
# reconstitute image or video dimensions
if is_img_or_video:
out = unpack_one(out, ps, "b * d")
out = rearrange(out, "b ... d -> b d ...")
indices = unpack_one(indices, ps, "b * c")
if not self.keep_num_codebooks_dim:
indices = rearrange(indices, "... 1 -> ...")
return out, indices
|
einstein notation
b - batch
n - sequence (or flattened spatial dimensions)
d - feature dimension
c - number of codebook dim
|
forward
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/regularizers/finite_scalar_quantization.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/regularizers/finite_scalar_quantization.py
|
Apache-2.0
|
def forward(
self,
x,
inv_temperature=100.0,
return_loss_breakdown=False,
mask=None,
):
"""
einstein notation
b - batch
n - sequence (or flattened spatial dimensions)
d - feature dimension, which is also log2(codebook size)
c - number of codebook dim
"""
x = x.float()
is_img_or_video = x.ndim >= 4
# standardize image or video into (batch, seq, dimension)
if is_img_or_video:
x = rearrange(x, "b d ... -> b ... d")
x, ps = pack_one(x, "b * d")
assert (
x.shape[-1] == self.dim
), f"expected dimension of {self.dim} but received {x.shape[-1]}"
x = self.project_in(x)
# split out number of codebooks
x = rearrange(x, "b n (c d) -> b n c d", c=self.num_codebooks)
# quantize by eq 3.
original_input = x
codebook_value = torch.ones_like(x) * self.codebook_scale
quantized = torch.where(x > 0, codebook_value, -codebook_value)
# use straight-through gradients (optionally with custom activation fn) if training
if self.training:
x = self.activation(x)
x = x + (quantized - x).detach()
else:
x = quantized
# calculate indices
indices = reduce((x > 0).int() * self.mask.int(), "b n c d -> b n c", "sum")
# entropy aux loss
if self.training:
# the same as euclidean distance up to a constant
distance = -2 * einsum("... i d, j d -> ... i j", original_input, self.codebook)
prob = (-distance * inv_temperature).softmax(dim=-1)
# account for mask
if exists(mask):
prob = prob[mask]
else:
prob = rearrange(prob, "b n ... -> (b n) ...")
# whether to only use a fraction of probs, for reducing memory
if self.frac_per_sample_entropy < 1.0:
num_tokens = prob.shape[0]
num_sampled_tokens = int(num_tokens * self.frac_per_sample_entropy)
rand_mask = torch.randn(num_tokens).argsort(dim=-1) < num_sampled_tokens
per_sample_probs = prob[rand_mask]
else:
per_sample_probs = prob
# calculate per sample entropy
per_sample_entropy = entropy(per_sample_probs).mean()
# distribution over all available tokens in the batch
avg_prob = reduce(per_sample_probs, "... c d -> c d", "mean")
codebook_entropy = entropy(avg_prob).mean()
# 1. entropy will be nudged to be low for each code, to encourage the network to output confident predictions
# 2. codebook entropy will be nudged to be high, to encourage all codes to be uniformly used within the batch
entropy_aux_loss = per_sample_entropy - self.diversity_gamma * codebook_entropy
else:
# if not training, just return dummy 0
entropy_aux_loss = per_sample_entropy = codebook_entropy = self.zero
# commit loss
if self.training:
commit_loss = F.mse_loss(original_input, quantized.detach(), reduction="none")
if exists(mask):
commit_loss = commit_loss[mask]
commit_loss = commit_loss.mean()
else:
commit_loss = self.zero
# merge back codebook dim
x = rearrange(x, "b n c d -> b n (c d)")
# project out to feature dimension if needed
x = self.project_out(x)
# reconstitute image or video dimensions
if is_img_or_video:
x = unpack_one(x, ps, "b * d")
x = rearrange(x, "b ... d -> b d ...")
indices = unpack_one(indices, ps, "b * c")
# whether to remove single codebook dim
if not self.keep_num_codebooks_dim:
indices = rearrange(indices, "... 1 -> ...")
# complete aux loss
aux_loss = (
entropy_aux_loss * self.entropy_loss_weight + commit_loss * self.commitment_loss_weight
)
ret = Return(x, indices, aux_loss)
if not return_loss_breakdown:
return ret
return ret, LossBreakdown(per_sample_entropy, codebook_entropy, commit_loss)
|
einstein notation
b - batch
n - sequence (or flattened spatial dimensions)
d - feature dimension, which is also log2(codebook size)
c - number of codebook dim
|
forward
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/regularizers/lookup_free_quantization.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/regularizers/lookup_free_quantization.py
|
Apache-2.0
|
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
|
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
|
get_timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/vqvae/movq_dec_3d.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/vqvae/movq_dec_3d.py
|
Apache-2.0
|
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
|
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
|
get_timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/vqvae/movq_dec_3d_dev.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/vqvae/movq_dec_3d_dev.py
|
Apache-2.0
|
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
|
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
|
get_timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/vqvae/movq_enc_3d.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/vqvae/movq_enc_3d.py
|
Apache-2.0
|
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
|
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
|
get_timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/vqvae/movq_modules.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/vqvae/movq_modules.py
|
Apache-2.0
|
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
|
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
|
get_timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/autoencoding/vqvae/vqvae_blocks.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/autoencoding/vqvae/vqvae_blocks.py
|
Apache-2.0
|
def _find_children(
model,
search_class: List[Type[nn.Module]] = [nn.Linear],
):
"""
Find all modules of a certain class (or union of classes).
    Returns all matching modules, along with the parent of those modules and the
names they are referenced by.
"""
# For each target find every linear_class module that isn't a child of a LoraInjectedLinear
for parent in model.modules():
for name, module in parent.named_children():
if any([isinstance(module, _class) for _class in search_class]):
yield parent, name, module
|
Find all modules of a certain class (or union of classes).
Returns all matching modules, along with the parent of those modules and the
names they are referenced by.
|
_find_children
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/lora.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/lora.py
|
Apache-2.0
|
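A small usage sketch for the `_find_children` generator above, assuming it is in scope; the toy model is illustrative.

import torch.nn as nn

toy = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Sequential(nn.Linear(32, 8)))
for parent, name, module in _find_children(toy, search_class=[nn.Linear]):
    print(type(parent).__name__, name, module)
    # The (parent, name) pair allows in-place swaps, e.g. for LoRA injection:
    # setattr(parent, name, some_replacement_module)
|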
def _find_modules_v2(
model,
ancestor_class: Optional[Set[str]] = None,
search_class: List[Type[nn.Module]] = [nn.Linear],
exclude_children_of: Optional[List[Type[nn.Module]]] = [
LoRACompatibleLinear,
LoRACompatibleConv,
LoRALinearLayer,
LoRAConv2dLayer,
],
):
"""
Find all modules of a certain class (or union of classes) that are direct or
indirect descendants of other modules of a certain class (or union of classes).
    Returns all matching modules, along with the parent of those modules and the
names they are referenced by.
"""
# Get the targets we should replace all linears under
if ancestor_class is not None:
ancestors = (
module for module in model.modules() if module.__class__.__name__ in ancestor_class
)
else:
        # this, in case you want to naively iterate over all modules.
ancestors = [module for module in model.modules()]
# For each target find every linear_class module that isn't a child of a LoraInjectedLinear
for ancestor in ancestors:
for fullname, module in ancestor.named_modules():
if any([isinstance(module, _class) for _class in search_class]):
# Find the direct parent if this is a descendant, not a child, of target
*path, name = fullname.split(".")
parent = ancestor
flag = False
while path:
try:
parent = parent.get_submodule(path.pop(0))
except:
flag = True
break
if flag:
continue
# Skip this linear if it's a child of a LoraInjectedLinear
if exclude_children_of and any(
[isinstance(parent, _class) for _class in exclude_children_of]
):
continue
# Otherwise, yield it
yield parent, name, module
|
Find all modules of a certain class (or union of classes) that are direct or
indirect descendants of other modules of a certain class (or union of classes).
Returns all matching modules, along with the parent of those modules and the
names they are referenced by.
|
_find_modules_v2
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/lora.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/lora.py
|
Apache-2.0
|
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
|
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
|
get_timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/model.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/model.py
|
Apache-2.0
|
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
|
Apply the module to `x` given `emb` timestep embeddings.
|
forward
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/openaimodel.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/openaimodel.py
|
Apache-2.0
|
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial**2) * c
model.total_ops += th.DoubleTensor([matmul_ops])
|
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
|
count_flops_attn
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/openaimodel.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/openaimodel.py
|
Apache-2.0
|
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts", q * scale, k * scale
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v)
return a.reshape(bs, -1, length)
|
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
|
forward
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/openaimodel.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/openaimodel.py
|
Apache-2.0
|
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts",
(q * scale).view(bs * self.n_heads, ch, length),
(k * scale).view(bs * self.n_heads, ch, length),
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
return a.reshape(bs, -1, length)
|
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
|
forward
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/openaimodel.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/openaimodel.py
|
Apache-2.0
|
def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param context: conditioning plugged in via crossattn
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
t_emb = timestep_embedding(
timesteps, self.model_channels, repeat_only=False, dtype=self.dtype
)
emb = self.time_embed(t_emb)
if self.num_classes is not None:
assert y.shape[0] == x.shape[0]
emb = emb + self.label_emb(y)
# h = x.type(self.dtype)
h = x
for module in self.input_blocks:
h = module(h, emb, context)
hs.append(h)
h = self.middle_block(h, emb, context)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb, context)
h = h.type(x.dtype)
if self.predict_codebook_ids:
assert False, "not supported anymore. what the f*** are you doing?"
else:
return self.out(h)
|
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param context: conditioning plugged in via crossattn
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
|
forward
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/openaimodel.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/openaimodel.py
|
Apache-2.0
|
def forward(self, x, timesteps):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
"""
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
# h = x.type(self.dtype)
h = x
for module in self.input_blocks:
h = module(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = self.middle_block(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = th.cat(results, axis=-1)
return self.out(h)
else:
h = h.type(x.dtype)
return self.out(h)
|
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
|
forward
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/openaimodel.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/openaimodel.py
|
Apache-2.0
|
def mixed_checkpoint(func, inputs: dict, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass. This differs from the original checkpoint function
borrowed from https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py in that
it also works with non-tensor inputs
:param func: the function to evaluate.
:param inputs: the argument dictionary to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
tensor_keys = [key for key in inputs if isinstance(inputs[key], torch.Tensor)]
tensor_inputs = [inputs[key] for key in inputs if isinstance(inputs[key], torch.Tensor)]
non_tensor_keys = [key for key in inputs if not isinstance(inputs[key], torch.Tensor)]
non_tensor_inputs = [
inputs[key] for key in inputs if not isinstance(inputs[key], torch.Tensor)
]
args = tuple(tensor_inputs) + tuple(non_tensor_inputs) + tuple(params)
return MixedCheckpointFunction.apply(
func,
len(tensor_inputs),
len(non_tensor_inputs),
tensor_keys,
non_tensor_keys,
*args,
)
else:
return func(**inputs)
|
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass. This differs from the original checkpoint function
borrowed from https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py in that
it also works with non-tensor inputs
:param func: the function to evaluate.
:param inputs: the argument dictionary to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
|
mixed_checkpoint
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/util.py
|
Apache-2.0
|
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
|
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
|
checkpoint
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/util.py
|
Apache-2.0
|
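A hedged sketch of a typical call to `checkpoint` above (which relies on the accompanying `CheckpointFunction`): trade activation memory for recomputation in the backward pass. The toy block and tensors are illustrative.

import torch
import torch.nn as nn

block = nn.Sequential(nn.Linear(64, 64), nn.GELU(), nn.Linear(64, 64))
x = torch.randn(8, 64, requires_grad=True)

out = checkpoint(
    lambda inp: block(inp),        # func: the expensive sub-module call
    (x,),                          # inputs: positional args passed to func
    tuple(block.parameters()),     # params: weights func closes over
    True,                          # flag: False falls back to a plain call
)
out.sum().backward()               # block activations are recomputed here
|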
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False, dtype=torch.float32):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
if not repeat_only:
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
else:
embedding = repeat(timesteps, "b -> b d", d=dim)
return embedding.to(dtype)
|
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
|
timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/util.py
|
Apache-2.0
|
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
|
Zero out the parameters of a module and return it.
|
zero_module
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/util.py
|
Apache-2.0
|
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
|
Scale the parameters of a module and return it.
|
scale_module
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/diffusionmodules/util.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/diffusionmodules/util.py
|
Apache-2.0
|
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, torch.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for torch.exp().
logvar1, logvar2 = [
x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ torch.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
)
|
source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
|
normal_kl
|
python
|
THUDM/CogVideo
|
sat/sgm/modules/distributions/distributions.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/sgm/modules/distributions/distributions.py
|
Apache-2.0
|
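Two quick sanity checks on `normal_kl` above (assuming it is in scope): the KL of a Gaussian with itself is zero, and KL(N(0,1) || N(1,1)) is 0.5 per element.

import torch

mean = torch.zeros(3)
logvar = torch.zeros(3)
print(normal_kl(mean, logvar, mean, logvar))        # tensor([0., 0., 0.])
print(normal_kl(mean, logvar, mean + 1.0, logvar))  # tensor([0.5000, 0.5000, 0.5000])
|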
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
|
Zero out the parameters of a module and return it.
|
zero_module
|
python
|
THUDM/CogVideo
|
sat/vae_modules/attention.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/vae_modules/attention.py
|
Apache-2.0
|
def get_timestep_embedding(timesteps, embedding_dim):
"""
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
return emb
|
This matches the implementation in Denoising Diffusion Probabilistic Models:
From Fairseq.
Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
|
get_timestep_embedding
|
python
|
THUDM/CogVideo
|
sat/vae_modules/cp_enc_dec.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/vae_modules/cp_enc_dec.py
|
Apache-2.0
|
def is_power_of_two(n):
"""
chat.openai.com/chat
Return True if n is a power of 2, otherwise return False.
The function is_power_of_two takes an integer n as input and returns True if n is a power of 2, otherwise it returns False.
The function works by first checking if n is less than or equal to 0. If n is less than or equal to 0, it can't be a power of 2, so the function returns False.
If n is greater than 0, the function checks whether n is a power of 2 by using a bitwise AND operation between n and n-1. If n is a power of 2, then it will have only one bit set to 1 in its binary representation. When we subtract 1 from a power of 2, all the bits to the right of that bit become 1, and the bit itself becomes 0. So, when we perform a bitwise AND between n and n-1, we get 0 if n is a power of 2, and a non-zero value otherwise.
Thus, if the result of the bitwise AND operation is 0, then n is a power of 2 and the function returns True. Otherwise, the function returns False.
"""
if n <= 0:
return False
return (n & (n - 1)) == 0
|
chat.openai.com/chat
Return True if n is a power of 2, otherwise return False.
The function is_power_of_two takes an integer n as input and returns True if n is a power of 2, otherwise it returns False.
The function works by first checking if n is less than or equal to 0. If n is less than or equal to 0, it can't be a power of 2, so the function returns False.
If n is greater than 0, the function checks whether n is a power of 2 by using a bitwise AND operation between n and n-1. If n is a power of 2, then it will have only one bit set to 1 in its binary representation. When we subtract 1 from a power of 2, all the bits to the right of that bit become 1, and the bit itself becomes 0. So, when we perform a bitwise AND between n and n-1, we get 0 if n is a power of 2, and a non-zero value otherwise.
Thus, if the result of the bitwise AND operation is 0, then n is a power of 2 and the function returns True. Otherwise, the function returns False.
|
is_power_of_two
|
python
|
THUDM/CogVideo
|
sat/vae_modules/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/vae_modules/utils.py
|
Apache-2.0
|
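A few spot checks for the bit trick explained above, assuming `is_power_of_two` is in scope.

for n in (0, 1, 2, 3, 64, 96, 1024):
    # n & (n - 1) clears the lowest set bit, so it is zero only for powers of two.
    print(n, is_power_of_two(n))
# 0 False, 1 True, 2 True, 3 False, 64 True, 96 False, 1024 True
|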
def append_dims(x, target_dims):
"""Appends dimensions to the end of a tensor until it has target_dims dimensions."""
dims_to_append = target_dims - x.ndim
if dims_to_append < 0:
raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
return x[(...,) + (None,) * dims_to_append]
|
Appends dimensions to the end of a tensor until it has target_dims dimensions.
|
append_dims
|
python
|
THUDM/CogVideo
|
sat/vae_modules/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/vae_modules/utils.py
|
Apache-2.0
|
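A minimal check of `append_dims` above (assuming it is in scope), showing the common case of broadcasting per-sample sigmas against an image batch.

import torch

sigmas = torch.tensor([0.1, 0.5])                 # shape (2,)
x = torch.randn(2, 3, 32, 32)                     # shape (2, 3, 32, 32)
print(append_dims(sigmas, x.ndim).shape)          # torch.Size([2, 1, 1, 1])
print((x * append_dims(sigmas, x.ndim)).shape)    # broadcasts to x.shape
|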
def get_configs_path() -> str:
"""
Get the `configs` directory.
For a working copy, this is the one in the root of the repository,
but for an installed copy, it's in the `sgm` package (see pyproject.toml).
"""
this_dir = os.path.dirname(__file__)
candidates = (
os.path.join(this_dir, "configs"),
os.path.join(this_dir, "..", "configs"),
)
for candidate in candidates:
candidate = os.path.abspath(candidate)
if os.path.isdir(candidate):
return candidate
raise FileNotFoundError(f"Could not find SGM configs in {candidates}")
|
Get the `configs` directory.
For a working copy, this is the one in the root of the repository,
but for an installed copy, it's in the `sgm` package (see pyproject.toml).
|
get_configs_path
|
python
|
THUDM/CogVideo
|
sat/vae_modules/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/vae_modules/utils.py
|
Apache-2.0
|
def get_nested_attribute(obj, attribute_path, depth=None, return_key=False):
"""
Will return the result of a recursive get attribute call.
E.g.:
a.b.c
= getattr(getattr(a, "b"), "c")
= get_nested_attribute(a, "b.c")
If any part of the attribute call is an integer x with current obj a, will
try to call a[x] instead of a.x first.
"""
attributes = attribute_path.split(".")
if depth is not None and depth > 0:
attributes = attributes[:depth]
assert len(attributes) > 0, "At least one attribute should be selected"
current_attribute = obj
current_key = None
for level, attribute in enumerate(attributes):
current_key = ".".join(attributes[: level + 1])
try:
id_ = int(attribute)
current_attribute = current_attribute[id_]
except ValueError:
current_attribute = getattr(current_attribute, attribute)
return (current_attribute, current_key) if return_key else current_attribute
|
Will return the result of a recursive get attribute call.
E.g.:
a.b.c
= getattr(getattr(a, "b"), "c")
= get_nested_attribute(a, "b.c")
If any part of the attribute call is an integer x with current obj a, will
try to call a[x] instead of a.x first.
|
get_nested_attribute
|
python
|
THUDM/CogVideo
|
sat/vae_modules/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/vae_modules/utils.py
|
Apache-2.0
|
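A small illustration of the dotted-path lookup in `get_nested_attribute` above, including the integer-index branch; the toy model is illustrative and the function is assumed to be in scope.

import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.Sequential(nn.Linear(8, 2)))
print(get_nested_attribute(model, "1.0.weight").shape)                # torch.Size([2, 8])
print(get_nested_attribute(model, "1.0.weight", return_key=True)[1])  # "1.0.weight"
|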
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
|
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
|
checkpoint
|
python
|
THUDM/CogVideo
|
sat/vae_modules/utils.py
|
https://github.com/THUDM/CogVideo/blob/master/sat/vae_modules/utils.py
|
Apache-2.0
|
def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
"""
Returns fp32 state_dict reconstructed from ds checkpoint
Args:
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
"""
print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
optim_files = get_optim_files(ds_checkpoint_dir)
zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
model_files = get_model_state_files(ds_checkpoint_dir)
zero_model_states = parse_model_states(model_files)
print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
if zero_stage <= 2:
return _get_fp32_state_dict_from_zero2_checkpoint(
world_size, fp32_flat_groups, zero_model_states, exclude_frozen_parameters
)
elif zero_stage == 3:
return _get_fp32_state_dict_from_zero3_checkpoint(
world_size, fp32_flat_groups, zero_model_states, exclude_frozen_parameters
)
|
Returns fp32 state_dict reconstructed from ds checkpoint
Args:
- ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
|
_get_fp32_state_dict_from_zero_checkpoint
|
python
|
THUDM/CogVideo
|
tools/convert_weight_deepspeed2hf.py
|
https://github.com/THUDM/CogVideo/blob/master/tools/convert_weight_deepspeed2hf.py
|
Apache-2.0
|
def contiguous(self):
"""
Merge partitioned weights from flat_groups into a single tensor.
"""
end_idx = self.offset + self.partitioned_numel
world_size = len(self.flat_groups)
pad_flat_param_chunks = []
for rank_i in range(world_size):
# for each rank, we need to collect weights from related group/groups
flat_groups_at_rank_i = self.flat_groups[rank_i]
start_group_id = None
end_group_id = None
for group_id in range(len(self.flat_groups_offset)):
if (
self.flat_groups_offset[group_id]
<= self.offset
< self.flat_groups_offset[group_id + 1]
):
start_group_id = group_id
if (
self.flat_groups_offset[group_id]
< end_idx
<= self.flat_groups_offset[group_id + 1]
):
end_group_id = group_id
break
# collect weights from related group/groups
for group_id in range(start_group_id, end_group_id + 1):
flat_tensor = flat_groups_at_rank_i[group_id]
start_offset = self.offset - self.flat_groups_offset[group_id]
end_offset = (
min(end_idx, self.flat_groups_offset[group_id + 1])
- self.flat_groups_offset[group_id]
)
pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
# collect weights from all ranks
pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
param = pad_flat_param[: self.shape.numel()].view(self.shape).contiguous()
return param
|
Merge partitioned weights from flat_groups into a single tensor.
|
contiguous
|
python
|
THUDM/CogVideo
|
tools/convert_weight_deepspeed2hf.py
|
https://github.com/THUDM/CogVideo/blob/master/tools/convert_weight_deepspeed2hf.py
|
Apache-2.0
|
def to_torch_tensor(state_dict, return_empty_tensor=False):
"""
Convert state_dict of GatheredTensor to torch tensor
"""
torch_state_dict = {}
converted_tensors = {}
for name, tensor in state_dict.items():
tensor_id = id(tensor)
if tensor_id in converted_tensors: # shared tensors
shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
torch_state_dict[name] = shared_tensor
else:
converted_tensors[tensor_id] = name
if return_empty_tensor:
torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
else:
torch_state_dict[name] = tensor.contiguous()
return torch_state_dict
|
Convert state_dict of GatheredTensor to torch tensor
|
to_torch_tensor
|
python
|
THUDM/CogVideo
|
tools/convert_weight_deepspeed2hf.py
|
https://github.com/THUDM/CogVideo/blob/master/tools/convert_weight_deepspeed2hf.py
|
Apache-2.0
|
def convert_zero_checkpoint_to_fp32_state_dict(
checkpoint_dir,
output_dir,
max_shard_size="5GB",
safe_serialization=False,
tag=None,
exclude_frozen_parameters=False,
):
"""
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
Args:
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
- ``output_dir``: directory to the pytorch fp32 state_dict output files
- ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
- ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
- ``exclude_frozen_parameters``: exclude frozen parameters
"""
# Dependency pre-check
if safe_serialization:
try:
from safetensors.torch import save_file
except ImportError:
print('If you want to use `safe_serialization`, please `pip install safetensors`')
raise
if max_shard_size is not None:
try:
from huggingface_hub import split_torch_state_dict_into_shards
except ImportError:
print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
raise
# Convert zero checkpoint to state_dict
state_dict = get_fp32_state_dict_from_zero_checkpoint(
checkpoint_dir, tag, exclude_frozen_parameters, lazy_mode=True
)
# Shard the model if it is too big.
weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
if max_shard_size is not None:
filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(
".safetensors", "{suffix}.safetensors"
)
        # a memory-efficient approach for sharding
empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
state_dict_split = split_torch_state_dict_into_shards(
empty_state_dict, filename_pattern=filename_pattern, max_shard_size=max_shard_size
)
else:
from collections import namedtuple
StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
state_dict_split = StateDictSplit(
is_sharded=False, filename_to_tensors={weights_name: list(state_dict.keys())}
)
# Save the model by shard
os.makedirs(output_dir, exist_ok=True)
filename_to_tensors = state_dict_split.filename_to_tensors.items()
for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
shard_state_dict = to_torch_tensor(shard_state_dict)
output_path = os.path.join(output_dir, shard_file)
if safe_serialization:
save_file(shard_state_dict, output_path, metadata={"format": "pt"})
else:
torch.save(shard_state_dict, output_path)
# release the memory of current shard
for tensor_name in list(shard_state_dict.keys()):
del state_dict[tensor_name]
del shard_state_dict[tensor_name]
del shard_state_dict
gc.collect()
# Save index if sharded
if state_dict_split.is_sharded:
index = {
"metadata": state_dict_split.metadata,
"weight_map": state_dict_split.tensor_to_filename,
}
save_index_file = (
"model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
)
save_index_file = os.path.join(output_dir, save_index_file)
with open(save_index_file, "w", encoding="utf-8") as f:
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
f.write(content)
|
Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
Args:
- ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
- ``output_dir``: directory to the pytorch fp32 state_dict output files
- ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
- ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
- ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
- ``exclude_frozen_parameters``: exclude frozen parameters
|
convert_zero_checkpoint_to_fp32_state_dict
|
python
|
THUDM/CogVideo
|
tools/convert_weight_deepspeed2hf.py
|
https://github.com/THUDM/CogVideo/blob/master/tools/convert_weight_deepspeed2hf.py
|
Apache-2.0
|
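A hedged usage sketch of `convert_zero_checkpoint_to_fp32_state_dict` above; the paths are placeholders and the keyword arguments follow the signature shown in the record.

convert_zero_checkpoint_to_fp32_state_dict(
    checkpoint_dir="./ckpt",       # contains global_step*/ folders and a `latest` file
    output_dir="./ckpt_fp32",      # receives sharded weights plus an index json
    max_shard_size="5GB",
    safe_serialization=True,       # needs `pip install safetensors huggingface_hub`
)
|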
def setup(self) -> None:
"""Load the model into memory to make running multiple predictions efficient"""
if not os.path.exists(MODEL_CACHE):
download_weights(MODEL_URL, MODEL_CACHE)
# model_id: THUDM/CogVideoX-5b-I2V
self.pipe = CogVideoXImageToVideoPipeline.from_pretrained(
MODEL_CACHE, torch_dtype=torch.bfloat16
).to("cuda")
self.pipe.enable_model_cpu_offload()
self.pipe.vae.enable_tiling()
|
Load the model into memory to make running multiple predictions efficient
|
setup
|
python
|
THUDM/CogVideo
|
tools/replicate/predict_i2v.py
|
https://github.com/THUDM/CogVideo/blob/master/tools/replicate/predict_i2v.py
|
Apache-2.0
|