Dataset columns:
- code: string (length 26 to 870k)
- docstring: string (length 1 to 65.6k)
- func_name: string (length 1 to 194)
- language: string (1 class)
- repo: string (length 8 to 68)
- path: string (length 5 to 194)
- url: string (length 46 to 254)
- license: string (4 classes)
def get(self, tensor: torch.Tensor) -> torch.Tensor:
    """Receive a CPU tensor and return the corresponding pinned tensor.

    Note that this only manages memory allocation; it does not copy content.

    Args:
        tensor (torch.Tensor): The tensor to be pinned.

    Returns:
        torch.Tensor: The pinned tensor.
    """
    self.total_cnt += 1
    with self.lock:
        # find free cache
        for cache_id, cache_tensor in self.cache.items():
            if cache_id not in self.cache_to_output and cache_tensor.numel() >= tensor.numel():
                target_cache_tensor = cache_tensor[: tensor.numel()].view(tensor.shape)
                out_id = id(target_cache_tensor)
                self.output_to_cache[out_id] = cache_id
                self.cache_to_output[cache_id] = out_id
                self.hit_cnt += 1
                return target_cache_tensor
    # no free cache, create a new one
    dtype = self.force_dtype if self.force_dtype is not None else tensor.dtype
    cache_numel = max(tensor.numel(), self.min_cache_numel)
    cache_tensor = torch.empty(cache_numel, dtype=dtype, device="cpu", pin_memory=True)
    target_cache_tensor = cache_tensor[: tensor.numel()].view(tensor.shape)
    out_id = id(target_cache_tensor)
    with self.lock:
        self.cache[id(cache_tensor)] = cache_tensor
        self.output_to_cache[out_id] = id(cache_tensor)
        self.cache_to_output[id(cache_tensor)] = out_id
    return target_cache_tensor
Receive a CPU tensor and return the corresponding pinned tensor. Note that this only manages memory allocation; it does not copy content. Args: tensor (torch.Tensor): The tensor to be pinned. Returns: torch.Tensor: The pinned tensor.
get
python
hpcaitech/Open-Sora
opensora/datasets/pin_memory_cache.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/pin_memory_cache.py
Apache-2.0
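A minimal usage sketch for the pinned-memory cache above. The class name PinMemoryCache and its no-argument constructor are assumptions inferred from the file path; only get and remove are confirmed by this record.

import torch
from opensora.datasets.pin_memory_cache import PinMemoryCache  # assumed class name

cache = PinMemoryCache()                      # assumed default constructor
cpu_tensor = torch.randn(3, 224, 224)         # ordinary pageable CPU tensor
pinned = cache.get(cpu_tensor)                # allocates a pinned view of the same shape
pinned.copy_(cpu_tensor)                      # get() does not copy, so copy explicitly
if torch.cuda.is_available():
    gpu_tensor = pinned.to("cuda", non_blocking=True)  # async H2D copy from pinned memory
cache.remove(pinned)                          # release the cache slot for reuse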
def remove(self, output_tensor: torch.Tensor) -> None:
    """Release corresponding cache tensor.

    Args:
        output_tensor (torch.Tensor): The tensor to be released.
    """
    out_id = id(output_tensor)
    with self.lock:
        if out_id not in self.output_to_cache:
            raise ValueError("Tensor not found in cache.")
        cache_id = self.output_to_cache.pop(out_id)
        del self.cache_to_output[cache_id]
Release corresponding cache tensor. Args: output_tensor (torch.Tensor): The tensor to be released.
remove
python
hpcaitech/Open-Sora
opensora/datasets/pin_memory_cache.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/pin_memory_cache.py
Apache-2.0
def get_resolution_with_aspect_ratio(
    resolution: str,
) -> tuple[int, dict[str, tuple[int, int]]]:
    """Get resolution with aspect ratio

    Args:
        resolution (str): resolution name. The format is name only or "{name}_{setting}".
            name supports "256px" or "360p". setting supports "ar1:1" or "max".

    Returns:
        tuple[int, dict[str, tuple[int, int]]]: resolution with aspect ratio
    """
    keys = resolution.split("_")
    if len(keys) == 1:
        resolution = keys[0]
        setting = ""
    else:
        resolution, setting = keys
        assert setting == "max" or setting.startswith(
            "ar"
        ), f"Invalid setting {setting}"

    # get resolution
    num_pexels = get_num_pexels_from_name(resolution)

    # get aspect ratio
    aspect_ratio_dict = get_aspect_ratios_dict(num_pexels)

    # handle setting
    if setting == "max":
        aspect_ratio = max(
            aspect_ratio_dict,
            key=lambda x: aspect_ratio_dict[x][0] * aspect_ratio_dict[x][1],
        )
        aspect_ratio_dict = {aspect_ratio: aspect_ratio_dict[aspect_ratio]}
    elif setting.startswith("ar"):
        aspect_ratio = setting[2:]
        assert (
            aspect_ratio in aspect_ratio_dict
        ), f"Aspect ratio {aspect_ratio} not found"
        aspect_ratio_dict = {aspect_ratio: aspect_ratio_dict[aspect_ratio]}

    return num_pexels, aspect_ratio_dict
Get resolution with aspect ratio Args: resolution (str): resolution name. The format is name only or "{name}_{setting}". name supports "256px" or "360p". setting supports "ar1:1" or "max". Returns: tuple[int, dict[str, tuple[int, int]]]: resolution with aspect ratio
get_resolution_with_aspect_ratio
python
hpcaitech/Open-Sora
opensora/datasets/aspect.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/aspect.py
Apache-2.0
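Illustrative calls for the helper above, as a sketch; the concrete pixel counts and aspect-ratio tables come from get_num_pexels_from_name and get_aspect_ratios_dict, which live elsewhere in aspect.py and are not shown here.

num_px, ars = get_resolution_with_aspect_ratio("256px")        # every aspect ratio for 256px
num_px, ars = get_resolution_with_aspect_ratio("256px_ar1:1")  # only the "1:1" entry
num_px, ars = get_resolution_with_aspect_ratio("360p_max")     # only the largest-area entry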
def save_sample(
    x,
    save_path=None,
    fps=8,
    normalize=True,
    value_range=(-1, 1),
    force_video=False,
    verbose=True,
    crf=23,
):
    """
    Args:
        x (Tensor): shape [C, T, H, W]
    """
    assert x.ndim == 4

    if not force_video and x.shape[1] == 1:  # T = 1: save as image
        save_path += ".png"
        x = x.squeeze(1)
        save_image([x], save_path, normalize=normalize, value_range=value_range)
    else:
        save_path += ".mp4"
        if normalize:
            low, high = value_range
            x.clamp_(min=low, max=high)
            x.sub_(low).div_(max(high - low, 1e-5))

        x = x.mul_(255).add_(0.5).clamp_(0, 255).permute(1, 2, 3, 0).to("cpu", torch.uint8)
        write_video(save_path, x, fps=fps, video_codec="h264", options={"crf": str(crf)})
    if verbose:
        print(f"Saved to {save_path}")
    return save_path
Args: x (Tensor): shape [C, T, H, W]
save_sample
python
hpcaitech/Open-Sora
opensora/datasets/utils.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/utils.py
Apache-2.0
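A usage sketch for save_sample: a single-frame tensor is written as a PNG, a multi-frame tensor as an MP4, using the default (-1, 1) value range.

import torch

image = torch.rand(3, 1, 256, 256) * 2 - 1             # [C, T, H, W] with T == 1
save_sample(image, save_path="sample_image")           # writes sample_image.png

video = torch.rand(3, 32, 256, 256) * 2 - 1            # 32 frames
save_sample(video, save_path="sample_video", fps=16)   # writes sample_video.mp4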
def center_crop_arr(pil_image, image_size):
    """
    Center cropping implementation from ADM.
    https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
    """
    while min(*pil_image.size) >= 2 * image_size:
        pil_image = pil_image.resize(tuple(x // 2 for x in pil_image.size), resample=Image.BOX)

    scale = image_size / min(*pil_image.size)
    pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC)

    arr = np.array(pil_image)
    crop_y = (arr.shape[0] - image_size) // 2
    crop_x = (arr.shape[1] - image_size) // 2
    return Image.fromarray(arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size])
Center cropping implementation from ADM. https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
center_crop_arr
python
hpcaitech/Open-Sora
opensora/datasets/utils.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/utils.py
Apache-2.0
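A short sketch of the ADM-style center crop on a PIL image; the input path is hypothetical.

from PIL import Image

img = Image.open("example.jpg")        # hypothetical input
square = center_crop_arr(img, 256)     # PIL.Image cropped to 256 x 256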
def rand_size_crop_arr(pil_image, image_size):
    """
    Randomly crop image for height and width, ranging from image_size[0] to image_size[1]
    """
    arr = np.array(pil_image)

    # get random target h w
    height = random.randint(image_size[0], image_size[1])
    width = random.randint(image_size[0], image_size[1])

    # ensure that h w are factors of 8
    height = height - height % 8
    width = width - width % 8

    # get random start pos
    h_start = random.randint(0, max(len(arr) - height, 0))
    w_start = random.randint(0, max(len(arr[0]) - width, 0))  # bound by the target width, not the height

    # crop
    return Image.fromarray(arr[h_start : h_start + height, w_start : w_start + width])
Randomly crop image for height and width, ranging from image_size[0] to image_size[1]
rand_size_crop_arr
python
hpcaitech/Open-Sora
opensora/datasets/utils.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/utils.py
Apache-2.0
def map_target_fps(
    fps: float,
    max_fps: float,
) -> tuple[float, int]:
    """
    Map fps to a new fps that is no greater than max_fps.

    Args:
        fps (float): Original fps.
        max_fps (float): Maximum fps.

    Returns:
        tuple[float, int]: New fps and sampling interval.
    """
    if math.isnan(fps):
        return 0, 1
    if fps < max_fps:
        return fps, 1
    sampling_interval = math.ceil(fps / max_fps)
    new_fps = math.floor(fps / sampling_interval)
    return new_fps, sampling_interval
Map fps to a new fps that is no greater than max_fps. Args: fps (float): Original fps. max_fps (float): Maximum fps. Returns: tuple[float, int]: New fps and sampling interval.
map_target_fps
python
hpcaitech/Open-Sora
opensora/datasets/utils.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/utils.py
Apache-2.0
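Worked examples of the fps mapping above:

map_target_fps(24.0, 32.0)          # -> (24.0, 1): already under the cap, keep every frame
map_target_fps(29.97, 16.0)         # -> (14, 2): ceil(29.97 / 16) = 2, floor(29.97 / 2) = 14
map_target_fps(float("nan"), 16.0)  # -> (0, 1): missing fps metadata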
def sync_object_across_devices(obj: Any, rank: int = 0):
    """
    Synchronizes any picklable object across devices in a PyTorch distributed setting
    using `broadcast_object_list` with CUDA support.

    Parameters:
        obj (Any): The object to synchronize. Can be any picklable object (e.g., list, dict, custom class).
        rank (int): The rank of the device from which to broadcast the object state. Default is 0.

    Note:
        Ensure torch.distributed is initialized before using this function and CUDA is available.
    """
    # Move the object to a list for broadcasting
    object_list = [obj]

    # Broadcast the object list from the source rank to all other ranks
    dist.broadcast_object_list(object_list, src=rank, device="cuda")

    # Retrieve the synchronized object
    obj = object_list[0]

    return obj
Synchronizes any picklable object across devices in a PyTorch distributed setting using `broadcast_object_list` with CUDA support. Parameters: obj (Any): The object to synchronize. Can be any picklable object (e.g., list, dict, custom class). rank (int): The rank of the device from which to broadcast the object state. Default is 0. Note: Ensure torch.distributed is initialized before using this function and CUDA is available.
sync_object_across_devices
python
hpcaitech/Open-Sora
opensora/datasets/utils.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/utils.py
Apache-2.0
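A sketch of the broadcast helper under torchrun, assuming an initialized process group and CUDA; the payload shown is arbitrary.

import torch.distributed as dist

# Rank 0 computes the value; every other rank passes a placeholder.
meta = {"bucket_ids": [0, 3, 7]} if dist.get_rank() == 0 else None
meta = sync_object_across_devices(meta)  # identical dict on every rank afterwards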
def rescale_image_by_path(path: str, height: int, width: int):
    """
    Rescales an image to the specified height and width and saves it back to the original path.

    Args:
        path (str): The file path of the image.
        height (int): The target height of the image.
        width (int): The target width of the image.
    """
    try:
        # read image
        image = Image.open(path)

        # check if image is valid
        if image is None:
            raise ValueError("The image is invalid or empty.")

        # resize image
        resize_transform = transforms.Resize((width, height))
        resized_image = resize_transform(image)

        # save resized image back to the original path
        resized_image.save(path)
    except Exception as e:
        print(f"Error rescaling image: {e}")
Rescales an image to the specified height and width and saves it back to the original path. Args: path (str): The file path of the image. height (int): The target height of the image. width (int): The target width of the image.
rescale_image_by_path
python
hpcaitech/Open-Sora
opensora/datasets/utils.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/utils.py
Apache-2.0
def rescale_video_by_path(path: str, height: int, width: int):
    """
    Rescales an MP4 video (without audio) to the specified height and width.

    Args:
        path (str): The file path of the video.
        height (int): The target height of the video.
        width (int): The target width of the video.
    """
    try:
        # Read video and metadata
        video, info = read_video(path, backend="av")

        # Check if video is valid
        if video is None or video.size(0) == 0:
            raise ValueError("The video is invalid or empty.")

        # Resize video frames using a performant method
        resize_transform = transforms.Compose([transforms.Resize((width, height))])
        resized_video = torch.stack([resize_transform(frame) for frame in video])

        # Save resized video back to the original path
        resized_video = resized_video.permute(0, 2, 3, 1)
        write_video(path, resized_video, fps=int(info["video_fps"]), video_codec="h264")
    except Exception as e:
        print(f"Error rescaling video: {e}")
Rescales an MP4 video (without audio) to the specified height and width. Args: path (str): The file path of the video. height (int): The target height of the video. width (int): The target width of the video.
rescale_video_by_path
python
hpcaitech/Open-Sora
opensora/datasets/utils.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/utils.py
Apache-2.0
def __init__(self, bucket_config: dict[str, dict[int, tuple[float, int] | tuple[tuple[float, float], int]]]): """ Args: bucket_config (dict): A dictionary containing the bucket configuration. The dictionary should be in the following format: { "bucket_name": { "time": (probability, batch_size), "time": (probability, batch_size), ... }, ... } Or in the following format: { "bucket_name": { "time": ((probability, next_probability), batch_size), "time": ((probability, next_probability), batch_size), ... }, ... } The bucket_name should be the name of the bucket, and the time should be the number of frames in the video. The probability should be a float between 0 and 1, and the batch_size should be an integer. If the probability is a tuple, the second value should be the probability to skip to the next time. """ aspect_ratios = {key: get_resolution_with_aspect_ratio(key) for key in bucket_config.keys()} bucket_probs = OrderedDict() bucket_bs = OrderedDict() bucket_names = sorted(bucket_config.keys(), key=lambda x: aspect_ratios[x][0], reverse=True) for key in bucket_names: bucket_time_names = sorted(bucket_config[key].keys(), key=lambda x: x, reverse=True) bucket_probs[key] = OrderedDict({k: bucket_config[key][k][0] for k in bucket_time_names}) bucket_bs[key] = OrderedDict({k: bucket_config[key][k][1] for k in bucket_time_names}) self.hw_criteria = {k: aspect_ratios[k][0] for k in bucket_names} self.t_criteria = {k1: {k2: k2 for k2 in bucket_config[k1].keys()} for k1 in bucket_names} self.ar_criteria = { k1: {k2: {k3: v3 for k3, v3 in aspect_ratios[k1][1].items()} for k2 in bucket_config[k1].keys()} for k1 in bucket_names } bucket_id_cnt = num_bucket = 0 bucket_id = dict() for k1, v1 in bucket_probs.items(): bucket_id[k1] = dict() for k2, _ in v1.items(): bucket_id[k1][k2] = bucket_id_cnt bucket_id_cnt += 1 num_bucket += len(aspect_ratios[k1][1]) self.bucket_probs = bucket_probs self.bucket_bs = bucket_bs self.bucket_id = bucket_id self.num_bucket = num_bucket log_message("Number of buckets: %s", num_bucket)
Args: bucket_config (dict): A dictionary containing the bucket configuration. The dictionary should be in the following format: { "bucket_name": { "time": (probability, batch_size), "time": (probability, batch_size), ... }, ... } Or in the following format: { "bucket_name": { "time": ((probability, next_probability), batch_size), "time": ((probability, next_probability), batch_size), ... }, ... } The bucket_name should be the name of the bucket, and the time should be the number of frames in the video. The probability should be a float between 0 and 1, and the batch_size should be an integer. If the probability is a tuple, the second value should be the probability to skip to the next time.
__init__
python
hpcaitech/Open-Sora
opensora/datasets/bucket.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/bucket.py
Apache-2.0
def group_by_bucket(self) -> dict: """ Group the dataset samples into buckets. This method will set `self._cached_bucket_sample_dict` to the bucket sample dict. Returns: dict: a dictionary with bucket id as key and a list of sample indices as value """ if self._cached_bucket_sample_dict is not None: return self._cached_bucket_sample_dict, self._cached_num_total_batch # use pandarallel to accelerate bucket processing log_message("Building buckets using %d workers...", self.num_bucket_build_workers) bucket_ids = None if dist.get_rank() == 0: data = self.dataset.data.copy(deep=True) data["id"] = data.index bucket_ids = data.parallel_apply( apply, axis=1, method=self.bucket.get_bucket_id, seed=self.seed + self.epoch, num_bucket=self.bucket.num_bucket, fps_max=self.dataset.fps_max, ) dist.barrier() bucket_ids = sync_object_across_devices(bucket_ids) dist.barrier() # group by bucket # each data sample is put into a bucket with a similar image/video size bucket_sample_dict = defaultdict(list) bucket_ids_np = np.array(bucket_ids) valid_indices = np.where(bucket_ids_np != None)[0] for i in valid_indices: bucket_sample_dict[bucket_ids_np[i]].append(i) # cache the bucket sample dict self._cached_bucket_sample_dict = bucket_sample_dict # num total batch num_total_batch = self.print_bucket_info(bucket_sample_dict) self._cached_num_total_batch = num_total_batch return bucket_sample_dict, num_total_batch
Group the dataset samples into buckets. This method will set `self._cached_bucket_sample_dict` to the bucket sample dict. Returns: dict: a dictionary with bucket id as key and a list of sample indices as value
group_by_bucket
python
hpcaitech/Open-Sora
opensora/datasets/sampler.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/sampler.py
Apache-2.0
def read_video_av( filename: str, start_pts: float | Fraction = 0, end_pts: float | Fraction | None = None, pts_unit: str = "pts", output_format: str = "THWC", ) -> tuple[torch.Tensor, torch.Tensor, dict]: """ Reads a video from a file, returning both the video frames and the audio frames This method is modified from torchvision.io.video.read_video, with the following changes: 1. will not extract audio frames and return empty for aframes 2. remove checks and only support pyav 3. add container.close() and gc.collect() to avoid thread leakage 4. try our best to avoid memory leak Args: filename (str): path to the video file start_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional): The start presentation time of the video end_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional): The end presentation time pts_unit (str, optional): unit in which start_pts and end_pts values will be interpreted, either 'pts' or 'sec'. Defaults to 'pts'. output_format (str, optional): The format of the output video tensors. Can be either "THWC" (default) or "TCHW". Returns: vframes (Tensor[T, H, W, C] or Tensor[T, C, H, W]): the `T` video frames aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points info (dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int) """ # format output_format = output_format.upper() if output_format not in ("THWC", "TCHW"): raise ValueError(f"output_format should be either 'THWC' or 'TCHW', got {output_format}.") # file existence if not os.path.exists(filename): raise RuntimeError(f"File not found: {filename}") # backend check assert get_video_backend() == "pyav", "pyav backend is required for read_video_av" _check_av_available() # end_pts check if end_pts is None: end_pts = float("inf") if end_pts < start_pts: raise ValueError(f"end_pts should be larger than start_pts, got start_pts={start_pts} and end_pts={end_pts}") # == get video info == info = {} # TODO: creating an container leads to memory leak (1G for 8 workers 1 GPU) container = av.open(filename, metadata_errors="ignore") # fps video_fps = container.streams.video[0].average_rate # guard against potentially corrupted files if video_fps is not None: info["video_fps"] = float(video_fps) iter_video = container.decode(**{"video": 0}) frame = next(iter_video).to_rgb().to_ndarray() height, width = frame.shape[:2] total_frames = container.streams.video[0].frames if total_frames == 0: total_frames = MAX_NUM_FRAMES warnings.warn(f"total_frames is 0, using {MAX_NUM_FRAMES} as a fallback") container.close() del container # HACK: must create before iterating stream # use np.zeros will not actually allocate memory # use np.ones will lead to a little memory leak video_frames = np.zeros((total_frames, height, width, 3), dtype=np.uint8) # == read == try: # TODO: The reading has memory leak (4G for 8 workers 1 GPU) container = av.open(filename, metadata_errors="ignore") assert container.streams.video is not None video_frames = _read_from_stream( video_frames, container, start_pts, end_pts, pts_unit, container.streams.video[0], {"video": 0}, filename=filename, ) except av.AVError as e: print(f"[Warning] Error while reading video {filename}: {e}") vframes = torch.from_numpy(video_frames).clone() del video_frames if output_format == "TCHW": # [T,H,W,C] --> [T,C,H,W] vframes = vframes.permute(0, 3, 1, 2) aframes = torch.empty((1, 0), dtype=torch.float32) return vframes, aframes, info
Reads a video from a file, returning both the video frames and the audio frames This method is modified from torchvision.io.video.read_video, with the following changes: 1. will not extract audio frames and return empty for aframes 2. remove checks and only support pyav 3. add container.close() and gc.collect() to avoid thread leakage 4. try our best to avoid memory leak Args: filename (str): path to the video file start_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional): The start presentation time of the video end_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional): The end presentation time pts_unit (str, optional): unit in which start_pts and end_pts values will be interpreted, either 'pts' or 'sec'. Defaults to 'pts'. output_format (str, optional): The format of the output video tensors. Can be either "THWC" (default) or "TCHW". Returns: vframes (Tensor[T, H, W, C] or Tensor[T, C, H, W]): the `T` video frames aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points info (dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int)
read_video_av
python
hpcaitech/Open-Sora
opensora/datasets/read_video.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/read_video.py
Apache-2.0
def crop(clip, i, j, h, w):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
    """
    if len(clip.size()) != 4:
        raise ValueError("clip should be a 4D tensor")
    return clip[..., i : i + h, j : j + w]
Args: clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
crop
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
    """
    Do spatial cropping and resizing to the video clip
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
        i (int): i in (i,j) i.e coordinates of the upper left corner.
        j (int): j in (i,j) i.e coordinates of the upper left corner.
        h (int): Height of the cropped region.
        w (int): Width of the cropped region.
        size (tuple(int, int)): height and width of resized clip
    Returns:
        clip (torch.tensor): Resized and cropped clip. Size is (T, C, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    clip = crop(clip, i, j, h, w)
    clip = resize(clip, size, interpolation_mode)
    return clip
Do spatial cropping and resizing to the video clip Args: clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) i (int): i in (i,j) i.e coordinates of the upper left corner. j (int): j in (i,j) i.e coordinates of the upper left corner. h (int): Height of the cropped region. w (int): Width of the cropped region. size (tuple(int, int)): height and width of resized clip Returns: clip (torch.tensor): Resized and cropped clip. Size is (T, C, H, W)
resized_crop
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def random_shift_crop(clip):
    """
    Slide along the long edge, with the short edge as crop size
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    h, w = clip.size(-2), clip.size(-1)

    if h <= w:
        short_edge = h
    else:
        short_edge = w

    th, tw = short_edge, short_edge

    i = torch.randint(0, h - th + 1, size=(1,)).item()
    j = torch.randint(0, w - tw + 1, size=(1,)).item()
    return crop(clip, i, j, th, tw)
Slide along the long edge, with the short edge as crop size
random_shift_crop
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def to_tensor(clip):
    """
    Convert tensor data type from uint8 to float, divide value by 255.0 and
    permute the dimensions of clip tensor
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, C, H, W)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (T, C, H, W)
    """
    _is_tensor_video_clip(clip)
    if not clip.dtype == torch.uint8:
        raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype))
    # return clip.float().permute(3, 0, 1, 2) / 255.0
    return clip.float() / 255.0
Convert tensor data type from uint8 to float, divide value by 255.0 and permute the dimensions of clip tensor Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, C, H, W) Return: clip (torch.tensor, dtype=torch.float): Size is (T, C, H, W)
to_tensor
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def normalize(clip, mean, std, inplace=False):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (T, C, H, W)
        mean (tuple): pixel RGB mean. Size is (3)
        std (tuple): pixel standard deviation. Size is (3)
    Returns:
        normalized clip (torch.tensor): Size is (T, C, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    if not inplace:
        clip = clip.clone()
    mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)
    # print(mean)
    std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)
    clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
    return clip
Args: clip (torch.tensor): Video clip to be normalized. Size is (T, C, H, W) mean (tuple): pixel RGB mean. Size is (3) std (tuple): pixel standard deviation. Size is (3) Returns: normalized clip (torch.tensor): Size is (T, C, H, W)
normalize
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def hflip(clip):
    """
    Args:
        clip (torch.tensor): Video clip to be flipped. Size is (T, C, H, W)
    Returns:
        flipped clip (torch.tensor): Size is (T, C, H, W)
    """
    if not _is_tensor_video_clip(clip):
        raise ValueError("clip should be a 4D torch.tensor")
    return clip.flip(-1)
Args: clip (torch.tensor): Video clip to be flipped. Size is (T, C, H, W) Returns: flipped clip (torch.tensor): Size is (T, C, H, W)
hflip
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def __call__(self, clip):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
    Returns:
        torch.tensor: randomly cropped video clip. size is (T, C, OH, OW)
    """
    i, j, h, w = self.get_params(clip)
    return crop(clip, i, j, h, w)
Args: clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) Returns: torch.tensor: randomly cropped video clip. size is (T, C, OH, OW)
__call__
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def __call__(self, clip):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
    Returns:
        torch.tensor: scale resized / center cropped video clip.
            size is (T, C, crop_size, crop_size)
    """
    clip_center_crop = center_crop_using_short_edge(clip)
    clip_center_crop_resize = resize(
        clip_center_crop, target_size=self.size, interpolation_mode=self.interpolation_mode
    )
    return clip_center_crop_resize
Args: clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) Returns: torch.tensor: scale resized / center cropped video clip. size is (T, C, crop_size, crop_size)
__call__
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def __call__(self, clip):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
    Returns:
        torch.tensor: scale resized / center cropped video clip.
            size is (T, C, crop_size, crop_size)
    """
    clip_resize = resize_scale(clip=clip, target_size=self.size, interpolation_mode=self.interpolation_mode)
    clip_center_crop = center_crop(clip_resize, self.size)
    return clip_center_crop
Args: clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) Returns: torch.tensor: scale resized / center cropped video clip. size is (T, C, crop_size, crop_size)
__call__
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def __call__(self, clip):
    """
    Args:
        clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
    Returns:
        torch.tensor: center cropped video clip. size is (T, C, crop_size, crop_size)
    """
    clip_center_crop = center_crop(clip, self.size)
    return clip_center_crop
Args: clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) Returns: torch.tensor: center cropped video clip. size is (T, C, crop_size, crop_size)
__call__
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def __call__(self, clip):
    """
    Args:
        clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
    """
    return normalize(clip, self.mean, self.std, self.inplace)
Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
__call__
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def __call__(self, clip):
    """
    Args:
        clip (torch.tensor, dtype=torch.uint8): Size is (T, C, H, W)
    Return:
        clip (torch.tensor, dtype=torch.float): Size is (T, C, H, W)
    """
    return to_tensor(clip)
Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, C, H, W) Return: clip (torch.tensor, dtype=torch.float): Size is (T, C, H, W)
__call__
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
def __call__(self, clip):
    """
    Args:
        clip (torch.tensor): Size is (T, C, H, W)
    Return:
        clip (torch.tensor): Size is (T, C, H, W)
    """
    if random.random() < self.p:
        clip = hflip(clip)
    return clip
Args: clip (torch.tensor): Size is (T, C, H, W) Return: clip (torch.tensor): Size is (T, C, H, W)
__call__
python
hpcaitech/Open-Sora
opensora/datasets/video_transforms.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/datasets/video_transforms.py
Apache-2.0
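A sketch chaining the functional transforms above on a random uint8 clip. Note that normalize broadcasts mean/std over the first dimension, so it effectively expects a channel-first clip even though its docstring says (T, C, H, W); the permute below reflects that reading and is an assumption about the intended layout.

import torch

clip = torch.randint(0, 256, (16, 3, 240, 320), dtype=torch.uint8)  # (T, C, H, W)
clip = to_tensor(clip)                                   # uint8 -> float in [0, 1]
clip = resized_crop(clip, 0, 0, 240, 240, (224, 224))    # crop a 240x240 window, resize to 224
clip = hflip(clip)                                       # flip along the width axis
clip = normalize(clip.permute(1, 0, 2, 3),               # channel-first so mean/std broadcast
                 mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))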
def sanitize_sampling_option(sampling_option: SamplingOption) -> SamplingOption:
    """
    Sanitize the sampling options.

    Args:
        sampling_option (SamplingOption): The sampling options.

    Returns:
        SamplingOption: The sanitized sampling options.
    """
    if (
        sampling_option.resolution is not None
        or sampling_option.aspect_ratio is not None
    ):
        assert (
            sampling_option.resolution is not None
            and sampling_option.aspect_ratio is not None
        ), "Both resolution and aspect ratio must be provided"
        resolution = sampling_option.resolution
        aspect_ratio = sampling_option.aspect_ratio
        height, width = get_image_size(resolution, aspect_ratio, training=False)
    else:
        assert (
            sampling_option.height is not None and sampling_option.width is not None
        ), "Both height and width must be provided"
        height, width = sampling_option.height, sampling_option.width
    height = (height // 16 + (1 if height % 16 else 0)) * 16
    width = (width // 16 + (1 if width % 16 else 0)) * 16

    replace_dict = dict(height=height, width=width)
    if isinstance(sampling_option.method, str):
        method = SamplingMethod(sampling_option.method)
        replace_dict["method"] = method
    return replace(sampling_option, **replace_dict)
Sanitize the sampling options. Args: sampling_option (SamplingOption): The sampling options. Returns: SamplingOption: The sanitized sampling options.
sanitize_sampling_option
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
def get_oscillation_gs(guidance_scale: float, i: int, force_num=10):
    """
    get oscillation guidance for cfg.

    Args:
        guidance_scale: original guidance value
        i: denoising step
        force_num: before which don't apply oscillation
    """
    if i < force_num or (i >= force_num and i % 2 == 0):
        gs = guidance_scale
    else:
        gs = 1.0
    return gs
get oscillation guidance for cfg. Args: guidance_scale: original guidance value i: denoising step force_num: before which don't apply oscillation
get_oscillation_gs
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
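The oscillation pattern, traced for a small force_num: steps below force_num keep the full scale, later steps alternate between the full scale (even) and 1.0 (odd).

[get_oscillation_gs(7.5, i, force_num=4) for i in range(8)]
# -> [7.5, 7.5, 7.5, 7.5, 7.5, 1.0, 7.5, 1.0]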
def denoise(self, model: MMDiTModel, **kwargs) -> Tensor:
    """Denoise the input."""
Denoise the input.
denoise
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
def prepare_guidance(
    self,
    text: list[str],
    optional_models: dict[str, nn.Module],
    device: torch.device,
    dtype: torch.dtype,
    **kwargs,
) -> dict[str, Tensor]:
    """Prepare the guidance for the model. This method will alter text."""
Prepare the guidance for the model. This method will alter text.
prepare_guidance
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
def get_noise(
    num_samples: int,
    height: int,
    width: int,
    num_frames: int,
    device: torch.device,
    dtype: torch.dtype,
    seed: int,
    patch_size: int = 2,
    channel: int = 16,
) -> Tensor:
    """
    Generate a noise tensor.

    Args:
        num_samples (int): Number of samples.
        height (int): Height of the noise tensor.
        width (int): Width of the noise tensor.
        num_frames (int): Number of frames.
        device (torch.device): Device to put the noise tensor on.
        dtype (torch.dtype): Data type of the noise tensor.
        seed (int): Seed for the random number generator.

    Returns:
        Tensor: The noise tensor.
    """
    D = int(os.environ.get("AE_SPATIAL_COMPRESSION", 16))
    return torch.randn(
        num_samples,
        channel,
        num_frames,
        # allow for packing
        patch_size * math.ceil(height / D),
        patch_size * math.ceil(width / D),
        device=device,
        dtype=dtype,
        generator=torch.Generator(device=device).manual_seed(seed),
    )
Generate a noise tensor. Args: num_samples (int): Number of samples. height (int): Height of the noise tensor. width (int): Width of the noise tensor. num_frames (int): Number of frames. device (torch.device): Device to put the noise tensor on. dtype (torch.dtype): Data type of the noise tensor. seed (int): Seed for the random number generator. Returns: Tensor: The noise tensor.
get_noise
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
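A shape sketch for get_noise with the default 16x spatial compression and patch_size=2:

import torch

z = get_noise(1, 768, 1280, 32, torch.device("cpu"), torch.float32, seed=42)
# spatial dims: 2 * ceil(768 / 16) = 96 and 2 * ceil(1280 / 16) = 160
print(z.shape)  # torch.Size([1, 16, 32, 96, 160])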
def prepare( t5, clip: HFEmbedder, img: Tensor, prompt: str | list[str], seq_align: int = 1, patch_size: int = 2, ) -> dict[str, Tensor]: """ Prepare the input for the model. Args: t5 (HFEmbedder): The T5 model. clip (HFEmbedder): The CLIP model. img (Tensor): The image tensor. prompt (str | list[str]): The prompt(s). Returns: dict[str, Tensor]: The input dictionary. img_ids: used for positional embedding in T,H,W dimensions later text_ids: for positional embedding, but set to 0 for now since our text encoder already encodes positional information """ bs, c, t, h, w = img.shape device, dtype = img.device, img.dtype if isinstance(prompt, str): prompt = [prompt] if bs != len(prompt): bs = len(prompt) img = rearrange( img, "b c t (h ph) (w pw) -> b (t h w) (c ph pw)", ph=patch_size, pw=patch_size ) if img.shape[0] != bs: img = repeat(img, "b ... -> (repeat b) ...", repeat=bs // img.shape[0]) img_ids = torch.zeros(t, h // patch_size, w // patch_size, 3) img_ids[..., 0] = img_ids[..., 0] + torch.arange(t)[:, None, None] img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // patch_size)[None, :, None] img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // patch_size)[None, None, :] img_ids = repeat(img_ids, "t h w c -> b (t h w) c", b=bs) # Encode the tokenized prompts txt = t5(prompt, added_tokens=img_ids.shape[1], seq_align=seq_align) if txt.shape[0] == 1 and bs > 1: txt = repeat(txt, "1 ... -> bs ...", bs=bs) txt_ids = torch.zeros(bs, txt.shape[1], 3) vec = clip(prompt) if vec.shape[0] == 1 and bs > 1: vec = repeat(vec, "1 ... -> bs ...", bs=bs) return { "img": img, "img_ids": img_ids.to(device, dtype), "txt": txt.to(device, dtype), "txt_ids": txt_ids.to(device, dtype), "y_vec": vec.to(device, dtype), }
Prepare the input for the model. Args: t5 (HFEmbedder): The T5 model. clip (HFEmbedder): The CLIP model. img (Tensor): The image tensor. prompt (str | list[str]): The prompt(s). Returns: dict[str, Tensor]: The input dictionary. img_ids: used for positional embedding in T,H,W dimensions later text_ids: for positional embedding, but set to 0 for now since our text encoder already encodes positional information
prepare
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
def prepare_ids( img: Tensor, t5_embedding: Tensor, clip_embedding: Tensor, ) -> dict[str, Tensor]: """ Prepare the input for the model. Args: img (Tensor): The image tensor. t5_embedding (Tensor): The T5 embedding. clip_embedding (Tensor): The CLIP embedding. Returns: dict[str, Tensor]: The input dictionary. img_ids: used for positional embedding in T,H,W dimensions later text_ids: for positional embedding, but set to 0 for now since our text encoder already encodes positional information """ bs, c, t, h, w = img.shape device, dtype = img.device, img.dtype img = rearrange(img, "b c t (h ph) (w pw) -> b (t h w) (c ph pw)", ph=2, pw=2) if img.shape[0] != bs: img = repeat(img, "b ... -> (repeat b) ...", repeat=bs // img.shape[0]) img_ids = torch.zeros(t, h // 2, w // 2, 3) img_ids[..., 0] = img_ids[..., 0] + torch.arange(t)[:, None, None] img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[None, :, None] img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, None, :] img_ids = repeat(img_ids, "t h w c -> b (t h w) c", b=bs) # Encode the tokenized prompts if t5_embedding.shape[0] == 1 and bs > 1: t5_embedding = repeat(t5_embedding, "1 ... -> bs ...", bs=bs) txt_ids = torch.zeros(bs, t5_embedding.shape[1], 3) if clip_embedding.shape[0] == 1 and bs > 1: clip_embedding = repeat(clip_embedding, "1 ... -> bs ...", bs=bs) return { "img": img, "img_ids": img_ids.to(device, dtype), "txt": t5_embedding.to(device, dtype), "txt_ids": txt_ids.to(device, dtype), "y_vec": clip_embedding.to(device, dtype), }
Prepare the input for the model. Args: img (Tensor): The image tensor. t5_embedding (Tensor): The T5 embedding. clip_embedding (Tensor): The CLIP embedding. Returns: dict[str, Tensor]: The input dictionary. img_ids: used for positional embedding in T,H,W dimensions later text_ids: for positional embedding, but set to 0 for now since our text encoder already encodes positional information
prepare_ids
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
def prepare_models( cfg: Config, device: torch.device, dtype: torch.dtype, offload_model: bool = False, ) -> tuple[nn.Module, nn.Module, nn.Module, nn.Module, dict[str, nn.Module]]: """ Prepare models for inference. Args: cfg (Config): The configuration object. device (torch.device): The device to use. dtype (torch.dtype): The data type to use. Returns: tuple[nn.Module, nn.Module, nn.Module, nn.Module, dict[str, nn.Module]]: The models. They are the diffusion model, the autoencoder model, the T5 model, the CLIP model, and the optional models. """ model_device = ( "cpu" if offload_model and cfg.get("img_flux", None) is not None else device ) model = build_module( cfg.model, MODELS, device_map=model_device, torch_dtype=dtype ).eval() model_ae = build_module( cfg.ae, MODELS, device_map=model_device, torch_dtype=dtype ).eval() model_t5 = build_module(cfg.t5, MODELS, device_map=device, torch_dtype=dtype).eval() model_clip = build_module( cfg.clip, MODELS, device_map=device, torch_dtype=dtype ).eval() if cfg.get("pretrained_lora_path", None) is not None: model = PeftModel.from_pretrained( model, cfg.pretrained_lora_path, is_trainable=False ) # optional models optional_models = {} if cfg.get("img_flux", None) is not None: model_img_flux = build_module( cfg.img_flux, MODELS, device_map=device, torch_dtype=dtype ).eval() model_ae_img_flux = build_module( cfg.img_flux_ae, MODELS, device_map=device, torch_dtype=dtype ).eval() optional_models["img_flux"] = model_img_flux optional_models["img_flux_ae"] = model_ae_img_flux return model, model_ae, model_t5, model_clip, optional_models
Prepare models for inference. Args: cfg (Config): The configuration object. device (torch.device): The device to use. dtype (torch.dtype): The data type to use. Returns: tuple[nn.Module, nn.Module, nn.Module, nn.Module, dict[str, nn.Module]]: The models. They are the diffusion model, the autoencoder model, the T5 model, the CLIP model, and the optional models.
prepare_models
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
def api_fn( opt: SamplingOption, cond_type: str = "t2v", seed: int = None, sigma_min: float = 1e-5, text: list[str] = None, neg: list[str] = None, patch_size: int = 2, channel: int = 16, **kwargs, ): """ The API function for inference. Args: opt (SamplingOption): The sampling options. text (list[str], optional): The text prompts. Defaults to None. neg (list[str], optional): The negative text prompts. Defaults to None. Returns: torch.Tensor: The generated images. """ device = next(model.parameters()).device dtype = next(model.parameters()).dtype # passing seed will overwrite opt seed if seed is None: # random seed if not provided seed = opt.seed if opt.seed is not None else random.randint(0, 2**32 - 1) if opt.is_causal_vae: num_frames = ( 1 if opt.num_frames == 1 else (opt.num_frames - 1) // opt.temporal_reduction + 1 ) else: num_frames = ( 1 if opt.num_frames == 1 else opt.num_frames // opt.temporal_reduction ) z = get_noise( len(text), opt.height, opt.width, num_frames, device, dtype, seed, patch_size=patch_size, channel=channel // (patch_size**2), ) denoiser = SamplingMethodDict[opt.method] # i2v reference conditions references = [None] * len(text) if cond_type != "t2v" and "ref" in kwargs: reference_path_list = kwargs.pop("ref") references = collect_references_batch( reference_path_list, cond_type, model_ae, (opt.height, opt.width), is_causal=opt.is_causal_vae, ) elif cond_type != "t2v": print( "your csv file doesn't have a ref column or is not processed properly. will default to cond_type t2v!" ) cond_type = "t2v" # timestep editing timesteps = get_schedule( opt.num_steps, (z.shape[-1] * z.shape[-2]) // patch_size**2, num_frames, shift=opt.shift, shift_alpha=opt.flow_shift, ) # prepare classifier-free guidance data (method specific) text, additional_inp = denoiser.prepare_guidance( text=text, optional_models=optional_models, device=device, dtype=dtype, neg=neg, guidance_img=opt.guidance_img, ) inp = prepare(model_t5, model_clip, z, prompt=text, patch_size=patch_size) inp.update(additional_inp) if opt.method in [SamplingMethod.I2V]: # prepare references masks, masked_ref = prepare_inference_condition( z, cond_type, ref_list=references, causal=opt.is_causal_vae ) inp["masks"] = masks inp["masked_ref"] = masked_ref inp["sigma_min"] = sigma_min x = denoiser.denoise( model, **inp, timesteps=timesteps, guidance=opt.guidance, text_osci=opt.text_osci, image_osci=opt.image_osci, scale_temporal_osci=( opt.scale_temporal_osci and "i2v" in cond_type ), # don't use temporal osci for v2v or t2v flow_shift=opt.flow_shift, patch_size=patch_size, ) x = unpack(x, opt.height, opt.width, num_frames, patch_size=patch_size) # replace for image condition if cond_type == "i2v_head": x[0, :, :1] = references[0][0] elif cond_type == "i2v_tail": x[0, :, -1:] = references[0][0] elif cond_type == "i2v_loop": x[0, :, :1] = references[0][0] x[0, :, -1:] = references[0][1] x = model_ae.decode(x) x = x[:, :, : opt.num_frames] # image # remove the duplicate frames if not opt.is_causal_vae: if cond_type == "i2v_head": pad_len = model_ae.compression[0] - 1 x = x[:, :, pad_len:] elif cond_type == "i2v_tail": pad_len = model_ae.compression[0] - 1 x = x[:, :, :-pad_len] elif cond_type == "i2v_loop": pad_len = model_ae.compression[0] - 1 x = x[:, :, pad_len:-pad_len] return x
The API function for inference. Args: opt (SamplingOption): The sampling options. text (list[str], optional): The text prompts. Defaults to None. neg (list[str], optional): The negative text prompts. Defaults to None. Returns: torch.Tensor: The generated images.
prepare_api.api_fn
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
def prepare_api( model: nn.Module, model_ae: nn.Module, model_t5: nn.Module, model_clip: nn.Module, optional_models: dict[str, nn.Module], ) -> callable: """ Prepare the API function for inference. Args: model (nn.Module): The diffusion model. model_ae (nn.Module): The autoencoder model. model_t5 (nn.Module): The T5 model. model_clip (nn.Module): The CLIP model. Returns: callable: The API function for inference. """ @torch.inference_mode() def api_fn( opt: SamplingOption, cond_type: str = "t2v", seed: int = None, sigma_min: float = 1e-5, text: list[str] = None, neg: list[str] = None, patch_size: int = 2, channel: int = 16, **kwargs, ): """ The API function for inference. Args: opt (SamplingOption): The sampling options. text (list[str], optional): The text prompts. Defaults to None. neg (list[str], optional): The negative text prompts. Defaults to None. Returns: torch.Tensor: The generated images. """ device = next(model.parameters()).device dtype = next(model.parameters()).dtype # passing seed will overwrite opt seed if seed is None: # random seed if not provided seed = opt.seed if opt.seed is not None else random.randint(0, 2**32 - 1) if opt.is_causal_vae: num_frames = ( 1 if opt.num_frames == 1 else (opt.num_frames - 1) // opt.temporal_reduction + 1 ) else: num_frames = ( 1 if opt.num_frames == 1 else opt.num_frames // opt.temporal_reduction ) z = get_noise( len(text), opt.height, opt.width, num_frames, device, dtype, seed, patch_size=patch_size, channel=channel // (patch_size**2), ) denoiser = SamplingMethodDict[opt.method] # i2v reference conditions references = [None] * len(text) if cond_type != "t2v" and "ref" in kwargs: reference_path_list = kwargs.pop("ref") references = collect_references_batch( reference_path_list, cond_type, model_ae, (opt.height, opt.width), is_causal=opt.is_causal_vae, ) elif cond_type != "t2v": print( "your csv file doesn't have a ref column or is not processed properly. will default to cond_type t2v!" 
) cond_type = "t2v" # timestep editing timesteps = get_schedule( opt.num_steps, (z.shape[-1] * z.shape[-2]) // patch_size**2, num_frames, shift=opt.shift, shift_alpha=opt.flow_shift, ) # prepare classifier-free guidance data (method specific) text, additional_inp = denoiser.prepare_guidance( text=text, optional_models=optional_models, device=device, dtype=dtype, neg=neg, guidance_img=opt.guidance_img, ) inp = prepare(model_t5, model_clip, z, prompt=text, patch_size=patch_size) inp.update(additional_inp) if opt.method in [SamplingMethod.I2V]: # prepare references masks, masked_ref = prepare_inference_condition( z, cond_type, ref_list=references, causal=opt.is_causal_vae ) inp["masks"] = masks inp["masked_ref"] = masked_ref inp["sigma_min"] = sigma_min x = denoiser.denoise( model, **inp, timesteps=timesteps, guidance=opt.guidance, text_osci=opt.text_osci, image_osci=opt.image_osci, scale_temporal_osci=( opt.scale_temporal_osci and "i2v" in cond_type ), # don't use temporal osci for v2v or t2v flow_shift=opt.flow_shift, patch_size=patch_size, ) x = unpack(x, opt.height, opt.width, num_frames, patch_size=patch_size) # replace for image condition if cond_type == "i2v_head": x[0, :, :1] = references[0][0] elif cond_type == "i2v_tail": x[0, :, -1:] = references[0][0] elif cond_type == "i2v_loop": x[0, :, :1] = references[0][0] x[0, :, -1:] = references[0][1] x = model_ae.decode(x) x = x[:, :, : opt.num_frames] # image # remove the duplicate frames if not opt.is_causal_vae: if cond_type == "i2v_head": pad_len = model_ae.compression[0] - 1 x = x[:, :, pad_len:] elif cond_type == "i2v_tail": pad_len = model_ae.compression[0] - 1 x = x[:, :, :-pad_len] elif cond_type == "i2v_loop": pad_len = model_ae.compression[0] - 1 x = x[:, :, pad_len:-pad_len] return x return api_fn
Prepare the API function for inference. Args: model (nn.Module): The diffusion model. model_ae (nn.Module): The autoencoder model. model_t5 (nn.Module): The T5 model. model_clip (nn.Module): The CLIP model. Returns: callable: The API function for inference.
prepare_api
python
hpcaitech/Open-Sora
opensora/utils/sampling.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/sampling.py
Apache-2.0
def set_group_size(plugin_config: dict):
    """
    Set the group size for tensor parallelism and sequence parallelism.

    Args:
        plugin_config (dict): Plugin configuration.
    """
    tp_size = int(plugin_config.get("tp_size", 1))
    sp_size = int(plugin_config.get("sp_size", 1))
    if tp_size > 1:
        assert sp_size == 1
        plugin_config["tp_size"] = tp_size = min(tp_size, torch.cuda.device_count())
        log_message(f"Using TP with size {tp_size}")
    if sp_size > 1:
        assert tp_size == 1
        plugin_config["sp_size"] = sp_size = min(sp_size, torch.cuda.device_count())
        log_message(f"Using SP with size {sp_size}")
Set the group size for tensor parallelism and sequence parallelism. Args: plugin_config (dict): Plugin configuration.
set_group_size
python
hpcaitech/Open-Sora
opensora/utils/cai.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/cai.py
Apache-2.0
def init_inference_environment():
    """
    Initialize the inference environment.
    """
    if is_distributed():
        colossalai.launch_from_torch({})
        coordinator = DistCoordinator()
        enable_sequence_parallelism = coordinator.world_size > 1
        if enable_sequence_parallelism:
            set_sequence_parallel_group(dist.group.WORLD)
Initialize the inference environment.
init_inference_environment
python
hpcaitech/Open-Sora
opensora/utils/cai.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/cai.py
Apache-2.0
def get_is_saving_process(cfg: dict):
    """
    Check if the current process is the one that saves the model.

    Args:
        cfg (dict): The configuration.

    Returns:
        bool: True if the current process is the one that saves the model.
    """
    plugin_type = cfg.get("plugin", "zero2")
    plugin_config = cfg.get("plugin_config", {})
    is_saving_process = (
        plugin_type != "hybrid"
        or (plugin_config["tp_size"] > 1 and dist.get_rank(get_tensor_parallel_group()) == 0)
        or (plugin_config["sp_size"] > 1 and dist.get_rank(get_sequence_parallel_group()) == 0)
    )
    return is_saving_process
Check if the current process is the one that saves the model. Args: cfg (dict): The configuration. Returns: bool: True if the current process is the one that saves the model.
get_is_saving_process
python
hpcaitech/Open-Sora
opensora/utils/cai.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/cai.py
Apache-2.0
def parse_args() -> tuple[str, argparse.Namespace]:
    """
    This function parses the command line arguments.

    Returns:
        tuple[str, argparse.Namespace]: The path to the configuration file and the command line arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("config", type=str, help="model config file path")
    args, unknown_args = parser.parse_known_args()
    return args.config, unknown_args
This function parses the command line arguments. Returns: tuple[str, argparse.Namespace]: The path to the configuration file and the command line arguments.
parse_args
python
hpcaitech/Open-Sora
opensora/utils/config.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/config.py
Apache-2.0
def read_config(config_path: str) -> Config:
    """
    This function reads the configuration file.

    Args:
        config_path (str): The path to the configuration file.

    Returns:
        Config: The configuration object.
    """
    cfg = Config.fromfile(config_path)
    return cfg
This function reads the configuration file. Args: config_path (str): The path to the configuration file. Returns: Config: The configuration object.
read_config
python
hpcaitech/Open-Sora
opensora/utils/config.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/config.py
Apache-2.0
def parse_configs() -> Config:
    """
    This function parses the configuration file and command line arguments.

    Returns:
        Config: The configuration object.
    """
    config, args = parse_args()
    cfg = read_config(config)
    cfg = merge_args(cfg, args)
    cfg.config_path = config

    # hard-coded for spatial compression
    if cfg.get("ae_spatial_compression", None) is not None:
        os.environ["AE_SPATIAL_COMPRESSION"] = str(cfg.ae_spatial_compression)
    return cfg
This function parses the configuration file and command line arguments. Returns: Config: The configuration object.
parse_configs
python
hpcaitech/Open-Sora
opensora/utils/config.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/config.py
Apache-2.0
def merge_args(cfg: Config, args: argparse.Namespace) -> Config:
    """
    This function merges the configuration file and command line arguments.

    Args:
        cfg (Config): The configuration object.
        args (argparse.Namespace): The command line arguments.

    Returns:
        Config: The configuration object.
    """
    for k, v in zip(args[::2], args[1::2]):
        assert k.startswith("--"), f"Invalid argument: {k}"
        k = k[2:].replace("-", "_")
        k_split = k.split(".")
        target = cfg
        for key in k_split[:-1]:
            assert key in cfg, f"Key {key} not found in config"
            target = target[key]
        if v.lower() == "none":
            v = None
        elif k in target:
            v_type = type(target[k])
            if v_type == bool:
                v = auto_convert(v)
            else:
                v = type(target[k])(v)
        else:
            v = auto_convert(v)
        target[k_split[-1]] = v

    return cfg
This function merges the configuration file and command line arguments. Args: cfg (Config): The configuration object. args (argparse.Namespace): The command line arguments. Returns: Config: The configuration object.
merge_args
python
hpcaitech/Open-Sora
opensora/utils/config.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/config.py
Apache-2.0
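A sketch of how the dotted override syntax behaves, assuming the mmengine-style Config used by read_config; the keys shown are hypothetical.

from mmengine.config import Config  # assumed import

cfg = Config(dict(save_dir="outputs", dataset=dict(data_path="a.csv")))
cfg = merge_args(cfg, ["--dataset.data_path", "b.csv", "--save_dir", "none"])
# cfg.dataset.data_path == "b.csv"; cfg.save_dir is None ("none" maps to None)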
def auto_convert(value: str) -> int | float | bool | list | dict | None:
    """
    Automatically convert a string to the appropriate Python data type,
    including int, float, bool, list, dict, etc.

    Args:
        value (str): The string to convert.

    Returns:
        int, float, bool, list | dict: The converted value.
    """
    # Handle empty string
    if value == "":
        return value

    # Handle None
    if value.lower() == "none":
        return None

    # Handle boolean values
    lower_value = value.lower()
    if lower_value == "true":
        return True
    elif lower_value == "false":
        return False

    # Try to convert the string to an integer or float
    try:
        # Try converting to an integer
        return int(value)
    except ValueError:
        pass

    try:
        # Try converting to a float
        return float(value)
    except ValueError:
        pass

    # Try to convert the string to a list, dict, tuple, etc.
    try:
        return ast.literal_eval(value)
    except (ValueError, SyntaxError):
        pass

    # If all attempts fail, return the original string
    return value
Automatically convert a string to the appropriate Python data type, including int, float, bool, list, dict, etc. Args: value (str): The string to convert. Returns: int, float, bool, list | dict: The converted value.
auto_convert
python
hpcaitech/Open-Sora
opensora/utils/config.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/config.py
Apache-2.0
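Behavior of auto_convert at a glance:

auto_convert("42")          # -> 42
auto_convert("2.5")         # -> 2.5
auto_convert("True")        # -> True
auto_convert("none")        # -> None
auto_convert("[256, 256]")  # -> [256, 256]
auto_convert("{'a': 1}")    # -> {'a': 1}
auto_convert("hello")       # -> 'hello' (falls through unchanged)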
def sync_string(value: str):
    """
    This function synchronizes a string across all processes.
    """
    if not is_distributed():
        return value
    bytes_value = value.encode("utf-8")
    max_len = 256
    bytes_tensor = torch.zeros(max_len, dtype=torch.uint8).cuda()
    bytes_tensor[: len(bytes_value)] = torch.tensor(
        list(bytes_value), dtype=torch.uint8
    )
    torch.distributed.broadcast(bytes_tensor, 0)
    synced_value = bytes_tensor.cpu().numpy().tobytes().decode("utf-8").rstrip("\x00")
    return synced_value
This function synchronizes a string across all processes.
sync_string
python
hpcaitech/Open-Sora
opensora/utils/config.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/config.py
Apache-2.0
def create_experiment_workspace(
    output_dir: str, model_name: str = None, config: dict = None, exp_name: str = None
) -> tuple[str, str]:
    """
    This function creates a folder for experiment tracking.

    Args:
        output_dir: The path to the output directory.
        model_name: The name of the model.
        exp_name: The given name of the experiment, if None will use default.

    Returns:
        tuple[str, str]: The experiment name and the experiment directory.
    """
    if exp_name is None:
        # Make outputs folder (holds all experiment subfolders)
        experiment_index = datetime.now().strftime("%y%m%d_%H%M%S")
        experiment_index = sync_string(experiment_index)

        # Create an experiment folder
        model_name = (
            "-" + model_name.replace("/", "-") if model_name is not None else ""
        )
        exp_name = f"{experiment_index}{model_name}"
    exp_dir = f"{output_dir}/{exp_name}"
    if is_main_process():
        os.makedirs(exp_dir, exist_ok=True)

        # Save the config
        with open(f"{exp_dir}/config.txt", "w", encoding="utf-8") as f:
            json.dump(config, f, indent=4)
    return exp_name, exp_dir
This function creates a folder for experiment tracking. Args: output_dir: The path to the output directory. model_name: The name of the model. exp_name: The given name of the experiment, if None will use default. Returns: tuple[str, str]: The experiment name and the experiment directory.
create_experiment_workspace
python
hpcaitech/Open-Sora
opensora/utils/config.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/config.py
Apache-2.0
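A short sketch of the default experiment-naming scheme used by create_experiment_workspace; the timestamp and model identifier below are purely illustrative.

from datetime import datetime

model_name = "hpcai-tech/Open-Sora-v2"  # hypothetical model identifier
experiment_index = datetime(2024, 5, 1, 12, 0, 0).strftime("%y%m%d_%H%M%S")
exp_name = f"{experiment_index}-{model_name.replace('/', '-')}"
print(exp_name)  # 240501_120000-hpcai-tech-Open-Sora-v2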
def create_tmp_csv(save_dir: str, prompt: str, ref: str = None, create=True) -> str: """ Create a temporary CSV file with the prompt text. Args: save_dir (str): The directory where the CSV file will be saved. prompt (str): The prompt text. Returns: str: The path to the temporary CSV file. """ tmp_file = os.path.join(save_dir, "prompt.csv") if not create: return tmp_file with open(tmp_file, "w", encoding="utf-8") as f: if ref is not None: f.write(f'text,ref\n"{prompt}","{ref}"') else: f.write(f'text\n"{prompt}"') return tmp_file
Create a temporary CSV file with the prompt text. Args: save_dir (str): The directory where the CSV file will be saved. prompt (str): The prompt text. Returns: str: The path to the temporary CSV file.
create_tmp_csv
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
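A minimal sketch of the CSV layout produced for a single prompt (prompt text, reference file name, and directory are illustrative; the import assumes the repository is installed as a package).

import tempfile
from opensora.utils.inference import create_tmp_csv

save_dir = tempfile.mkdtemp()
path = create_tmp_csv(save_dir, prompt="a red panda eating bamboo", ref="panda.png")
print(open(path, encoding="utf-8").read())
# text,ref
# "a red panda eating bamboo","panda.png"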
def modify_option_to_t2i(sampling_option, distilled: bool = False, img_resolution: str = "1080px"): """ Modify the sampling option to be used for text-to-image generation. """ sampling_option_t2i = copy.copy(sampling_option) if distilled: sampling_option_t2i.method = SamplingMethod.DISTILLED sampling_option_t2i.num_frames = 1 sampling_option_t2i.height, sampling_option_t2i.width = get_image_size(img_resolution, sampling_option.aspect_ratio) sampling_option_t2i.guidance = 4.0 sampling_option_t2i.resized_resolution = sampling_option.resolution return sampling_option_t2i
Modify the sampling option to be used for text-to-image generation.
modify_option_to_t2i
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
def get_save_path_name( save_dir, sub_dir, save_prefix="", name=None, fallback_name=None, index=None, num_sample_pos=None, # idx for prompt as path prompt_as_path=False, # save sample with same name as prompt prompt=None, ): """ Get the save path for the generated samples. """ if prompt_as_path: # for vbench cleaned_prompt = prompt.strip(".") fname = f"{cleaned_prompt}-{num_sample_pos}" else: if name is not None: fname = save_prefix + name else: fname = f"{save_prefix + fallback_name}_{index:04d}" if num_sample_pos > 0: fname += f"_{num_sample_pos}" return os.path.join(save_dir, sub_dir, fname)
Get the save path for the generated samples.
get_save_path_name
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
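Illustrative calls showing how get_save_path_name assembles sample paths (all values hypothetical; POSIX-style separators shown).

from opensora.utils.inference import get_save_path_name

p = get_save_path_name(
    "samples", "video", save_prefix="", name=None,
    fallback_name="prompts", index=7, num_sample_pos=0, prompt_as_path=False,
)
print(p)  # samples/video/prompts_0007

# with prompt_as_path=True the stripped prompt itself becomes the file name
p = get_save_path_name("samples", "video", num_sample_pos=1, prompt_as_path=True, prompt="A cat surfing.")
print(p)  # samples/video/A cat surfing-1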
def get_names_from_path(path):
    """
    Get the filename (without extension) from a path.

    Args:
        path (str): The path to the file.

    Returns:
        str: The filename without its extension.
    """
    filename = os.path.basename(path)
    name, _ = os.path.splitext(filename)
    return name
Get the filename (without extension) from a path.

Args:
    path (str): The path to the file.

Returns:
    str: The filename without its extension.
get_names_from_path
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
def process_and_save( x: torch.Tensor, batch: dict, cfg: dict, sub_dir: str, generate_sampling_option, epoch: int, start_index: int, saving: bool = True, ): """ Process the generated samples and save them to disk. """ fallback_name = cfg.dataset.data_path.split("/")[-1].split(".")[0] prompt_as_path = cfg.get("prompt_as_path", False) fps_save = cfg.get("fps_save", 16) save_dir = cfg.save_dir names = batch["name"] if "name" in batch else [None] * len(x) indices = batch["index"] if "index" in batch else [None] * len(x) if "index" in batch: indices = [idx + start_index for idx in indices] prompts = batch["text"] ret_names = [] is_image = generate_sampling_option.num_frames == 1 for img, name, index, prompt in zip(x, names, indices, prompts): # == get save path == save_path = get_save_path_name( save_dir, sub_dir, save_prefix=cfg.get("save_prefix", ""), name=name, fallback_name=fallback_name, index=index, num_sample_pos=epoch, prompt_as_path=prompt_as_path, prompt=prompt, ) ret_name = get_names_from_path(save_path) ret_names.append(ret_name) if saving: # == write txt to disk == with open(save_path + ".txt", "w", encoding="utf-8") as f: f.write(prompt) # == save samples == save_sample(img, save_path=save_path, fps=fps_save) # == resize image for t2i2v == if ( cfg.get("use_t2i2v", False) and is_image and generate_sampling_option.resolution != generate_sampling_option.resized_resolution ): log_message("Rescaling image to %s...", generate_sampling_option.resized_resolution) height, width = get_image_size( generate_sampling_option.resized_resolution, generate_sampling_option.aspect_ratio ) rescale_image_by_path(save_path + ".png", width, height) return ret_names
Process the generated samples and save them to disk.
process_and_save
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
def check_fps_added(sentence): """ Check if the sentence ends with the FPS information. """ pattern = r"\d+ FPS\.$" if re.search(pattern, sentence): return True return False
Check if the sentence ends with the FPS information.
check_fps_added
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
def ensure_sentence_ends_with_period(sentence: str): """ Ensure that the sentence ends with a period. """ sentence = sentence.strip() if not sentence.endswith("."): sentence += "." return sentence
Ensure that the sentence ends with a period.
ensure_sentence_ends_with_period
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
def add_fps_info_to_text(text: list[str], fps: int = 16): """ Add the FPS information to the text. """ mod_text = [] for item in text: item = ensure_sentence_ends_with_period(item) if not check_fps_added(item): item = item + f" {fps} FPS." mod_text.append(item) return mod_text
Add the FPS information to the text.
add_fps_info_to_text
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
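A small sketch of the prompt post-processing helpers above (imports assume the repository is installed as a package).

from opensora.utils.inference import add_fps_info_to_text, check_fps_added

prompts = ["a drone shot of a coastline", "city timelapse at night. 24 FPS."]
print(add_fps_info_to_text(prompts, fps=16))
# ['a drone shot of a coastline. 16 FPS.', 'city timelapse at night. 24 FPS.']
print(check_fps_added("a drone shot of a coastline. 16 FPS."))  # True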
def add_motion_score_to_text(text, motion_score: int | str): """ Add the motion score to the text. """ if motion_score == "dynamic": ms = refine_prompts(text, type="motion_score") return [f"{t} {ms[i]}." for i, t in enumerate(text)] else: return [f"{t} {motion_score} motion score." for t in text]
Add the motion score to the text.
add_motion_score_to_text
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
def prepare_inference_condition( z: torch.Tensor, mask_cond: str, ref_list: list[list[torch.Tensor]] = None, causal: bool = True, ) -> torch.Tensor: """ Prepare the visual condition for the model, using causal vae. Args: z (torch.Tensor): The latent noise tensor, of shape [B, C, T, H, W] mask_cond (dict): The condition configuration. ref_list: list of lists of media (image/video) for i2v and v2v condition, of shape [C, T', H, W]; len(ref_list)==B; ref_list[i] is the list of media for the generation in batch idx i, we use a list of media for each batch item so that it can have multiple references. For example, ref_list[i] could be [ref_image_1, ref_image_2] for i2v_loop condition. Returns: torch.Tensor: The visual condition tensor. """ # x has shape [b, c, t, h, w], where b is the batch size B, C, T, H, W = z.shape masks = torch.zeros(B, 1, T, H, W) masked_z = torch.zeros(B, C, T, H, W) if ref_list is None: assert mask_cond == "t2v", f"reference is required for {mask_cond}" for i in range(B): ref = ref_list[i] # warning message if ref is None and mask_cond != "t2v": print("no reference found. will default to cond_type t2v!") if ref is not None and T > 1: # video # Apply the selected mask condition directly on the masks tensor if mask_cond == "i2v_head": # equivalent to masking the first timestep masks[i, :, 0, :, :] = 1 masked_z[i, :, 0, :, :] = ref[0][:, 0, :, :] elif mask_cond == "i2v_tail": # mask the last timestep masks[i, :, -1, :, :] = 1 masked_z[i, :, -1, :, :] = ref[-1][:, -1, :, :] elif mask_cond == "v2v_head": k = 8 + int(causal) masks[i, :, :k, :, :] = 1 masked_z[i, :, :k, :, :] = ref[0][:, :k, :, :] elif mask_cond == "v2v_tail": k = 8 + int(causal) masks[i, :, -k:, :, :] = 1 masked_z[i, :, -k:, :, :] = ref[0][:, -k:, :, :] elif mask_cond == "v2v_head_easy": k = 16 + int(causal) masks[i, :, :k, :, :] = 1 masked_z[i, :, :k, :, :] = ref[0][:, :k, :, :] elif mask_cond == "v2v_tail_easy": k = 16 + int(causal) masks[i, :, -k:, :, :] = 1 masked_z[i, :, -k:, :, :] = ref[0][:, -k:, :, :] elif mask_cond == "i2v_loop": # mask first and last timesteps masks[i, :, 0, :, :] = 1 masks[i, :, -1, :, :] = 1 masked_z[i, :, 0, :, :] = ref[0][:, 0, :, :] masked_z[i, :, -1, :, :] = ref[-1][:, -1, :, :] # last frame of last referenced content else: # "t2v" is the fallback case where no specific condition is specified assert mask_cond == "t2v", f"Unknown mask condition {mask_cond}" masks = masks.to(z.device, z.dtype) masked_z = masked_z.to(z.device, z.dtype) return masks, masked_z
Prepare the visual condition for the model, using causal vae. Args: z (torch.Tensor): The latent noise tensor, of shape [B, C, T, H, W] mask_cond (dict): The condition configuration. ref_list: list of lists of media (image/video) for i2v and v2v condition, of shape [C, T', H, W]; len(ref_list)==B; ref_list[i] is the list of media for the generation in batch idx i, we use a list of media for each batch item so that it can have multiple references. For example, ref_list[i] could be [ref_image_1, ref_image_2] for i2v_loop condition. Returns: torch.Tensor: The visual condition tensor.
prepare_inference_condition
python
hpcaitech/Open-Sora
opensora/utils/inference.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/inference.py
Apache-2.0
def load_from_hf_hub(repo_path: str, cache_dir: str = None) -> str: """ Loads a checkpoint from the Hugging Face Hub. Args: repo_path (str): The path to the checkpoint on the Hugging Face Hub. cache_dir (str): The directory to cache the downloaded checkpoint. Returns: str: The path to the downloaded checkpoint. """ repo_id = "/".join(repo_path.split("/")[:-1]) repo_file = repo_path.split("/")[-1] ckpt_path = hf_hub_download(repo_id=repo_id, filename=repo_file, cache_dir=cache_dir) return ckpt_path
Loads a checkpoint from the Hugging Face Hub. Args: repo_path (str): The path to the checkpoint on the Hugging Face Hub. cache_dir (str): The directory to cache the downloaded checkpoint. Returns: str: The path to the downloaded checkpoint.
load_from_hf_hub
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
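A sketch of the repo-path convention assumed by load_from_hf_hub: everything before the last "/" is treated as the Hugging Face repo id and the final component as the file name (the path below is illustrative, not a guaranteed repository).

repo_path = "hpcai-tech/Open-Sora-v2/model.safetensors"  # illustrative
repo_id = "/".join(repo_path.split("/")[:-1])  # "hpcai-tech/Open-Sora-v2"
repo_file = repo_path.split("/")[-1]           # "model.safetensors"
print(repo_id, repo_file)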
def load_from_sharded_state_dict(model: nn.Module, ckpt_path: str, model_name: str = "model", strict=False): """ Loads a model from a sharded checkpoint. Args: model (nn.Module): The model to load the checkpoint into. ckpt_path (str): The path to the checkpoint. model_name (str): The name of the model in the checkpoint. strict (bool): Whether to strictly enforce that the keys in the checkpoint match the keys in the model. """ ckpt_io = GeneralCheckpointIO() ckpt_io.load_model(model, os.path.join(ckpt_path, model_name), strict=strict)
Loads a model from a sharded checkpoint. Args: model (nn.Module): The model to load the checkpoint into. ckpt_path (str): The path to the checkpoint. model_name (str): The name of the model in the checkpoint. strict (bool): Whether to strictly enforce that the keys in the checkpoint match the keys in the model.
load_from_sharded_state_dict
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def print_load_warning(missing: list[str], unexpected: list[str]) -> None: """ Prints a warning if there are missing or unexpected keys when loading a model. Args: missing (list[str]): The missing keys. unexpected (list[str]): The unexpected keys. """ if len(missing) > 0 and len(unexpected) > 0: log_message(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing)) log_message("\n" + "-" * 79 + "\n") log_message(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected)) elif len(missing) > 0: log_message(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing)) elif len(unexpected) > 0: log_message(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected)) else: log_message("Model loaded successfully")
Prints a warning if there are missing or unexpected keys when loading a model. Args: missing (list[str]): The missing keys. unexpected (list[str]): The unexpected keys.
print_load_warning
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def load_checkpoint( model: nn.Module, path: str, cache_dir: str = None, device_map: torch.device | str = "cpu", cai_model_name: str = "model", strict: bool = False, rename_keys: dict = None, # rename keys in the checkpoint to support fine-tuning with a different model architecture; map old_key_prefix to new_key_prefix ) -> nn.Module: """ Loads a checkpoint into model from a path. Support three types of checkpoints: 1. huggingface safetensors 2. local .pt or .pth 3. colossalai sharded checkpoint Args: model (nn.Module): The model to load the checkpoint into. path (str): The path to the checkpoint. cache_dir (str): The directory to cache the downloaded checkpoint. device_map (torch.device | str): The device to map the checkpoint to. cai_model_name (str): The name of the model in the checkpoint. Returns: nn.Module: The model with the loaded checkpoint. """ if not os.path.exists(path): log_message(f"Checkpoint not found at {path}, trying to download from Hugging Face Hub") path = load_from_hf_hub(path, cache_dir) assert os.path.exists(path), f"Could not find checkpoint at {path}" log_message(f"Loading checkpoint from {path}") if path.endswith(".safetensors"): # ckpt = load_file(path, device=str(device_map)) ckpt = load_file(path, device=torch.cuda.current_device()) if rename_keys is not None: # rename keys in the loaded state_dict with old_key_prefix to with new_key_prefix. renamed_ckpt = {} for old_key, v in ckpt.items(): new_key = old_key for old_key_prefix, new_key_prefix in rename_keys.items(): if old_key_prefix in old_key: new_key = old_key.replace(old_key_prefix, new_key_prefix) print(f"Renamed {old_key} to {new_key} in the loaded state_dict") break renamed_ckpt[new_key] = v ckpt = renamed_ckpt missing, unexpected = model.load_state_dict(ckpt, strict=strict) print_load_warning(missing, unexpected) elif path.endswith(".pt") or path.endswith(".pth"): ckpt = torch.load(path, map_location=device_map) missing, unexpected = model.load_state_dict(ckpt, strict=strict) print_load_warning(missing, unexpected) else: assert os.path.isdir(path), f"Invalid checkpoint path: {path}" load_from_sharded_state_dict(model, path, model_name=cai_model_name, strict=strict) return model
Loads a checkpoint into model from a path. Support three types of checkpoints: 1. huggingface safetensors 2. local .pt or .pth 3. colossalai sharded checkpoint Args: model (nn.Module): The model to load the checkpoint into. path (str): The path to the checkpoint. cache_dir (str): The directory to cache the downloaded checkpoint. device_map (torch.device | str): The device to map the checkpoint to. cai_model_name (str): The name of the model in the checkpoint. Returns: nn.Module: The model with the loaded checkpoint.
load_checkpoint
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def rm_checkpoints( save_dir: str, keep_n_latest: int = 0, ): """ Remove old checkpoints. Args: save_dir (str): The directory to save the checkpoints. keep_n_latest (int): The number of latest checkpoints to keep. """ if keep_n_latest <= 0 or dist.get_rank() != 0: return files = glob(os.path.join(save_dir, "epoch*-global_step*")) files = sorted( files, key=lambda s: tuple(map(int, re.search(r"epoch(\d+)-global_step(\d+)", s).groups())), reverse=True ) to_remove = files[keep_n_latest:] for f in to_remove: # shutil.rmtree(f) for item in glob(os.path.join(f, "*")): if os.path.isdir(item): dir_name = os.path.basename(item) if dir_name != "eval": shutil.rmtree(item) else: os.remove(item)
Remove old checkpoints. Args: save_dir (str): The directory to save the checkpoints. keep_n_latest (int): The number of latest checkpoints to keep.
rm_checkpoints
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
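A sketch of the pruning order used by rm_checkpoints: checkpoint directories are sorted by (epoch, global_step) parsed from their names, newest first, and everything past keep_n_latest is removed (directory names are illustrative).

import re

dirs = ["epoch1-global_step500", "epoch0-global_step900", "epoch1-global_step1500"]
key = lambda s: tuple(map(int, re.search(r"epoch(\d+)-global_step(\d+)", s).groups()))
newest_first = sorted(dirs, key=key, reverse=True)
print(newest_first)      # ['epoch1-global_step1500', 'epoch1-global_step500', 'epoch0-global_step900']
print(newest_first[2:])  # with keep_n_latest=2, only this tail would be deleted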
def model_sharding(model: torch.nn.Module, device: torch.device = None): """ Sharding the model parameters across multiple GPUs. Args: model (torch.nn.Module): The model to shard. device (torch.device): The device to shard the model to. """ global_rank = dist.get_rank() world_size = dist.get_world_size() for _, param in model.named_parameters(): if device is None: device = param.device padding_size = (world_size - param.numel() % world_size) % world_size if padding_size > 0: padding_param = torch.nn.functional.pad(param.data.view(-1), [0, padding_size]) else: padding_param = param.data.view(-1) splited_params = padding_param.split(padding_param.numel() // world_size) splited_params = splited_params[global_rank] param.data = splited_params.to(device)
Sharding the model parameters across multiple GPUs. Args: model (torch.nn.Module): The model to shard. device (torch.device): The device to shard the model to.
model_sharding
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def model_gathering(model: torch.nn.Module, model_shape_dict: dict, pinned_state_dict: dict) -> None:
    """
    Gather the model parameters from multiple GPUs.

    Args:
        model (torch.nn.Module): The model to gather.
        model_shape_dict (dict): The shape of the model parameters.
        pinned_state_dict (dict): The pinned CPU state dict that receives the gathered parameters.
    """
    global_rank = dist.get_rank()
    global_size = dist.get_world_size()
    params = set()
    for name, param in model.named_parameters():
        params.add(name)
        all_params = [torch.empty_like(param.data) for _ in range(global_size)]
        dist.all_gather(all_params, param.data, group=dist.group.WORLD)
        if int(global_rank) == 0:
            all_params = torch.cat(all_params)
            gathered_param = remove_padding(all_params, model_shape_dict[name]).view(model_shape_dict[name])
            pinned_state_dict[name].copy_(gathered_param)
    if int(global_rank) == 0:
        for k, v in model.state_dict(keep_vars=True).items():
            if k not in params:
                pinned_state_dict[k].copy_(v)

    dist.barrier()
Gather the model parameters from multiple GPUs.

Args:
    model (torch.nn.Module): The model to gather.
    model_shape_dict (dict): The shape of the model parameters.
    pinned_state_dict (dict): The pinned CPU state dict that receives the gathered parameters.
model_gathering
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def remove_padding(tensor: torch.Tensor, original_shape: tuple) -> torch.Tensor: """ Remove padding from a tensor. Args: tensor (torch.Tensor): The tensor to remove padding from. original_shape (tuple): The original shape of the tensor. """ return tensor[: functools.reduce(operator.mul, original_shape)]
Remove padding from a tensor. Args: tensor (torch.Tensor): The tensor to remove padding from. original_shape (tuple): The original shape of the tensor.
remove_padding
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
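A numeric sketch of the pad -> split -> concatenate -> unpad round trip performed by model_sharding, model_gathering, and remove_padding above, with the distributed all_gather simulated by a plain concatenation.

import functools
import operator

import torch
import torch.nn.functional as F

world_size = 4
param = torch.arange(10, dtype=torch.float32).view(2, 5)                 # numel = 10
padding_size = (world_size - param.numel() % world_size) % world_size    # 2
padded = F.pad(param.view(-1), [0, padding_size])                        # length 12
shards = padded.split(padded.numel() // world_size)                      # 4 shards of length 3
gathered = torch.cat(shards)                                             # what all_gather would rebuild
restored = gathered[: functools.reduce(operator.mul, param.shape)].view(param.shape)
assert torch.equal(restored, param)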
def record_model_param_shape(model: torch.nn.Module) -> dict: """ Record the shape of the model parameters. Args: model (torch.nn.Module): The model to record the parameter shape of. Returns: dict: The shape of the model parameters. """ param_shape = {} for name, param in model.named_parameters(): param_shape[name] = param.shape return param_shape
Record the shape of the model parameters. Args: model (torch.nn.Module): The model to record the parameter shape of. Returns: dict: The shape of the model parameters.
record_model_param_shape
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def load_json(file_path: str) -> dict: """ Load a JSON file. Args: file_path (str): The path to the JSON file. Returns: dict: The loaded JSON file. """ with open(file_path, "r", encoding="utf-8") as f: return json.load(f)
Load a JSON file. Args: file_path (str): The path to the JSON file. Returns: dict: The loaded JSON file.
load_json
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def save_json(data, file_path: str): """ Save a dictionary to a JSON file. Args: data: The dictionary to save. file_path (str): The path to save the JSON file. """ with open(file_path, "w", encoding="utf-8") as f: json.dump(data, f, indent=4)
Save a dictionary to a JSON file. Args: data: The dictionary to save. file_path (str): The path to save the JSON file.
save_json
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def master_weights_gathering(model: torch.nn.Module, optimizer: LowLevelZeroOptimizer, pinned_state_dict: dict) -> None:
    """
    Gather the optimizer's master weights from multiple GPUs.

    Args:
        model (torch.nn.Module): The model whose working parameters map to the master weights.
        optimizer (LowLevelZeroOptimizer): The ZeRO optimizer that holds the master weights.
        pinned_state_dict (dict): The pinned CPU state dict that receives the gathered weights.
    """
    pg = get_data_parallel_group(get_mixed_dp_pg=True)
    world_size = dist.get_world_size(pg)
    w2m = optimizer.get_working_to_master_map()
    for name, param in model.named_parameters():
        master_p = w2m[id(param)]
        all_params = [torch.empty_like(master_p) for _ in range(world_size)]
        dist.all_gather(all_params, master_p, group=pg)
        if dist.get_rank() == 0:
            all_params = torch.cat(all_params)
            gathered_param = remove_padding(all_params, param.shape).view(param.shape)
            pinned_state_dict[name].copy_(gathered_param)

    dist.barrier()
Gather the optimizer's master weights from multiple GPUs.

Args:
    model (torch.nn.Module): The model whose working parameters map to the master weights.
    optimizer (LowLevelZeroOptimizer): The ZeRO optimizer that holds the master weights.
    pinned_state_dict (dict): The pinned CPU state dict that receives the gathered weights.
master_weights_gathering
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def save( self, booster: Booster, save_dir: str, model: nn.Module = None, ema: nn.Module = None, optimizer: Optimizer = None, lr_scheduler: _LRScheduler = None, sampler=None, epoch: int = None, step: int = None, global_step: int = None, batch_size: int = None, lora: bool = False, actual_update_step: int = None, ema_shape_dict: dict = None, async_io: bool = True, include_master_weights: bool = False, ) -> str: """ Save a checkpoint. Args: booster (Booster): The Booster object. save_dir (str): The directory to save the checkpoint to. model (nn.Module): The model to save the checkpoint from. ema (nn.Module): The EMA model to save the checkpoint from. optimizer (Optimizer): The optimizer to save the checkpoint from. lr_scheduler (_LRScheduler): The learning rate scheduler to save the checkpoint from. sampler: The sampler to save the checkpoint from. epoch (int): The epoch of the checkpoint. step (int): The step of the checkpoint. global_step (int): The global step of the checkpoint. batch_size (int): The batch size of the checkpoint. lora (bool): Whether the model is trained with LoRA. Returns: str: The path to the saved checkpoint """ self._sync_io() save_dir = os.path.join(save_dir, f"epoch{epoch}-global_step{actual_update_step}") os.environ["TENSORNVME_DEBUG_LOG"] = os.path.join(save_dir, "async_file_io.log") if model is not None: if not lora: os.makedirs(os.path.join(save_dir, "model"), exist_ok=True) booster.save_model( model, os.path.join(save_dir, "model"), shard=True, use_safetensors=True, size_per_shard=4096, use_async=async_io, ) else: os.makedirs(os.path.join(save_dir, "lora"), exist_ok=True) booster.save_lora_as_pretrained(model, os.path.join(save_dir, "lora")) if optimizer is not None: booster.save_optimizer( optimizer, os.path.join(save_dir, "optimizer"), shard=True, size_per_shard=4096, use_async=async_io ) if include_master_weights: self._prepare_master_pinned_state_dict(model, optimizer) master_weights_gathering(model, optimizer, self.master_pinned_state_dict) if lr_scheduler is not None: booster.save_lr_scheduler(lr_scheduler, os.path.join(save_dir, "lr_scheduler")) if ema is not None: self._prepare_pinned_state_dict(ema, ema_shape_dict) model_gathering(ema, ema_shape_dict, self.pinned_state_dict) if dist.get_rank() == 0: running_states = { "epoch": epoch, "step": step, "global_step": global_step, "batch_size": batch_size, "actual_update_step": actual_update_step, } save_json(running_states, os.path.join(save_dir, "running_states.json")) if ema is not None: if async_io: self.writer = async_save(os.path.join(save_dir, "ema.safetensors"), self.pinned_state_dict) else: torch.save(ema.state_dict(), os.path.join(save_dir, "ema.pt")) if sampler is not None: # only for VariableVideoBatchSampler torch.save(sampler.state_dict(step), os.path.join(save_dir, "sampler")) if optimizer is not None and include_master_weights: self.master_writer = async_save( os.path.join(save_dir, "master.safetensors"), self.master_pinned_state_dict ) dist.barrier() return save_dir
Save a checkpoint. Args: booster (Booster): The Booster object. save_dir (str): The directory to save the checkpoint to. model (nn.Module): The model to save the checkpoint from. ema (nn.Module): The EMA model to save the checkpoint from. optimizer (Optimizer): The optimizer to save the checkpoint from. lr_scheduler (_LRScheduler): The learning rate scheduler to save the checkpoint from. sampler: The sampler to save the checkpoint from. epoch (int): The epoch of the checkpoint. step (int): The step of the checkpoint. global_step (int): The global step of the checkpoint. batch_size (int): The batch size of the checkpoint. lora (bool): Whether the model is trained with LoRA. Returns: str: The path to the saved checkpoint
save
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def load( self, booster: Booster, load_dir: str, model: nn.Module = None, ema: nn.Module = None, optimizer: Optimizer = None, lr_scheduler: _LRScheduler = None, sampler=None, strict: bool = False, include_master_weights: bool = False, ) -> tuple[int, int]: """ Load a checkpoint. Args: booster (Booster): The Booster object. load_dir (str): The directory to load the checkpoint from. model (nn.Module): The model to load the checkpoint into. ema (nn.Module): The EMA model to load the checkpoint into. optimizer (Optimizer): The optimizer to load the checkpoint into. lr_scheduler (_LRScheduler): The learning rate scheduler to load the checkpoint into. sampler: The sampler to load the checkpoint into. Returns: tuple[int, int]: The epoch and step of the checkpoint. """ assert os.path.exists(load_dir), f"Checkpoint directory {load_dir} does not exist" assert os.path.exists(os.path.join(load_dir, "running_states.json")), "running_states.json does not exist" running_states = load_json(os.path.join(load_dir, "running_states.json")) if model is not None: booster.load_model( model, _search_valid_path(os.path.join(load_dir, "model")), strict=strict, low_cpu_mem_mode=False, num_threads=32, ) if ema is not None: if os.path.exists(os.path.join(load_dir, "ema.safetensors")): ema_state_dict = load_file(os.path.join(load_dir, "ema.safetensors")) else: ema_state_dict = torch.load(os.path.join(load_dir, "ema.pt"), map_location=torch.device("cpu")) # ema is not boosted, so we don't use booster.load_model ema.load_state_dict(ema_state_dict, strict=strict, assign=True) if optimizer is not None: booster.load_optimizer( optimizer, os.path.join(load_dir, "optimizer"), low_cpu_mem_mode=False, num_threads=32 ) if include_master_weights: master_state_dict = load_file(os.path.join(load_dir, "master.safetensors")) load_master_weights(model, optimizer, master_state_dict) if lr_scheduler is not None: booster.load_lr_scheduler(lr_scheduler, os.path.join(load_dir, "lr_scheduler")) if sampler is not None: sampler.load_state_dict(torch.load(os.path.join(load_dir, "sampler"))) dist.barrier() return (running_states["epoch"], running_states["step"])
Load a checkpoint. Args: booster (Booster): The Booster object. load_dir (str): The directory to load the checkpoint from. model (nn.Module): The model to load the checkpoint into. ema (nn.Module): The EMA model to load the checkpoint into. optimizer (Optimizer): The optimizer to load the checkpoint into. lr_scheduler (_LRScheduler): The learning rate scheduler to load the checkpoint into. sampler: The sampler to load the checkpoint into. Returns: tuple[int, int]: The epoch and step of the checkpoint.
load
python
hpcaitech/Open-Sora
opensora/utils/ckpt.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/ckpt.py
Apache-2.0
def setup_device() -> tuple[torch.device, DistCoordinator]: """ Setup the device and the distributed coordinator. Returns: tuple[torch.device, DistCoordinator]: The device and the distributed coordinator. """ assert torch.cuda.is_available(), "Training currently requires at least one GPU." # NOTE: A very large timeout is set to avoid some processes exit early dist.init_process_group(backend="nccl", timeout=timedelta(hours=24)) torch.cuda.set_device(dist.get_rank() % torch.cuda.device_count()) coordinator = DistCoordinator() device = get_current_device() return device, coordinator
Setup the device and the distributed coordinator. Returns: tuple[torch.device, DistCoordinator]: The device and the distributed coordinator.
setup_device
python
hpcaitech/Open-Sora
opensora/utils/train.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/train.py
Apache-2.0
def create_colossalai_plugin( plugin: str, dtype: str, grad_clip: float, **kwargs, ) -> LowLevelZeroPlugin | HybridParallelPlugin: """ Create a ColossalAI plugin. Args: plugin (str): The plugin name. dtype (str): The data type. grad_clip (float): The gradient clip value. Returns: LowLevelZeroPlugin | HybridParallelPlugin: The plugin. """ plugin_kwargs = dict( precision=dtype, initial_scale=2**16, max_norm=grad_clip, overlap_allgather=True, cast_inputs=False, reduce_bucket_size_in_m=20, ) plugin_kwargs.update(kwargs) sp_size = plugin_kwargs.get("sp_size", 1) if plugin == "zero1" or plugin == "zero2": assert sp_size == 1, "Zero plugin does not support sequence parallelism" stage = 1 if plugin == "zero1" else 2 plugin = LowLevelZeroPlugin( stage=stage, **plugin_kwargs, ) set_data_parallel_group(dist.group.WORLD) elif plugin == "hybrid": plugin_kwargs["find_unused_parameters"] = True reduce_bucket_size_in_m = plugin_kwargs.pop("reduce_bucket_size_in_m") if "zero_bucket_size_in_m" not in plugin_kwargs: plugin_kwargs["zero_bucket_size_in_m"] = reduce_bucket_size_in_m plugin_kwargs.pop("cast_inputs") plugin_kwargs["enable_metadata_cache"] = False custom_policy = plugin_kwargs.pop("custom_policy", None) if custom_policy is not None: custom_policy = custom_policy() plugin = HybridParallelPlugin( custom_policy=custom_policy, **plugin_kwargs, ) set_tensor_parallel_group(plugin.tp_group) set_sequence_parallel_group(plugin.sp_group) set_data_parallel_group(plugin.dp_group) else: raise ValueError(f"Unknown plugin {plugin}") return plugin
Create a ColossalAI plugin. Args: plugin (str): The plugin name. dtype (str): The data type. grad_clip (float): The gradient clip value. Returns: LowLevelZeroPlugin | HybridParallelPlugin: The plugin.
create_colossalai_plugin
python
hpcaitech/Open-Sora
opensora/utils/train.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/train.py
Apache-2.0
def update_ema( ema_model: torch.nn.Module, model: torch.nn.Module, optimizer=None, decay: float = 0.9999, sharded: bool = True ): """ Step the EMA model towards the current model. Args: ema_model (torch.nn.Module): The EMA model. model (torch.nn.Module): The current model. optimizer (torch.optim.Optimizer): The optimizer. decay (float): The decay rate. sharded (bool): Whether the model is sharded. """ ema_params = OrderedDict(ema_model.named_parameters()) model_params = OrderedDict(model.named_parameters()) for name, param in model_params.items(): if name == "pos_embed": continue if not param.requires_grad: continue if not sharded: param_data = param.data ema_params[name].mul_(decay).add_(param_data, alpha=1 - decay) else: if param.data.dtype != torch.float32: param_id = id(param) master_param = optimizer.get_working_to_master_map()[param_id] param_data = master_param.data else: param_data = param.data ema_params[name].mul_(decay).add_(param_data, alpha=1 - decay)
Step the EMA model towards the current model. Args: ema_model (torch.nn.Module): The EMA model. model (torch.nn.Module): The current model. optimizer (torch.optim.Optimizer): The optimizer. decay (float): The decay rate. sharded (bool): Whether the model is sharded.
update_ema
python
hpcaitech/Open-Sora
opensora/utils/train.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/train.py
Apache-2.0
def dropout_condition(prob: float, txt: torch.Tensor, null_txt: torch.Tensor) -> torch.Tensor: """ Apply dropout to the text tensor. Args: prob (float): The dropout probability. txt (torch.Tensor): The text tensor. null_txt (torch.Tensor): The null text tensor. Returns: torch.Tensor: The text tensor with dropout applied. """ if prob == 0: warnings.warn("Dropout probability is 0, skipping dropout") drop_ids = torch.rand(txt.shape[0], device=txt.device) < prob drop_ids = drop_ids.view((drop_ids.shape[0],) + (1,) * (txt.ndim - 1)) new_txt = torch.where(drop_ids, null_txt, txt) return new_txt
Apply dropout to the text tensor. Args: prob (float): The dropout probability. txt (torch.Tensor): The text tensor. null_txt (torch.Tensor): The null text tensor. Returns: torch.Tensor: The text tensor with dropout applied.
dropout_condition
python
hpcaitech/Open-Sora
opensora/utils/train.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/train.py
Apache-2.0
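A minimal sketch of the text-embedding dropout above; shapes are illustrative and the import assumes the repository is installed as a package.

import torch
from opensora.utils.train import dropout_condition

torch.manual_seed(0)
txt = torch.randn(4, 8, 16)       # [batch, tokens, dim]
null_txt = torch.zeros(1, 8, 16)  # broadcasts over the batch dimension
out = dropout_condition(0.5, txt, null_txt)
replaced = (out == 0).flatten(1).all(dim=1)
print(replaced)  # True where a sample's embedding was swapped for the null embedding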
def prepare_visual_condition_uncausal(
    x: torch.Tensor, condition_config: dict, model_ae: torch.nn.Module, pad: bool = False
) -> torch.Tensor:
    """
    Prepare the visual condition for the model.

    Args:
        x: (torch.Tensor): The input video tensor.
        condition_config (dict): The condition configuration.
        model_ae (torch.nn.Module): The video encoder module.

    Returns:
        torch.Tensor: The visual condition tensor.
    """
    # x has shape [b, c, t, h, w], where b is the batch size
    B = x.shape[0]
    C = model_ae.cfg.latent_channels
    T, H, W = model_ae.get_latent_size(x.shape[-3:])

    # Initialize masks tensor to match the shape of x, but only the time dimension will be masked
    masks = torch.zeros(B, 1, T, H, W).to(
        x.device, x.dtype
    )  # broadcasting over channel, concat to masked_x with 1 + 16 = 17 channels
    # to prevent information leakage, image must be encoded separately and copied to latent
    latent = torch.zeros(B, C, T, H, W).to(x.device, x.dtype)
    x_0 = torch.zeros(B, C, T, H, W).to(x.device, x.dtype)
    if T > 1:  # video
        # certain v2v conditions are not applicable for short videos
        if T <= 32 // model_ae.time_compression_ratio:
            condition_config.pop("v2v_head", None)  # given first 32 frames
            condition_config.pop("v2v_tail", None)  # given last 32 frames
            condition_config.pop("v2v_head_easy", None)  # given first 64 frames
            condition_config.pop("v2v_tail_easy", None)  # given last 64 frames
        if T <= 64 // model_ae.time_compression_ratio:
            condition_config.pop("v2v_head_easy", None)  # given first 64 frames
            condition_config.pop("v2v_tail_easy", None)  # given last 64 frames
        mask_cond_options = list(condition_config.keys())  # list of mask conditions
        mask_cond_weights = list(condition_config.values())  # corresponding probabilities
        for i in range(B):
            # Randomly select a mask condition based on the provided probabilities
            mask_cond = random.choices(mask_cond_options, weights=mask_cond_weights, k=1)[0]

            # Apply the selected mask condition directly on the masks tensor
            if mask_cond == "i2v_head":  # NOTE: modify video, mask first latent frame
                # padded video such that the first latent frame correspond to image only
                masks[i, :, 0, :, :] = 1
                if pad:
                    pad_num = model_ae.time_compression_ratio - 1  # 32 --> new video: 7 + (1+31-7)
                    padded_x = torch.cat([x[i, :, :1]] * pad_num + [x[i, :, :-pad_num]], dim=1).unsqueeze(0)
                    x_0[i] = model_ae.encode(padded_x)[0]
                else:
                    x_0[i] = model_ae.encode(x[i : i + 1])[0]
                # condition: encode the image only
                latent[i, :, :1, :, :] = model_ae.encode(
                    x[i, :, :1, :, :].unsqueeze(0)
                )  # since the first dimension of right hand side is singleton, torch auto-ignores it
            elif mask_cond == "i2v_loop":  # NOTE: modify video, mask first and last latent frame
                # pad video such that first and last latent frame correspond to image only
                masks[i, :, 0, :, :] = 1
                masks[i, :, -1, :, :] = 1
                if pad:
                    pad_num = model_ae.time_compression_ratio - 1
                    padded_x = torch.cat(
                        [x[i, :, :1]] * pad_num
                        + [x[i, :, : -pad_num * 2]]
                        + [x[i, :, -pad_num * 2 - 1].unsqueeze(1)] * pad_num,
                        dim=1,
                    ).unsqueeze(
                        0
                    )  # remove the last pad_num * 2 frames from the end of the video
                    x_0[i] = model_ae.encode(padded_x)[0]
                    # condition: encode the image only
                    latent[i, :, :1, :, :] = model_ae.encode(x[i, :, :1, :, :].unsqueeze(0))
                    latent[i, :, -1:, :, :] = model_ae.encode(x[i, :, -pad_num * 2 - 1, :, :].unsqueeze(1).unsqueeze(0))
                else:
                    x_0[i] = model_ae.encode(x[i : i + 1])[0]
                    latent[i, :, :1, :, :] = model_ae.encode(x[i, :, :1, :, :].unsqueeze(0))
                    latent[i, :, -1:, :, :] = model_ae.encode(x[i, :, -1:, :, :].unsqueeze(0))
            elif mask_cond == "i2v_tail":  # mask the last latent frame
                masks[i, :, -1, :, :] = 1
                if pad:
                    pad_num = model_ae.time_compression_ratio - 1
                    padded_x = torch.cat([x[i, :, pad_num:]] + [x[i, :, -1:]] * pad_num, dim=1).unsqueeze(0)
                    x_0[i] = model_ae.encode(padded_x)[0]
                    latent[i, :, -1:, :, :] = model_ae.encode(x[i, :, -pad_num * 2 - 1, :, :].unsqueeze(1).unsqueeze(0))
                else:
                    x_0[i] = model_ae.encode(x[i : i + 1])[0]
                    latent[i, :, -1:, :, :] = model_ae.encode(x[i, :, -1:, :, :].unsqueeze(0))
            elif mask_cond == "v2v_head":  # mask the first 32 video frames
                assert T > 32 // model_ae.time_compression_ratio
                conditioned_t = 32 // model_ae.time_compression_ratio
                masks[i, :, :conditioned_t, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                latent[i, :, :conditioned_t, :, :] = x_0[i, :, :conditioned_t, :, :]
            elif mask_cond == "v2v_tail":  # mask the last 32 video frames
                assert T > 32 // model_ae.time_compression_ratio
                conditioned_t = 32 // model_ae.time_compression_ratio
                masks[i, :, -conditioned_t:, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                latent[i, :, -conditioned_t:, :, :] = x_0[i, :, -conditioned_t:, :, :]
            elif mask_cond == "v2v_head_easy":  # mask the first 64 video frames
                assert T > 64 // model_ae.time_compression_ratio
                conditioned_t = 64 // model_ae.time_compression_ratio
                masks[i, :, :conditioned_t, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                latent[i, :, :conditioned_t, :, :] = x_0[i, :, :conditioned_t, :, :]
            elif mask_cond == "v2v_tail_easy":  # mask the last 64 video frames
                assert T > 64 // model_ae.time_compression_ratio
                conditioned_t = 64 // model_ae.time_compression_ratio
                masks[i, :, -conditioned_t:, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                latent[i, :, -conditioned_t:, :, :] = x_0[i, :, -conditioned_t:, :, :]
            # elif mask_cond == "v2v_head":  # mask from the beginning to a random point
            #     masks[i, :, : random.randint(1, T - 2), :, :] = 1
            # elif mask_cond == "v2v_tail":  # mask from a random point to the end
            #     masks[i, :, -random.randint(1, T - 2) :, :, :] = 1
            else:  # "t2v" is the fallback case where no specific condition is specified
                assert mask_cond == "t2v", f"Unknown mask condition {mask_cond}"
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
    else:  # image
        x_0 = model_ae.encode(x)  # latent video

    latent = masks * latent  # condition latent
    # merge the masks and the masked_x into a single tensor
    cond = torch.cat((masks, latent), dim=1)
    return x_0, cond
Prepare the visual condition for the model. Args: x: (torch.Tensor): The input video tensor. condition_config (dict): The condition configuration. model_ae (torch.nn.Module): The video encoder module. Returns: torch.Tensor: The visual condition tensor.
prepare_visual_condition_uncausal
python
hpcaitech/Open-Sora
opensora/utils/train.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/train.py
Apache-2.0
def prepare_visual_condition_causal(x: torch.Tensor, condition_config: dict, model_ae: torch.nn.Module) -> torch.Tensor:
    """
    Prepare the visual condition for the model.

    Args:
        x: (torch.Tensor): The input video tensor.
        condition_config (dict): The condition configuration.
        model_ae (torch.nn.Module): The video encoder module.

    Returns:
        torch.Tensor: The visual condition tensor.
    """
    # x has shape [b, c, t, h, w], where b is the batch size
    B = x.shape[0]
    C = model_ae.cfg.latent_channels
    T, H, W = model_ae.get_latent_size(x.shape[-3:])

    # Initialize masks tensor to match the shape of x, but only the time dimension will be masked
    masks = torch.zeros(B, 1, T, H, W).to(
        x.device, x.dtype
    )  # broadcasting over channel, concat to masked_x with 1 + 16 = 17 channels
    # to prevent information leakage, image must be encoded separately and copied to latent
    latent = torch.zeros(B, C, T, H, W).to(x.device, x.dtype)
    x_0 = torch.zeros(B, C, T, H, W).to(x.device, x.dtype)
    if T > 1:  # video
        # certain v2v conditions are not applicable for short videos
        if T <= (32 // model_ae.time_compression_ratio) + 1:
            condition_config.pop("v2v_head", None)  # given first 33 frames
            condition_config.pop("v2v_tail", None)  # given last 33 frames
            condition_config.pop("v2v_head_easy", None)  # given first 65 frames
            condition_config.pop("v2v_tail_easy", None)  # given last 65 frames
        if T <= (64 // model_ae.time_compression_ratio) + 1:
            condition_config.pop("v2v_head_easy", None)  # given first 65 frames
            condition_config.pop("v2v_tail_easy", None)  # given last 65 frames
        mask_cond_options = list(condition_config.keys())  # list of mask conditions
        mask_cond_weights = list(condition_config.values())  # corresponding probabilities
        for i in range(B):
            # Randomly select a mask condition based on the provided probabilities
            mask_cond = random.choices(mask_cond_options, weights=mask_cond_weights, k=1)[0]

            # Apply the selected mask condition directly on the masks tensor
            if mask_cond == "i2v_head":  # NOTE: modify video, mask first latent frame
                masks[i, :, 0, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                # condition: encode the image only
                latent[i, :, :1, :, :] = model_ae.encode(x[i, :, :1, :, :].unsqueeze(0))
            elif mask_cond == "i2v_loop":  # NOTE: modify video, mask first and last latent frame
                # pad video such that first and last latent frame correspond to image only
                masks[i, :, 0, :, :] = 1
                masks[i, :, -1, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                # condition: encode the image only
                latent[i, :, :1, :, :] = model_ae.encode(x[i, :, :1, :, :].unsqueeze(0))
                latent[i, :, -1:, :, :] = model_ae.encode(x[i, :, -1:, :, :].unsqueeze(0))
            elif mask_cond == "i2v_tail":  # mask the last latent frame
                masks[i, :, -1, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                # condition: encode the last image only
                latent[i, :, -1:, :, :] = model_ae.encode(x[i, :, -1:, :, :].unsqueeze(0))
            elif "v2v_head" in mask_cond:  # mask the first 33 video frames
                ref_t = 33 if not "easy" in mask_cond else 65
                assert (ref_t - 1) % model_ae.time_compression_ratio == 0
                conditioned_t = (ref_t - 1) // model_ae.time_compression_ratio + 1
                masks[i, :, :conditioned_t, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                # encode the first ref_t frame video separately
                latent[i, :, :conditioned_t, :, :] = model_ae.encode(x[i, :, :ref_t, :, :].unsqueeze(0))
            elif "v2v_tail" in mask_cond:  # mask the last 33 video frames
                ref_t = 33 if not "easy" in mask_cond else 65
                assert (ref_t - 1) % model_ae.time_compression_ratio == 0
                conditioned_t = (ref_t - 1) // model_ae.time_compression_ratio + 1
                masks[i, :, -conditioned_t:, :, :] = 1
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
                # encode the last ref_t frame video separately
                latent[i, :, -conditioned_t:, :, :] = model_ae.encode(x[i, :, -ref_t:, :, :].unsqueeze(0))
            else:  # "t2v" is the fallback case where no specific condition is specified
                assert mask_cond == "t2v", f"Unknown mask condition {mask_cond}"
                x_0[i] = model_ae.encode(x[i].unsqueeze(0))[0]
    else:  # image
        x_0 = model_ae.encode(x)  # latent video

    latent = masks * latent  # condition latent
    # merge the masks and the masked_x into a single tensor
    cond = torch.cat((masks, latent), dim=1)
    return x_0, cond
Prepare the visual condition for the model. Args: x: (torch.Tensor): The input video tensor. condition_config (dict): The condition configuration. model_ae (torch.nn.Module): The video encoder module. Returns: torch.Tensor: The visual condition tensor.
prepare_visual_condition_causal
python
hpcaitech/Open-Sora
opensora/utils/train.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/train.py
Apache-2.0
def create_optimizer( model: torch.nn.Module, optimizer_config: dict, ) -> torch.optim.Optimizer: """ Create an optimizer. Args: model (torch.nn.Module): The model to be optimized. optimizer_config (dict): The configuration of the optimizer. Returns: torch.optim.Optimizer: The optimizer. """ optimizer_name = optimizer_config.pop("cls", "HybridAdam") if optimizer_name == "HybridAdam": optimizer_cls = HybridAdam else: raise ValueError(f"Unknown optimizer: {optimizer_name}") optimizer = optimizer_cls( filter(lambda p: p.requires_grad, model.parameters()), **optimizer_config, ) return optimizer
Create an optimizer. Args: model (torch.nn.Module): The model to be optimized. optimizer_config (dict): The configuration of the optimizer. Returns: torch.optim.Optimizer: The optimizer.
create_optimizer
python
hpcaitech/Open-Sora
opensora/utils/optimizer.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/optimizer.py
Apache-2.0
def create_lr_scheduler(
    optimizer: torch.optim.Optimizer,
    num_steps_per_epoch: int,
    epochs: int = 1000,
    warmup_steps: int | None = None,
    use_cosine_scheduler: bool = False,
    initial_lr: float = 1e-6,
) -> _LRScheduler | None:
    """
    Create a learning rate scheduler.

    Args:
        optimizer (torch.optim.Optimizer): The optimizer to be used.
        num_steps_per_epoch (int): The number of steps per epoch.
        epochs (int): The number of epochs.
        warmup_steps (int | None): The number of warmup steps.
        use_cosine_scheduler (bool): Whether to use cosine scheduler.

    Returns:
        _LRScheduler | None: The learning rate scheduler
    """
    if warmup_steps is None and not use_cosine_scheduler:
        lr_scheduler = None
    elif use_cosine_scheduler:
        lr_scheduler = CosineAnnealingWarmupLR(
            optimizer,
            total_steps=num_steps_per_epoch * epochs,
            warmup_steps=warmup_steps,
        )
    else:
        # pass the configured initial_lr through instead of hard-coding 1e-6
        lr_scheduler = LinearWarmupLR(optimizer, initial_lr=initial_lr, warmup_steps=warmup_steps)
    return lr_scheduler
Create a learning rate scheduler. Args: optimizer (torch.optim.Optimizer): The optimizer to be used. num_steps_per_epoch (int): The number of steps per epoch. epochs (int): The number of epochs. warmup_steps (int | None): The number of warmup steps. use_cosine_scheduler (bool): Whether to use cosine scheduler. Returns: _LRScheduler | None: The learning rate scheduler
create_lr_scheduler
python
hpcaitech/Open-Sora
opensora/utils/optimizer.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/optimizer.py
Apache-2.0
def create_tensorboard_writer(exp_dir: str) -> SummaryWriter: """ Create a tensorboard writer. Args: exp_dir (str): The directory to save tensorboard logs. Returns: SummaryWriter: The tensorboard writer. """ tensorboard_dir = f"{exp_dir}/tensorboard" os.makedirs(tensorboard_dir, exist_ok=True) writer = SummaryWriter(tensorboard_dir) return writer
Create a tensorboard writer. Args: exp_dir (str): The directory to save tensorboard logs. Returns: SummaryWriter: The tensorboard writer.
create_tensorboard_writer
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
def log_cuda_memory(stage: str = None): """ Log the current CUDA memory usage. Args: stage (str): The stage of the training process. """ text = "CUDA memory usage" if stage is not None: text += f" at {stage}" log_message(text + ": %.1f GB", torch.cuda.memory_allocated() / GIGABYTE)
Log the current CUDA memory usage. Args: stage (str): The stage of the training process.
log_cuda_memory
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
def log_cuda_max_memory(stage: str = None):
    """
    Log the max CUDA memory usage.

    Args:
        stage (str): The stage of the training process.
    """
    torch.cuda.synchronize()
    max_memory_allocated = torch.cuda.max_memory_allocated()
    max_memory_reserved = torch.cuda.max_memory_reserved()
    suffix = f" at {stage}" if stage is not None else ""
    log_message("CUDA max memory allocated" + suffix + ": %.1f GB", max_memory_allocated / GIGABYTE)
    log_message("CUDA max memory reserved" + suffix + ": %.1f GB", max_memory_reserved / GIGABYTE)
Log the max CUDA memory usage. Args: stage (str): The stage of the training process.
log_cuda_max_memory
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
def get_model_numel(model: torch.nn.Module) -> tuple[int, int]: """ Get the number of parameters in a model. Args: model (torch.nn.Module): The model. Returns: tuple[int, int]: The total number of parameters and the number of trainable parameters. """ num_params = 0 num_params_trainable = 0 for p in model.parameters(): num_params += p.numel() if p.requires_grad: num_params_trainable += p.numel() return num_params, num_params_trainable
Get the number of parameters in a model. Args: model (torch.nn.Module): The model. Returns: tuple[int, int]: The total number of parameters and the number of trainable parameters.
get_model_numel
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
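A tiny sanity check of the parameter counting above, using a single linear layer (the import assumes the repository is installed as a package).

import torch.nn as nn
from opensora.utils.misc import get_model_numel

layer = nn.Linear(1024, 1024)
total, trainable = get_model_numel(layer)
print(total, trainable)  # 1049600 1049600  (1024*1024 weights + 1024 biases)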
def log_model_params(model: nn.Module): """ Log the number of parameters in a model. Args: model (torch.nn.Module): The model. """ num_params, num_params_trainable = get_model_numel(model) model_name = model.__class__.__name__ log_message(f"[{model_name}] Number of parameters: {format_numel_str(num_params)}") log_message(f"[{model_name}] Number of trainable parameters: {format_numel_str(num_params_trainable)}")
Log the number of parameters in a model. Args: model (torch.nn.Module): The model.
log_model_params
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
def format_numel_str(numel: int) -> str: """ Format a number of elements to a human-readable string. Args: numel (int): The number of elements. Returns: str: The formatted string. """ B = 1024**3 M = 1024**2 K = 1024 if numel >= B: return f"{numel / B:.2f} B" elif numel >= M: return f"{numel / M:.2f} M" elif numel >= K: return f"{numel / K:.2f} K" else: return f"{numel}"
Format a number of elements to a human-readable string. Args: numel (int): The number of elements. Returns: str: The formatted string.
format_numel_str
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
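Quick examples of the human-readable formatting, which uses binary units (1 K/M/B = 1024 / 1024^2 / 1024^3); the import assumes the repository is installed as a package.

from opensora.utils.misc import format_numel_str

print(format_numel_str(11_000_000_000))  # 10.24 B
print(format_numel_str(1_500_000))       # 1.43 M
print(format_numel_str(512))             # 512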
def to_tensor(data: torch.Tensor | np.ndarray | Sequence | int | float) -> torch.Tensor: """Convert objects of various python types to :obj:`torch.Tensor`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`. Args: data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to be converted. Returns: torch.Tensor: The converted tensor. """ if isinstance(data, torch.Tensor): return data elif isinstance(data, np.ndarray): return torch.from_numpy(data) elif isinstance(data, Sequence) and not isinstance(data, str): return torch.tensor(data) elif isinstance(data, int): return torch.LongTensor([data]) elif isinstance(data, float): return torch.FloatTensor([data]) else: raise TypeError(f"type {type(data)} cannot be converted to tensor.")
Convert objects of various python types to :obj:`torch.Tensor`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`. Args: data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to be converted. Returns: torch.Tensor: The converted tensor.
to_tensor
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
def to_ndarray(data: torch.Tensor | np.ndarray | Sequence | int | float) -> np.ndarray:
    """Convert objects of various python types to :obj:`numpy.ndarray`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.

    Returns:
        numpy.ndarray: The converted ndarray.
    """
    if isinstance(data, torch.Tensor):
        return data.numpy()
    elif isinstance(data, np.ndarray):
        return data
    elif isinstance(data, Sequence):
        return np.array(data)
    elif isinstance(data, int):
        return np.array([data], dtype=int)  # np.array, not np.ndarray (which would treat data as a shape)
    elif isinstance(data, float):
        return np.array([data], dtype=float)
    else:
        raise TypeError(f"type {type(data)} cannot be converted to ndarray.")
Convert objects of various python types to :obj:`numpy.ndarray`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`. Args: data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to be converted. Returns: numpy.ndarray: The converted ndarray.
to_ndarray
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
def to_torch_dtype(dtype: str | torch.dtype) -> torch.dtype: """ Convert a string or a torch.dtype to a torch.dtype. Args: dtype (str | torch.dtype): The input dtype. Returns: torch.dtype: The converted dtype. """ if isinstance(dtype, torch.dtype): return dtype elif isinstance(dtype, str): dtype_mapping = { "float64": torch.float64, "float32": torch.float32, "float16": torch.float16, "fp32": torch.float32, "fp16": torch.float16, "half": torch.float16, "bf16": torch.bfloat16, } if dtype not in dtype_mapping: raise ValueError(f"Unsupported dtype {dtype}") dtype = dtype_mapping[dtype] return dtype else: raise ValueError(f"Unsupported dtype {dtype}")
Convert a string or a torch.dtype to a torch.dtype. Args: dtype (str | torch.dtype): The input dtype. Returns: torch.dtype: The converted dtype.
to_torch_dtype
python
hpcaitech/Open-Sora
opensora/utils/misc.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/misc.py
Apache-2.0
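Illustrative conversions accepted by to_torch_dtype (the import assumes the repository is installed as a package).

import torch
from opensora.utils.misc import to_torch_dtype

assert to_torch_dtype("bf16") is torch.bfloat16
assert to_torch_dtype("fp16") is torch.float16
assert to_torch_dtype(torch.float32) is torch.float32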
def refine_prompt(prompt: str, retry_times: int = 3, type: str = "t2v", image_path: str = None): """ Refine a prompt to a format that can be used by the model for inference """ client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY")) text = prompt.strip() response = None for i in range(retry_times): if type == "t2v": response = client.chat.completions.create( messages=[ {"role": "system", "content": f"{sys_prompt_t2v}"}, { "role": "user", "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "A street with parked cars on both sides, lined with commercial buildings featuring Korean signs. The overcast sky suggests early morning or late afternoon."', }, { "role": "assistant", "content": "A view of a street lined with parked cars on both sides. the buildings flanking the street have various signs and advertisements, some of which are in korean, indicating that this might be a location in south korea. the sky is overcast, suggesting either early morning or late afternoon light. the architecture of the buildings is typical of urban commercial areas, with storefronts on the ground level and possibly offices or residences above.", }, { "role": "user", "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "Hands with rings and bracelets wash small greenish-brown seeds in a blue basin under running water, likely outdoors."', }, { "role": "assistant", "content": "A close-up shot of a person's hands, adorned with rings and bracelets, washing a pile of small, round, greenish-brown seeds in a blue plastic basin. the water is running from an unseen source, likely a tap, and the person is using their hands to agitate the seeds, presumably to clean them. the background is indistinct but appears to be an outdoor setting with natural light.", }, { "role": "user", "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "Three men stand near an open black car in a parking lot, with parked vehicles and a partly cloudy sky in the background."', }, { "role": "assistant", "content": "A scene showing three men in an outdoor setting, likely a parking lot. the man on the left is wearing a light blue shirt and dark shorts, the man in the middle is dressed in a white shirt with a pattern and dark shorts, and the man on the right is wearing a green shirt and jeans. they are standing near a black car with its door open. in the background, there are parked vehicles, including a white truck and a red trailer. 
the sky is partly cloudy, suggesting it might be a sunny day.", }, { "role": "user", "content": f'Create an imaginative video descriptive caption or modify an earlier caption in ENGLISH for the user input: " {text} "', }, ], model="gpt-4o", # glm-4-plus and gpt-4o have be tested temperature=0.01, top_p=0.7, stream=False, max_tokens=250, ) elif type == "t2i": response = client.chat.completions.create( messages=[ {"role": "system", "content": f"{sys_prompt_t2i}"}, { "role": "user", "content": 'Create an imaginative image descriptive caption or modify an earlier caption for the user input : "a girl on the beach"', }, { "role": "assistant", "content": "A radiant woman stands on a deserted beach, arms outstretched, wearing a beige trench coat, white blouse, light blue jeans, and chic boots, against a backdrop of soft sky and sea.", }, { "role": "user", "content": 'Create an imaginative image descriptive caption or modify an earlier caption for the user input : "A man in a blue shirt"', }, { "role": "assistant", "content": "A determined man in athletic attire, including a blue long-sleeve shirt, black shorts, and blue socks, against a backdrop of a snowy field.", }, { "role": "user", "content": f'Create an imaginative image descriptive caption or modify an earlier caption in ENGLISH for the user input: " {text} "', }, ], model="gpt-4o", # glm-4-plus and gpt-4o have be tested temperature=0.01, top_p=0.7, stream=False, max_tokens=250, ) elif type == "i2v": response = client.chat.completions.create( model="gpt-4o", messages=[ {"role": "system", "content": f"{sys_prompt_i2v}"}, { "role": "user", "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "A street with parked cars on both sides, lined with commercial buildings featuring Korean signs. The overcast sky suggests early morning or late afternoon."', }, { "role": "assistant", "content": "A view of a street lined with parked cars on both sides. the buildings flanking the street have various signs and advertisements, some of which are in korean, indicating that this might be a location in south korea. the sky is overcast, suggesting either early morning or late afternoon light. the architecture of the buildings is typical of urban commercial areas, with storefronts on the ground level and possibly offices or residences above.", }, { "role": "user", "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "Hands with rings and bracelets wash small greenish-brown seeds in a blue basin under running water, likely outdoors."', }, { "role": "assistant", "content": "A close-up shot of a person's hands, adorned with rings and bracelets, washing a pile of small, round, greenish-brown seeds in a blue plastic basin. the water is running from an unseen source, likely a tap, and the person is using their hands to agitate the seeds, presumably to clean them. the background is indistinct but appears to be an outdoor setting with natural light.", }, { "role": "user", "content": 'Create an imaginative video descriptive caption or modify an earlier caption for the user input : "Three men stand near an open black car in a parking lot, with parked vehicles and a partly cloudy sky in the background."', }, { "role": "assistant", "content": "A scene showing three men in an outdoor setting, likely a parking lot. 
the man on the left is wearing a light blue shirt and dark shorts, the man in the middle is dressed in a white shirt with a pattern and dark shorts, and the man on the right is wearing a green shirt and jeans. they are standing near a black car with its door open. in the background, there are parked vehicles, including a white truck and a red trailer. the sky is partly cloudy, suggesting it might be a sunny day.", }, { "role": "user", "content": f'Create an imaginative video descriptive caption or modify an earlier caption in ENGLISH for the user input based on the image: " {text} "', }, { "role": "user", "content": [ { "type": "image_url", "image_url": { "url": image_to_url(image_path), }, }, ], }, ], temperature=0.01, top_p=0.7, stream=False, max_tokens=250, ) elif type == "motion_score": response = client.chat.completions.create( messages=[ {"role": "system", "content": f"{sys_prompt_motion_score}"}, { "role": "user", "content": f"{text}", }, ], model="gpt-4o", # glm-4-plus and gpt-4o have be tested temperature=0.01, top_p=0.7, stream=False, max_tokens=100, ) if response is None: continue if response.choices: return response.choices[0].message.content return prompt
Refine a prompt to a format that can be used by the model for inference
refine_prompt
python
hpcaitech/Open-Sora
opensora/utils/prompt_refine.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/prompt_refine.py
Apache-2.0
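A hedged usage sketch for refine_prompt; it assumes OPENAI_API_KEY is exported, since the function builds its OpenAI client from that variable, and relies on the fallback shown above of returning the original prompt when no response is obtained.

from opensora.utils.prompt_refine import refine_prompt

# Text-to-video refinement (type defaults to "t2v"); "t2i", "i2v" and
# "motion_score" select the other system prompts used in the function.
refined = refine_prompt("a girl walking on the beach at sunset")
print(refined)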
def is_distributed() -> bool:
    """
    Check if the code is running in a distributed setting.

    Returns:
        bool: True if running in a distributed setting, False otherwise
    """
    return os.environ.get("WORLD_SIZE", None) is not None
Check if the code is running in a distributed setting.

Returns:
    bool: True if running in a distributed setting, False otherwise
is_distributed
python
hpcaitech/Open-Sora
opensora/utils/logger.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/logger.py
Apache-2.0
def is_main_process() -> bool:
    """
    Check if the current process is the main process.

    Returns:
        bool: True if the current process is the main process, False otherwise.
    """
    return not is_distributed() or dist.get_rank() == 0
Check if the current process is the main process.

Returns:
    bool: True if the current process is the main process, False otherwise.
is_main_process
python
hpcaitech/Open-Sora
opensora/utils/logger.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/logger.py
Apache-2.0
def get_world_size() -> int:
    """
    Get the number of processes in the distributed setting.

    Returns:
        int: The number of processes.
    """
    if is_distributed():
        return dist.get_world_size()
    else:
        return 1
Get the number of processes in the distributed setting.

Returns:
    int: The number of processes.
get_world_size
python
hpcaitech/Open-Sora
opensora/utils/logger.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/logger.py
Apache-2.0
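The three logger helpers above (is_distributed, is_main_process, get_world_size) are typically combined to guard rank-dependent work; a small sketch, assuming a launcher such as torchrun has set WORLD_SIZE.

from opensora.utils.logger import get_world_size, is_distributed, is_main_process

if is_main_process():
    # only rank 0 (or the single process in a non-distributed run) prints
    print(f"distributed={is_distributed()}, world_size={get_world_size()}")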
def create_logger(logging_dir: str = None) -> logging.Logger:
    """
    Create a logger that writes to a log file and stdout. Only the main process logs.

    Args:
        logging_dir (str): The directory to save the log file.

    Returns:
        logging.Logger: The logger.
    """
    if is_main_process():
        additional_args = dict()
        if logging_dir is not None:
            additional_args["handlers"] = [
                logging.StreamHandler(),
                logging.FileHandler(f"{logging_dir}/log.txt"),
            ]
        logging.basicConfig(
            level=logging.INFO,
            format="[\033[34m%(asctime)s\033[0m] %(message)s",
            datefmt="%Y-%m-%d %H:%M:%S",
            **additional_args,
        )
        logger = logging.getLogger(__name__)
        if logging_dir is not None:
            logger.info("Experiment directory created at %s", logging_dir)
    else:
        logger = logging.getLogger(__name__)
        logger.addHandler(logging.NullHandler())
    return logger
Create a logger that writes to a log file and stdout. Only the main process logs.

Args:
    logging_dir (str): The directory to save the log file.

Returns:
    logging.Logger: The logger.
create_logger
python
hpcaitech/Open-Sora
opensora/utils/logger.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/logger.py
Apache-2.0
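A short sketch of calling create_logger at startup; the experiment directory name is illustrative and must exist before the FileHandler is created.

import os
from opensora.utils.logger import create_logger

exp_dir = "outputs/exp_000"  # illustrative path
os.makedirs(exp_dir, exist_ok=True)
logger = create_logger(exp_dir)  # non-main ranks receive a logger with a NullHandler
logger.info("training starts")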
def log_message(*args, level: str = "info"):
    """
    Log a message to the logger.

    Args:
        *args: The message to log.
        level (str): The logging level.
    """
    logger = logging.getLogger(__name__)
    if level == "info":
        logger.info(*args)
    elif level == "warning":
        logger.warning(*args)
    elif level == "error":
        logger.error(*args)
    elif level == "print":
        print(*args)
    else:
        raise ValueError(f"Invalid logging level: {level}")
Log a message to the logger.

Args:
    *args: The message to log.
    level (str): The logging level.
log_message
python
hpcaitech/Open-Sora
opensora/utils/logger.py
https://github.com/hpcaitech/Open-Sora/blob/master/opensora/utils/logger.py
Apache-2.0
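log_message forwards printf-style arguments to the module logger configured by create_logger; a brief sketch.

from opensora.utils.logger import log_message

log_message("epoch %d finished", 3)                # level defaults to "info"
log_message("checkpoint missing", level="warning")
log_message("raw output", level="print")           # bypasses logging and uses print()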
def install_dependencies(enable_optimization=False):
    """
    Install the required dependencies for the demo if they are not already installed.
    """

    def _is_package_available(name) -> bool:
        try:
            importlib.import_module(name)
            return True
        except (ImportError, ModuleNotFoundError):
            return False

    if enable_optimization:
        # install flash attention
        if not _is_package_available("flash_attn"):
            subprocess.run(
                f"{sys.executable} -m pip install flash-attn --no-build-isolation",
                env={"FLASH_ATTENTION_SKIP_CUDA_BUILD": "TRUE"},
                shell=True,
            )

        # install apex for fused layernorm
        if not _is_package_available("apex"):
            subprocess.run(
                f'{sys.executable} -m pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git',
                shell=True,
            )

        # install ninja
        if not _is_package_available("ninja"):
            subprocess.run(f"{sys.executable} -m pip install ninja", shell=True)

        # install xformers
        if not _is_package_available("xformers"):
            subprocess.run(
                f"{sys.executable} -m pip install -v -U git+https://github.com/facebookresearch/xformers.git@main#egg=xformers",
                shell=True,
            )
Install the required dependencies for the demo if they are not already installed.
install_dependencies
python
hpcaitech/Open-Sora
gradio/app.py
https://github.com/hpcaitech/Open-Sora/blob/master/gradio/app.py
Apache-2.0
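In the demo this helper is normally called once before any model is built; a hedged sketch in which the --enable-optimization flag name is illustrative rather than taken from the source.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--enable-optimization", action="store_true")  # hypothetical flag
args = parser.parse_args()

install_dependencies(enable_optimization=args.enable_optimization)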
def read_config(config_path):
    """
    Read the configuration file.
    """
    from mmengine.config import Config

    return Config.fromfile(config_path)
Read the configuration file.
read_config
python
hpcaitech/Open-Sora
gradio/app.py
https://github.com/hpcaitech/Open-Sora/blob/master/gradio/app.py
Apache-2.0
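read_config is a thin wrapper around mmengine's Config.fromfile, so the returned object supports attribute access; a sketch with an illustrative config path.

from mmengine.config import Config

config = read_config("configs/opensora-v1-3/inference/t2v.py")  # illustrative path
assert isinstance(config, Config)
print(sorted(config.keys()))  # e.g. model, vae, text_encoder, scheduler, as used by build_models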
def build_models(mode, resolution, enable_optimization=False):
    """
    Build the models for the given mode, resolution, and configuration.
    """
    # build vae
    from opensora.registry import MODELS, build_module

    if mode == "i2v":
        config = read_config(CONFIG_MAP["v1.3_i2v"])
    else:
        config = read_config(CONFIG_MAP["v1.3"])
    vae = build_module(config.vae, MODELS).cuda()

    # build text encoder
    text_encoder = build_module(config.text_encoder, MODELS)  # T5 must be fp32
    text_encoder.t5.model = text_encoder.t5.model.cuda()

    # Determine model weights based on mode and resolution
    if mode == "i2v":
        weight_path = HF_STDIT_MAP["i2v"]
    else:  # t2v
        weight_path = HF_STDIT_MAP["t2v"].get(resolution, None)
    if not weight_path:
        raise ValueError(f"Unsupported resolution {resolution} for mode {mode}")

    # build stdit
    from opensora.models.stdit.stdit3 import STDiT3

    model_kwargs = {k: v for k, v in config.model.items() if k not in ("type", "from_pretrained", "force_huggingface")}
    print("Load STDIT3 from ", weight_path)
    stdit = STDiT3.from_pretrained(weight_path, **model_kwargs).cuda()

    # build scheduler
    from opensora.registry import SCHEDULERS

    scheduler = build_module(config.scheduler, SCHEDULERS)

    # hack for classifier-free guidance
    text_encoder.y_embedder = stdit.y_embedder

    # move models to device
    vae = vae.to(torch.bfloat16).eval()
    text_encoder.t5.model = text_encoder.t5.model.eval()  # t5 must be in fp32
    stdit = stdit.to(torch.bfloat16).eval()

    # clear cuda
    torch.cuda.empty_cache()
    return vae, text_encoder, stdit, scheduler, config
Build the models for the given mode, resolution, and configuration.
build_models
python
hpcaitech/Open-Sora
gradio/app.py
https://github.com/hpcaitech/Open-Sora/blob/master/gradio/app.py
Apache-2.0
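A hedged sketch of the text-to-video path through build_models; it assumes the CONFIG_MAP and HF_STDIT_MAP globals in gradio/app.py are populated and that a CUDA device is available.

vae, text_encoder, stdit, scheduler, config = build_models("t2v", "720p")

# vae and stdit end up in bfloat16 eval mode on the GPU, while the T5 text
# encoder stays in fp32 as noted in the comments inside build_models.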
def main(): # create demo with gr.Blocks() as demo: with gr.Row(): with gr.Column(): gr.HTML( """ <div style='text-align: center;'> <p align="center"> <img src="https://github.com/hpcaitech/Open-Sora-Demo/blob/main/readme/icon.png" width="250"/> </p> <div style="display: flex; gap: 10px; justify-content: center;"> <a href="https://github.com/hpcaitech/Open-Sora/stargazers"><img src="https://img.shields.io/github/stars/hpcaitech/Open-Sora?style=social"></a> <a href="https://hpcaitech.github.io/Open-Sora/"><img src="https://img.shields.io/badge/Gallery-View-orange?logo=&amp"></a> <a href="https://discord.gg/kZakZzrSUT"><img src="https://img.shields.io/badge/Discord-join-blueviolet?logo=discord&amp"></a> <a href="https://join.slack.com/t/colossalaiworkspace/shared_invite/zt-247ipg9fk-KRRYmUl~u2ll2637WRURVA"><img src="https://img.shields.io/badge/Slack-ColossalAI-blueviolet?logo=slack&amp"></a> <a href="https://twitter.com/yangyou1991/status/1769411544083996787?s=61&t=jT0Dsx2d-MS5vS9rNM5e5g"><img src="https://img.shields.io/badge/Twitter-Discuss-blue?logo=twitter&amp"></a> <a href="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/WeChat.png"><img src="https://img.shields.io/badge/微信-小助手加群-green?logo=wechat&amp"></a> <a href="https://hpc-ai.com/blog/open-sora-v1.0"><img src="https://img.shields.io/badge/Open_Sora-Blog-blue"></a> </div> <h1 style='margin-top: 5px;'>Open-Sora: Democratizing Efficient Video Production for All</h1> </div> """ ) with gr.Row(): with gr.Column(): prompt_text = gr.Textbox(label="Prompt", placeholder="Describe your video here", lines=4) refine_prompt = gr.Checkbox( value=has_openai_key(), label="Refine prompt with GPT4o", interactive=has_openai_key() ) random_prompt_btn = gr.Button("Random Prompt By GPT4o", interactive=has_openai_key()) gr.Markdown("## Basic Settings") resolution = gr.Radio( choices=["360p", "720p"], value="720p", label="Resolution", ) aspect_ratio = gr.Radio( choices=["9:16", "16:9", "3:4", "4:3", "1:1"], value="9:16", label="Aspect Ratio (H:W)", ) length = gr.Radio( choices=[1, 49, 65, 81, 97, 113], value=97, label="Video Length (Number of Frames)", info="Setting the number of frames to 1 indicates image generation instead of video generation.", ) with gr.Row(): seed = gr.Slider(value=1024, minimum=1, maximum=2048, step=1, label="Seed") sampling_steps = gr.Slider(value=30, minimum=1, maximum=200, step=1, label="Sampling steps") cfg_scale = gr.Slider(value=7.0, minimum=0.0, maximum=10.0, step=0.1, label="CFG Scale") with gr.Row(): with gr.Column(): motion_strength = gr.Radio( choices=["very low", "low", "fair", "high", "very high", "extremely high"], value="fair", label="Motion Strength", info="Only effective for video generation", ) use_motion_strength = gr.Checkbox(value=True, label="Enable") with gr.Column(): aesthetic_score = gr.Radio( choices=["terrible", "very poor", "poor", "fair", "good", "very good", "excellent"], value="excellent", label="Aesthetic", info="Effective for text & video generation", ) use_aesthetic_score = gr.Checkbox(value=True, label="Enable") camera_motion = gr.Radio( value="none", label="Camera Motion", choices=["none", "pan right", "pan left", "tilt up", "tilt down", "zoom in", "zoom out", "static"], interactive=True, ) gr.Markdown("## Advanced Settings") with gr.Row(): fps = gr.Slider( value=24, minimum=1, maximum=60, step=1, label="FPS", info="This is the frames per seconds for video generation, keep it to 24 if you are not sure", ) num_loop = gr.Slider( value=1, minimum=1, maximum=20, step=1, 
label="Number of Loops", info="This will change the length of the generated video, keep it to 1 if you are not sure", ) gr.Markdown("## Reference Image") reference_image = gr.Image(label="Image (optional)", show_download_button=True) with gr.Column(): output_video = gr.Video(label="Output Video", height="100%") with gr.Row(): image_gen_button = gr.Button("Generate image") video_gen_button = gr.Button("Generate video") image_gen_button.click( fn=run_image_inference, inputs=[ prompt_text, resolution, aspect_ratio, length, motion_strength, aesthetic_score, use_motion_strength, use_aesthetic_score, camera_motion, reference_image, refine_prompt, fps, num_loop, seed, sampling_steps, cfg_scale, ], outputs=reference_image, ) video_gen_button.click( fn=run_video_inference, inputs=[ prompt_text, resolution, aspect_ratio, length, motion_strength, aesthetic_score, use_motion_strength, use_aesthetic_score, camera_motion, reference_image, refine_prompt, fps, num_loop, seed, sampling_steps, cfg_scale, ], outputs=output_video, ) random_prompt_btn.click(fn=generate_random_prompt, outputs=prompt_text) # launch demo.queue(max_size=5, default_concurrency_limit=1) demo.launch(server_port=args.port, server_name=args.host, share=args.share, max_threads=1)
<div style='text-align: center;'> <p align="center"> <img src="https://github.com/hpcaitech/Open-Sora-Demo/blob/main/readme/icon.png" width="250"/> </p> <div style="display: flex; gap: 10px; justify-content: center;"> <a href="https://github.com/hpcaitech/Open-Sora/stargazers"><img src="https://img.shields.io/github/stars/hpcaitech/Open-Sora?style=social"></a> <a href="https://hpcaitech.github.io/Open-Sora/"><img src="https://img.shields.io/badge/Gallery-View-orange?logo=&amp"></a> <a href="https://discord.gg/kZakZzrSUT"><img src="https://img.shields.io/badge/Discord-join-blueviolet?logo=discord&amp"></a> <a href="https://join.slack.com/t/colossalaiworkspace/shared_invite/zt-247ipg9fk-KRRYmUl~u2ll2637WRURVA"><img src="https://img.shields.io/badge/Slack-ColossalAI-blueviolet?logo=slack&amp"></a> <a href="https://twitter.com/yangyou1991/status/1769411544083996787?s=61&t=jT0Dsx2d-MS5vS9rNM5e5g"><img src="https://img.shields.io/badge/Twitter-Discuss-blue?logo=twitter&amp"></a> <a href="https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/WeChat.png"><img src="https://img.shields.io/badge/微信-小助手加群-green?logo=wechat&amp"></a> <a href="https://hpc-ai.com/blog/open-sora-v1.0"><img src="https://img.shields.io/badge/Open_Sora-Blog-blue"></a> </div> <h1 style='margin-top: 5px;'>Open-Sora: Democratizing Efficient Video Production for All</h1> </div>
main
python
hpcaitech/Open-Sora
gradio/app.py
https://github.com/hpcaitech/Open-Sora/blob/master/gradio/app.py
Apache-2.0
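main() reads args.port, args.host, and args.share from module scope; a hedged sketch of that wiring, with flag names inferred from those attributes rather than copied from the source.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--host", type=str, default="0.0.0.0")  # inferred flag
parser.add_argument("--port", type=int, default=7860)       # inferred flag
parser.add_argument("--share", action="store_true")         # inferred flag
args = parser.parse_args()

if __name__ == "__main__":
    main()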
def test_validation_pydantic_v1(clear_sqlmodel):
    """Test validation of implicit and explicit None values.

    # For consistency with pydantic, validators are not to be called on
    # arguments that are not explicitly provided.

    https://github.com/tiangolo/sqlmodel/issues/230
    https://github.com/samuelcolvin/pydantic/issues/1223
    """
    from pydantic import validator

    class Hero(SQLModel):
        name: Optional[str] = None
        secret_name: Optional[str] = None
        age: Optional[int] = None

        @validator("name", "secret_name", "age")
        def reject_none(cls, v):
            assert v is not None
            return v

    Hero.validate({"age": 25})

    with pytest.raises(ValidationError):
        Hero.validate({"name": None, "age": 25})
Test validation of implicit and explicit None values.

# For consistency with pydantic, validators are not to be called on
# arguments that are not explicitly provided.

https://github.com/tiangolo/sqlmodel/issues/230
https://github.com/samuelcolvin/pydantic/issues/1223
test_validation_pydantic_v1
python
fastapi/sqlmodel
tests/test_validation.py
https://github.com/fastapi/sqlmodel/blob/master/tests/test_validation.py
MIT
def test_validation_pydantic_v2(clear_sqlmodel):
    """Test validation of implicit and explicit None values.

    # For consistency with pydantic, validators are not to be called on
    # arguments that are not explicitly provided.

    https://github.com/tiangolo/sqlmodel/issues/230
    https://github.com/samuelcolvin/pydantic/issues/1223
    """
    from pydantic import field_validator

    class Hero(SQLModel):
        name: Optional[str] = None
        secret_name: Optional[str] = None
        age: Optional[int] = None

        @field_validator("name", "secret_name", "age")
        def reject_none(cls, v):
            assert v is not None
            return v

    Hero.model_validate({"age": 25})

    with pytest.raises(ValidationError):
        Hero.model_validate({"name": None, "age": 25})
Test validation of implicit and explicit None values.

# For consistency with pydantic, validators are not to be called on
# arguments that are not explicitly provided.

https://github.com/tiangolo/sqlmodel/issues/230
https://github.com/samuelcolvin/pydantic/issues/1223
test_validation_pydantic_v2
python
fastapi/sqlmodel
tests/test_validation.py
https://github.com/fastapi/sqlmodel/blob/master/tests/test_validation.py
MIT
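The behaviour both tests pin down is that an omitted field skips the validator while an explicit None reaches it; a small sketch outside pytest, assuming a Hero model declared as in the v2 test above.

from pydantic import ValidationError

hero = Hero.model_validate({"age": 25})  # "name" omitted -> validator not called
print(hero.name)                         # None, taken from the field default

try:
    Hero.model_validate({"name": None, "age": 25})  # explicit None -> validator fails
except ValidationError:
    print("explicit None rejected")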
def test_sa_relationship_property(clear_sqlmodel):
    """Test https://github.com/tiangolo/sqlmodel/issues/315#issuecomment-1272122306"""

    class Team(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        name: str = Field(unique=True)
        heroes: List["Hero"] = Relationship(  # noqa: F821
            sa_relationship=RelationshipProperty("Hero", back_populates="team")
        )

    class Hero(SQLModel, table=True):
        id: Optional[int] = Field(default=None, primary_key=True)
        name: str = Field(unique=True)
        team_id: Optional[int] = Field(default=None, foreign_key="team.id")
        team: Optional[Team] = Relationship(
            sa_relationship=RelationshipProperty("Team", back_populates="heroes")
        )

    team_preventers = Team(name="Preventers")
    hero_rusty_man = Hero(name="Rusty-Man", team=team_preventers)

    engine = create_engine("sqlite://", echo=True)
    SQLModel.metadata.create_all(engine)

    with Session(engine) as session:
        session.add(hero_rusty_man)
        session.commit()
        session.refresh(hero_rusty_man)
        # The next statement should not raise an AttributeError
        assert hero_rusty_man.team
        assert hero_rusty_man.team.name == "Preventers"
Test https://github.com/tiangolo/sqlmodel/issues/315#issuecomment-1272122306
test_sa_relationship_property
python
fastapi/sqlmodel
tests/test_main.py
https://github.com/fastapi/sqlmodel/blob/master/tests/test_main.py
MIT
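For comparison, the same two-way link is more commonly declared with back_populates directly on Relationship(), which is the shorthand that the sa_relationship escape hatch in the test bypasses; a hedged sketch reusing the imports from the test above.

class Team(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str = Field(unique=True)
    heroes: List["Hero"] = Relationship(back_populates="team")


class Hero(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str = Field(unique=True)
    team_id: Optional[int] = Field(default=None, foreign_key="team.id")
    team: Optional[Team] = Relationship(back_populates="heroes")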