# HunyuanVideo-vae / handler.py
from typing import Any

import torch
from base64 import b64decode
from diffusers import AutoencoderKLHunyuanVideo
from diffusers.utils import export_to_video
from diffusers.video_processor import VideoProcessor

# Accepted wire dtypes for the incoming latent tensor.
DTYPE_MAP = {
    "float16": torch.float16,
    "float32": torch.float32,
    "bfloat16": torch.bfloat16,
}


class EndpointHandler:
    def __init__(self, path: str = ""):
        # Decode on CPU in full precision; incoming latents are cast to this
        # device/dtype on arrival, whatever dtype they were sent in.
        self.device = "cpu"
        self.dtype = torch.float32
        self.vae = (
            AutoencoderKLHunyuanVideo.from_pretrained(
                path, subfolder="vae", torch_dtype=self.dtype
            )
            .to(self.device, self.dtype)
            .eval()
        )
        self.vae_scale_factor_spatial = self.vae.spatial_compression_ratio
        self.video_processor = VideoProcessor(
            vae_scale_factor=self.vae_scale_factor_spatial
        )
    @torch.no_grad()
    def __call__(self, data: Any) -> bytes:
        """Decode a base64-encoded latent tensor into MP4 bytes.

        Args:
            data: Request payload. `inputs` holds the base64-encoded raw
                bytes of the latent tensor; `parameters` must include the
                tensor's `shape` (list of ints) and `dtype` (one of
                "float16", "float32", "bfloat16").
        """
        raw = b64decode(data["inputs"].encode("utf-8"))
        parameters = data.get("parameters", {})
        if "shape" not in parameters:
            raise ValueError("Expected `shape` in parameters.")
        if "dtype" not in parameters:
            raise ValueError("Expected `dtype` in parameters.")
        shape = parameters["shape"]
        dtype = DTYPE_MAP.get(parameters["dtype"])
        if dtype is None:
            raise ValueError(f"Unsupported `dtype`: {parameters['dtype']!r}")
        # Rebuild the latent tensor from the raw bytes, then cast it to the
        # device/dtype the VAE runs in.
        latents = torch.frombuffer(bytearray(raw), dtype=dtype).reshape(shape)
        latents = latents.to(self.device, self.dtype)
        # Undo the scaling that was applied when the latents were encoded.
        latents = latents / self.vae.config.scaling_factor
        # `@torch.no_grad()` on `__call__` already disables autograd here.
        frames = self.vae.decode(latents, return_dict=False)[0]
        frames = self.video_processor.postprocess_video(frames, output_type="pil")[0]
        # Write the frames to a temporary .mp4 and return its raw bytes.
        path = export_to_video(frames, fps=15)
        with open(path, "rb") as f:
            video = f.read()
        return video
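

# ---------------------------------------------------------------------------
# Minimal client-side sketch (a usage illustration, not part of the handler).
# It serializes a latent tensor into the payload format expected above and
# writes the returned MP4 bytes to disk. The endpoint URL and token are
# placeholders, and the latent shape is illustrative; real latents would come
# from a HunyuanVideo pipeline, already multiplied by the VAE scaling factor.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from base64 import b64encode

    import requests  # assumption: available in the client environment

    # Illustrative latent shape: (batch, channels, frames, height, width).
    latents = torch.randn(1, 16, 9, 40, 64, dtype=torch.float16)
    payload = {
        "inputs": b64encode(latents.numpy().tobytes()).decode("utf-8"),
        "parameters": {"shape": list(latents.shape), "dtype": "float16"},
    }
    response = requests.post(
        "https://<your-endpoint>.endpoints.huggingface.cloud",  # placeholder URL
        headers={"Authorization": "Bearer <HF_TOKEN>"},  # placeholder token
        json=payload,
    )
    response.raise_for_status()
    with open("decoded.mp4", "wb") as f:
        f.write(response.content)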