# Diffusion/src/model_loader.py
# NOTE: the following provenance lines were Hugging Face file-viewer chrome
# captured in the scrape (author: torinriley, commit message "nooooooo se",
# commit 20c21fe, raw/history/blame/contribute/delete links, 848 Bytes);
# they are not part of the module source and are kept here only as comments.
from .clip import CLIP
from .encoder import VAE_Encoder
from .decoder import VAE_Decoder
from .diffusion import Diffusion
from . import model_converter
import torch
def load_models(ckpt_path, device):
    """Instantiate the Stable Diffusion sub-models and load converted weights.

    Parameters
    ----------
    ckpt_path : str
        Path to a standard-format checkpoint; it is converted into
        per-model state dicts by ``model_converter.load_from_standard_weights``.
    device : str or torch.device
        Device each sub-model is moved to before its weights are restored.

    Returns
    -------
    dict
        ``{'clip', 'encoder', 'decoder', 'diffusion'}`` mapping each name to
        its weight-loaded module.
    """
    weights = model_converter.load_from_standard_weights(ckpt_path, device)

    # Build each sub-model on the target device, then restore its converted
    # weights. strict=True: every checkpoint key must match the model exactly.
    clip = CLIP().to(device)
    clip.load_state_dict(weights['clip'], strict=True)

    encoder = VAE_Encoder().to(device)
    encoder.load_state_dict(weights['encoder'], strict=True)

    decoder = VAE_Decoder().to(device)
    decoder.load_state_dict(weights['decoder'], strict=True)

    diffusion = Diffusion().to(device)
    diffusion.load_state_dict(weights['diffusion'], strict=True)

    return {
        'clip': clip,
        'encoder': encoder,
        'decoder': decoder,
        'diffusion': diffusion,
    }