from typing import Any, Dict

import base64
from io import BytesIO

import torch
from diffusers import StableDiffusionPipeline, EulerAncestralDiscreteScheduler
from PIL import Image

# set device; this handler requires a GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

if device.type != "cuda":
    raise ValueError("This handler needs to run on a GPU.")


class EndpointHandler:
    def __init__(self, path: str = ""):
        # load the model in half precision and swap in the Euler Ancestral scheduler
        self.pipe = StableDiffusionPipeline.from_pretrained(path, torch_dtype=torch.float16)
        self.pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(self.pipe.scheduler.config)
        self.pipe = self.pipe.to(device)

    def __call__(self, data: Dict[str, Any]) -> Image.Image:
        """
        Args:
            data (:obj:`dict`): the request payload, with the prompt under
                "inputs", an optional base64-encoded image under "image", and
                optional hyperparameters under "parameters".
        Return:
            A :obj:`PIL.Image.Image`: the first generated image.
        """
        inputs = data.pop("inputs", data)
        encoded_image = data.pop("image", None)
        params = data.pop("parameters", data)

        # hyperparameters
        num_inference_steps = params.pop("num_inference_steps", 20)
        guidance_scale = params.pop("guidance_scale", 7.5)
        negative_prompt = params.pop("negative_prompt", None)
        height = params.pop("height", None)
        width = params.pop("width", None)
        manual_seed = params.pop("manual_seed", -1)

        generator = torch.Generator(device).manual_seed(manual_seed)

        # decode the input image only if one was sent; otherwise leave it as None
        # (the original left `image` undefined in that case, raising a NameError)
        image = self.decode_base64_image(encoded_image) if encoded_image is not None else None

        # forward `image` only when present, since a plain text-to-image
        # StableDiffusionPipeline does not accept an `image` argument
        extra_kwargs = {"image": image} if image is not None else {}

        # run the inference pipeline
        out = self.pipe(
            inputs,
            generator=generator,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            num_images_per_prompt=1,
            negative_prompt=negative_prompt,
            height=height,
            width=width,
            **extra_kwargs,
        )

        # return the first generated PIL image
        return out.images[0]

    # helper to decode a base64-encoded input image
    def decode_base64_image(self, image_string: str) -> Image.Image:
        base64_image = base64.b64decode(image_string)
        buffer = BytesIO(base64_image)
        image = Image.open(buffer)
        return image
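

# A minimal local smoke test for the handler above (a sketch, not part of the
# endpoint contract). The checkpoint id "runwayml/stable-diffusion-v1-5" and
# the filenames are assumptions for illustration; swap in your own model path.
# The payload shape mirrors what a Hugging Face Inference Endpoint passes to
# EndpointHandler.__call__.
if __name__ == "__main__":
    handler = EndpointHandler(path="runwayml/stable-diffusion-v1-5")  # assumed checkpoint

    payload = {
        "inputs": "a photo of an astronaut riding a horse on mars",
        "parameters": {
            "num_inference_steps": 25,
            "guidance_scale": 7.5,
            "manual_seed": 42,
        },
    }

    # Optionally attach a base64-encoded image (the counterpart of
    # decode_base64_image); only meaningful for pipelines that accept one:
    # with open("sketch.png", "rb") as f:
    #     payload["image"] = base64.b64encode(f.read()).decode("utf-8")

    image = handler(payload)
    image.save("generated.png")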