#!/usr/bin/env python3
import os
from pathlib import Path

import cv2
import numpy as np
import torch
from huggingface_hub import HfApi
from PIL import Image

from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)
from diffusers.utils import load_image

# Download the example input image.
image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
)
image = np.array(image)

# Extract Canny edges and stack them into a 3-channel conditioning image.
low_threshold = 100
high_threshold = 200

image = cv2.Canny(image, low_threshold, high_threshold)
image = image[:, :, None]
image = np.concatenate([image, image, image], axis=2)
canny_image = Image.fromarray(image)

# Load the Canny ControlNet and plug it into a Stable Diffusion v1.5 pipeline.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
)

pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

# Fix the seed so the output is reproducible.
generator = torch.manual_seed(0)

out_image = pipe(
    "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image
).images[0]

# Save the result locally; make sure the target directory exists first.
out_dir = os.path.join(Path.home(), "images")
os.makedirs(out_dir, exist_ok=True)
path = os.path.join(out_dir, "aa.png")
out_image.save(path)

# Upload the generated image to the Hub (requires being logged in with a write token,
# e.g. via `huggingface-cli login`).
api = HfApi()
api.upload_file(
    path_or_fileobj=path,
    path_in_repo=path.split("/")[-1],
    repo_id="patrickvonplaten/images",
    repo_type="dataset",
)

print("https://huggingface.co/datasets/patrickvonplaten/images/blob/main/aa.png")