# Sketcher/sd/sd_controller.py
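# Overview (inferred from the helper calls below): Controller wires together two
# SDXL pipelines built by the sd.utils.utils helpers:
#   1. a ControlNet pipeline on animagine-xl-3.1 with an extra LoRA, used by
#      get_first_result for the initial sketch-to-image pass;
#   2. a T2I-Adapter pipeline on stable-diffusion-xl-base-1.0, fed by a sketch
#      detector (likely PidiNet, per the commented-out import), used by
#      get_second_result for the second pass.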
from sd.utils.utils import *  # expected to provide get_vae, get_controlnet, get_adapter, get_scheduler, get_detector, get_pipe
from utils.utils import sketch_process, prompt_preprocess
#from controlnet_aux.pidi import PidiNetDetector
import torch  # torch.manual_seed is called below; imported explicitly rather than relying on the star import
import spaces
class Controller():
    def __init__(self,
                 models_names=["cagliostrolab/animagine-xl-3.1",
                               "stabilityai/stable-diffusion-xl-base-1.0"],
                 lora_path='sd/lora/lora.safetensors'):
        self.models_names = models_names
        self.lora_path = lora_path

        # Shared building blocks created by the sd.utils.utils helpers.
        self.vae = get_vae()
        self.controlnet = get_controlnet()
        self.adapter = get_adapter()
        self.scheduler = get_scheduler(model_name=self.models_names[1])
        self.detector = get_detector()

        # First pipeline: ControlNet + LoRA on animagine-xl-3.1.
        self.first_pipe = get_pipe(vae=self.vae,
                                   model_name=self.models_names[0],
                                   controlnet=self.controlnet,
                                   lora_path=self.lora_path)
        # Second pipeline: T2I-Adapter on SDXL base 1.0 with its own scheduler.
        self.second_pipe = get_pipe(vae=self.vae,
                                    model_name=self.models_names[1],
                                    adapter=self.adapter,
                                    scheduler=self.scheduler)
    @spaces.GPU
    def get_first_result(self, img, prompt, negative_prompt,
                         controlnet_scale=0.5, strength=1.0, n_steps=30, eta=1.0):
        # Build the img2img substrate and the resized control image from the sketch.
        substrate, resized_image = sketch_process(img)  # was sketch_process(input_image): undefined name
        prompt = prompt_preprocess(prompt)
        result = self.first_pipe(image=substrate,
                                 control_image=resized_image,
                                 strength=strength,
                                 prompt=prompt,
                                 negative_prompt=negative_prompt,
                                 controlnet_conditioning_scale=float(controlnet_scale),
                                 generator=torch.manual_seed(0),  # fixed seed for reproducible output
                                 num_inference_steps=n_steps,
                                 eta=eta)
        return result.images[0]
    @spaces.GPU
    def get_second_result(self, img, prompt, negative_prompt,
                          g_scale=7.5, n_steps=25,
                          adapter_scale=0.9, adapter_factor=1.0):
        # Reduce the input to a single-channel line map for the T2I-Adapter.
        preprocessed_img = self.detector(img,
                                         detect_resolution=1024,
                                         image_resolution=1024,
                                         apply_filter=True).convert("L")
        result = self.second_pipe(prompt=prompt,
                                  negative_prompt=negative_prompt,
                                  image=preprocessed_img,
                                  guidance_scale=g_scale,
                                  num_inference_steps=n_steps,
                                  adapter_conditioning_scale=adapter_scale,
                                  adapter_conditioning_factor=adapter_factor,
                                  generator=torch.manual_seed(42))  # fixed seed for reproducible output
        return result.images[0]
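

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original file): a minimal way to
# drive the controller from a script. Assumes a GPU environment where the
# sd.utils.utils helpers can load their models, and a PIL-readable sketch image;
# the file names and prompts below are hypothetical. The original app may chain
# the two passes differently; here the first result is fed into the second pass.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from PIL import Image

    controller = Controller()
    sketch = Image.open("example_sketch.png")  # hypothetical input sketch

    # Pass 1: ControlNet-guided generation from the processed sketch.
    first = controller.get_first_result(sketch,
                                        prompt="1girl, watercolor, masterpiece",
                                        negative_prompt="lowres, bad anatomy")
    first.save("first_pass.png")

    # Pass 2: T2I-Adapter refinement, conditioned on lines detected in the first result.
    second = controller.get_second_result(first,
                                          prompt="1girl, watercolor, masterpiece",
                                          negative_prompt="lowres, bad anatomy")
    second.save("second_pass.png")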