import torch  # used for torch.manual_seed below; imported explicitly rather than via the wildcard import

from sd.utils.utils import *
from utils.utils import sketch_process, prompt_preprocess
#from controlnet_aux.pidi import PidiNetDetector
import spaces
class Controller():
    def __init__(self,
                 models_names=["cagliostrolab/animagine-xl-3.1",
                               "stabilityai/stable-diffusion-xl-base-1.0"],
                 lora_path='sd/lora/lora.safetensors'):
        self.models_names = models_names
        self.lora_path = lora_path
        # Components shared or split between the two pipelines.
        self.vae = get_vae()
        self.controlnet = get_controlnet()
        self.adapter = get_adapter()
        self.scheduler = get_scheduler(model_name=self.models_names[1])
        self.detector = get_detector()
        # First stage: ControlNet + LoRA on the anime base model.
        self.first_pipe = get_pipe(vae=self.vae,
                                   model_name=self.models_names[0],
                                   controlnet=self.controlnet,
                                   lora_path=self.lora_path)
        # Second stage: T2I-Adapter on the SDXL base model.
        self.second_pipe = get_pipe(vae=self.vae,
                                    model_name=self.models_names[1],
                                    adapter=self.adapter,
                                    scheduler=self.scheduler)
    @spaces.GPU
    def get_first_result(self, img, prompt, negative_prompt,
                         controlnet_scale=0.5, strength=1.0, n_steps=30, eta=1.0):
        # Build the blank substrate and the resized control image from the sketch
        # (the original referenced an undefined `input_image`; the parameter is `img`).
        substrate, resized_image = sketch_process(img)
        prompt = prompt_preprocess(prompt)
        result = self.first_pipe(image=substrate,
                                 control_image=resized_image,
                                 strength=strength,
                                 prompt=prompt,
                                 negative_prompt=negative_prompt,
                                 controlnet_conditioning_scale=float(controlnet_scale),
                                 generator=torch.manual_seed(0),
                                 num_inference_steps=n_steps,
                                 eta=eta)
        return result.images[0]
    @spaces.GPU
    def get_second_result(self, img, prompt, negative_prompt,
                          g_scale=7.5, n_steps=25,
                          adapter_scale=0.9, adapter_factor=1.0):
        # Extract a grayscale lineart map from the input and condition the adapter on it.
        preprocessed_img = self.detector(img,
                                         detect_resolution=1024,
                                         image_resolution=1024,
                                         apply_filter=True).convert("L")
        result = self.second_pipe(prompt=prompt,
                                  negative_prompt=negative_prompt,
                                  image=preprocessed_img,
                                  guidance_scale=g_scale,
                                  num_inference_steps=n_steps,
                                  adapter_conditioning_scale=adapter_scale,
                                  adapter_conditioning_factor=adapter_factor,
                                  generator=torch.manual_seed(42))
        return result.images[0]
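

# Minimal usage sketch, assuming the sd.utils.utils helpers load their models
# successfully and that both methods accept PIL images. Chaining the first
# stage's output into the second is one plausible flow, not confirmed by the
# source; "sketch.png", "result.png", and the prompts are illustrative placeholders.
if __name__ == "__main__":
    from PIL import Image

    controller = Controller()
    sketch = Image.open("sketch.png").convert("RGB")

    # Stage 1: ControlNet-guided generation over the processed sketch.
    draft = controller.get_first_result(
        sketch,
        prompt="1girl, solo, watercolor",
        negative_prompt="lowres, bad anatomy",
        controlnet_scale=0.5,
        n_steps=30,
    )

    # Stage 2: re-detect lineart from the draft and refine with the T2I-Adapter.
    final = controller.get_second_result(
        draft,
        prompt="1girl, solo, watercolor, masterpiece",
        negative_prompt="lowres, bad anatomy",
        g_scale=7.5,
        n_steps=25,
    )
    final.save("result.png")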