from ui.gradio_ui import ui
import spaces
from sd import zerogpu_controller as controller
from sd.utils.utils import *
from utils.utils import sketch_process, prompt_preprocess
# from sd.sd_controller import Controller  # local (non-ZeroGPU) controller, kept for reference

# controller = Controller()
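
# The triple-quoted block below is the earlier in-file implementation, disabled
# in favour of sd.zerogpu_controller and kept for reference. If re-enabled, it
# assumes `torch` is available (presumably re-exported by sd.utils.utils).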
"""

MODELS_NAMES = ["cagliostrolab/animagine-xl-3.1",
                "stabilityai/stable-diffusion-xl-base-1.0"]
LORA_PATH = 'sd/lora/lora.safetensors'

VAE = get_vae()
CONTROLNET = get_controlnet()
ADAPTER = get_adapter()
SCHEDULER = get_scheduler(model_name=MODELS_NAMES[1])
DETECTOR = get_detector()

# Pipeline 1: Animagine XL 3.1 with ControlNet conditioning and a LoRA.
FIRST_PIPE = get_pipe(vae=VAE,
                      model_name=MODELS_NAMES[0],
                      controlnet=CONTROLNET,
                      lora_path=LORA_PATH)

# Pipeline 2: SDXL base with an adapter and a custom scheduler.
SECOND_PIPE = get_pipe(vae=VAE,
                       model_name=MODELS_NAMES[1],
                       adapter=ADAPTER,
                       scheduler=SCHEDULER)


@spaces.GPU
def get_first_result(img, prompt, negative_prompt,
                     controlnet_scale=0.5, strength=1.0, n_steps=30, eta=1.0):
    # Prepare the sketch, then run the ControlNet img2img pipeline over it.
    substrate, resized_image = sketch_process(img)
    prompt = prompt_preprocess(prompt)

    result = FIRST_PIPE(image=substrate,
                        control_image=resized_image,
                        strength=strength,
                        prompt=prompt,
                        negative_prompt=negative_prompt,
                        controlnet_conditioning_scale=float(controlnet_scale),
                        generator=torch.manual_seed(0),  # fixed seed for reproducibility
                        num_inference_steps=n_steps,
                        eta=eta)
    return result.images[0]


@spaces.GPU
def get_second_result(img, prompt, negative_prompt,
                      g_scale=7.5, n_steps=25,
                      adapter_scale=0.9, adapter_factor=1.0):
    # Run the detector preprocessor over the input, then condition the
    # adapter pipeline on the resulting grayscale image.
    preprocessed_img = DETECTOR(img,
                                detect_resolution=1024,
                                image_resolution=1024,
                                apply_filter=True).convert("L")

    result = SECOND_PIPE(prompt=prompt,
                         negative_prompt=negative_prompt,
                         image=preprocessed_img,
                         guidance_scale=g_scale,
                         num_inference_steps=n_steps,
                         adapter_conditioning_scale=adapter_scale,
                         adapter_conditioning_factor=adapter_factor,
                         generator=torch.manual_seed(42))  # fixed seed for reproducibility
    return result.images[0]

"""

ui(controller)#get_first_result, get_second_result) #controller)