Gainward777 committed on
Commit
97293f9
1 Parent(s): 0c3af90

Upload zerogpu_controller.py

Browse files
Files changed (1) hide show
  1. sd/zerogpu_controller.py +74 -0
sd/zerogpu_controller.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #from ui.gradio_ui import ui
2
+ import spaces
3
+
4
+ from sd.utils.utils import *
5
+ from utils.utils import sketch_process, prompt_preprocess
6
+ #from sd.sd_controller import Controller
7
+
8
+ #controller=Controller()
9
+
10
+ MODELS_NAMES=["cagliostrolab/animagine-xl-3.1",
11
+ "stabilityai/stable-diffusion-xl-base-1.0"]
12
+ LORA_PATH='sd/lora/lora.safetensors'
13
+ VAE=get_vae()
14
+ CONTROLNET=get_controlnet()
15
+ ADAPTER=get_adapter()
16
+ SCHEDULER=get_scheduler(model_name=MODELS_NAMES[1])
17
+ DETECTOR=get_detector()
18
+
19
+ FIRST_PIPE=get_pipe(vae=VAE,
20
+ model_name=MODELS_NAMES[0],
21
+ controlnet=CONTROLNET,
22
+ lora_path=LORA_PATH)
23
+
24
+ SECOND_PIPE=get_pipe(vae=VAE,
25
+ model_name=MODELS_NAMES[1],
26
+ adapter=ADAPTER,
27
+ scheduler=SCHEDULER)
28
+
29
+
@spaces.GPU
def get_first_result(img, prompt, negative_prompt,
                     controlnet_scale=0.5, strength=1.0, n_steps=30, eta=1.0,
                     seed=0):
    """Run the ControlNet pipeline (FIRST_PIPE) on a user sketch.

    Args:
        img: Input sketch image, handed to ``sketch_process``.
        prompt: Raw prompt text; normalized via ``prompt_preprocess``.
        negative_prompt: Text the generation should steer away from.
        controlnet_scale: ControlNet conditioning scale (cast to float
            before being passed to the pipeline).
        strength: Denoising strength for the img2img pass.
        n_steps: Number of inference steps.
        eta: DDIM eta parameter forwarded to the pipeline.
        seed: RNG seed for reproducible generation. Defaults to 0,
            the value that was previously hard-coded.

    Returns:
        The first generated image from the pipeline output
        (``result.images[0]``).
    """
    # sketch_process yields the base canvas plus the resized control image.
    substrate, resized_image = sketch_process(img)
    prompt = prompt_preprocess(prompt)

    result = FIRST_PIPE(image=substrate,
                        control_image=resized_image,
                        strength=strength,
                        prompt=prompt,
                        negative_prompt=negative_prompt,
                        controlnet_conditioning_scale=float(controlnet_scale),
                        # Seeded generator keeps output deterministic per seed.
                        generator=torch.manual_seed(seed),
                        num_inference_steps=n_steps,
                        eta=eta)

    return result.images[0]
48
+
49
+
@spaces.GPU
def get_second_result(img, prompt, negative_prompt,
                      g_scale=7.5, n_steps=25,
                      adapter_scale=0.9, adapter_factor=1.0,
                      seed=42):
    """Run the T2I-adapter pipeline (SECOND_PIPE) on a line-art rendition.

    Args:
        img: Input image; converted to line art by ``DETECTOR`` and then
            to single-channel ("L") mode.
        prompt: Prompt text passed straight to the pipeline.
        negative_prompt: Text the generation should steer away from.
        g_scale: Classifier-free guidance scale.
        n_steps: Number of inference steps.
        adapter_scale: Adapter conditioning scale.
        adapter_factor: Adapter conditioning factor.
        seed: RNG seed for reproducible generation. Defaults to 42,
            the value that was previously hard-coded.

    Returns:
        The first generated image from the pipeline output
        (``result.images[0]``).
    """
    # DETECTOR produces a 1024px line-art map; "L" collapses it to grayscale.
    preprocessed_img = DETECTOR(img,
                                detect_resolution=1024,
                                image_resolution=1024,
                                apply_filter=True).convert("L")

    result = SECOND_PIPE(prompt=prompt,
                         negative_prompt=negative_prompt,
                         image=preprocessed_img,
                         guidance_scale=g_scale,
                         num_inference_steps=n_steps,
                         adapter_conditioning_scale=adapter_scale,
                         adapter_conditioning_factor=adapter_factor,
                         # Seeded generator keeps output deterministic per seed.
                         generator=torch.manual_seed(seed))

    return result.images[0]
70
+
71
+
72
+
73
+
74
+ #ui(get_first_result, get_second_result) #controller)