Gainward777 committed on
Commit
a093ea5
1 Parent(s): effbd63

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +74 -85
app.py CHANGED
@@ -1,85 +1,74 @@
1
- from ui.gradio_ui import ui
2
- import spaces
3
-
4
- from sd.utils.utils import *
5
- from utils.utils import sketch_process, prompt_preprocess
6
- #from sd.sd_controller import Controller
7
-
8
- #controller=Controller()
9
-
10
# Model configuration: index 0 drives the sketch->image ControlNet pipeline,
# index 1 the T2I-Adapter refinement pipeline.
MODELS_NAMES = [
    "cagliostrolab/animagine-xl-3.1",
    "stabilityai/stable-diffusion-xl-base-1.0",
]
LORA_PATH = 'sd/lora/lora.safetensors'

# Shared components, built once at import time and reused by both pipelines.
VAE = get_vae()
CONTROLNET = get_controlnet()
ADAPTER = get_adapter()
SCHEDULER = get_scheduler(model_name=MODELS_NAMES[1])
DETECTOR = get_detector()

# Pipeline #1: ControlNet + LoRA on the anime checkpoint.
FIRST_PIPE = get_pipe(
    vae=VAE,
    model_name=MODELS_NAMES[0],
    controlnet=CONTROLNET,
    lora_path=LORA_PATH,
)

# Pipeline #2: T2I-Adapter on the SDXL base checkpoint.
SECOND_PIPE = get_pipe(
    vae=VAE,
    model_name=MODELS_NAMES[1],
    adapter=ADAPTER,
    scheduler=SCHEDULER,
)
28
-
29
-
30
@spaces.GPU(duration=120)
def get_first_result(img, prompt, negative_prompt,
                     controlnet_scale=0.5, strength=1.0, n_steps=30, eta=1.0):
    """Run the sketch-to-image ControlNet pipeline and return the first image.

    Args:
        img: raw sketch input; preprocessed by ``sketch_process`` into a
            substrate image and a resized control image.
        prompt: user prompt; normalized by ``prompt_preprocess``.
        negative_prompt: negative prompt passed straight to the pipeline.
        controlnet_scale: ControlNet conditioning strength (cast to float).
        strength: img2img strength for the substrate image.
        n_steps: number of denoising steps.
        eta: DDIM eta parameter forwarded to the scheduler.

    Returns:
        The first generated PIL image from the pipeline result.
    """
    substrate, resized_image = sketch_process(img)
    prompt = prompt_preprocess(prompt)

    FIRST_PIPE.to('cuda')
    try:
        # Fixed seed keeps generation reproducible across calls.
        result = FIRST_PIPE(image=substrate,
                            control_image=resized_image,
                            strength=strength,
                            prompt=prompt,
                            negative_prompt=negative_prompt,
                            controlnet_conditioning_scale=float(controlnet_scale),
                            generator=torch.manual_seed(0),
                            num_inference_steps=n_steps,
                            eta=eta)
    finally:
        # Bug fix: previously the pipe stayed on the GPU if inference raised.
        # Always move it back so GPU memory is released even on failure.
        FIRST_PIPE.to('cpu')

    return result.images[0]
52
-
53
-
54
@spaces.GPU(duration=120)
def get_second_result(img, prompt, negative_prompt,
                      g_scale=7.5, n_steps=25,
                      adapter_scale=0.9, adapter_factor=1.0):
    """Refine an image with the T2I-Adapter SDXL pipeline.

    Args:
        img: input image; run through ``DETECTOR`` and converted to a
            single-channel ("L") conditioning image at 1024 resolution.
        prompt: positive prompt.
        negative_prompt: negative prompt.
        g_scale: classifier-free guidance scale.
        n_steps: number of denoising steps.
        adapter_scale: adapter conditioning scale.
        adapter_factor: fraction of steps the adapter conditioning applies to.

    Returns:
        The first generated PIL image from the pipeline result.
    """
    DETECTOR.to('cuda')
    SECOND_PIPE.to('cuda')
    try:
        preprocessed_img = DETECTOR(img,
                                    detect_resolution=1024,
                                    image_resolution=1024,
                                    apply_filter=True).convert("L")

        # Fixed seed keeps generation reproducible across calls.
        result = SECOND_PIPE(prompt=prompt,
                             negative_prompt=negative_prompt,
                             image=preprocessed_img,
                             guidance_scale=g_scale,
                             num_inference_steps=n_steps,
                             adapter_conditioning_scale=adapter_scale,
                             adapter_conditioning_factor=adapter_factor,
                             generator=torch.manual_seed(42))
    finally:
        # Bug fix: previously detector/pipe stayed on the GPU if either the
        # detector or the pipeline raised. Always release GPU memory.
        DETECTOR.to('cpu')
        SECOND_PIPE.to('cpu')

    return result.images[0]
81
-
82
-
83
-
84
-
85
# Entry point: wire both inference callbacks into the Gradio UI.
ui(get_first_result, get_second_result)
 
1
+ from ui.gradio_ui import ui
2
+ import spaces
3
+ from sd import zerogpu_controller as controller
4
+ from sd.utils.utils import *
5
+ from utils.utils import sketch_process, prompt_preprocess
6
+ #from sd.sd_controller import Controller
7
+
8
+ #controller=Controller()
9
+ """
10
+ MODELS_NAMES=["cagliostrolab/animagine-xl-3.1",
11
+ "stabilityai/stable-diffusion-xl-base-1.0"]
12
+ LORA_PATH='sd/lora/lora.safetensors'
13
+ VAE=get_vae()
14
+ CONTROLNET=get_controlnet()
15
+ ADAPTER=get_adapter()
16
+ SCHEDULER=get_scheduler(model_name=MODELS_NAMES[1])
17
+ DETECTOR=get_detector()
18
+
19
+ FIRST_PIPE=get_pipe(vae=VAE,
20
+ model_name=MODELS_NAMES[0],
21
+ controlnet=CONTROLNET,
22
+ lora_path=LORA_PATH)
23
+
24
+ SECOND_PIPE=get_pipe(vae=VAE,
25
+ model_name=MODELS_NAMES[1],
26
+ adapter=ADAPTER,
27
+ scheduler=SCHEDULER)
28
+
29
+
30
+ @spaces.GPU
31
+ def get_first_result(img, prompt, negative_prompt,
32
+ controlnet_scale=0.5, strength=1.0,n_steps=30,eta=1.0):
33
+
34
+ substrate, resized_image = sketch_process(img)
35
+ prompt=prompt_preprocess(prompt)
36
+
37
+ result=FIRST_PIPE(image=substrate,
38
+ control_image=resized_image,
39
+ strength=strength,
40
+ prompt=prompt,
41
+ negative_prompt = negative_prompt,
42
+ controlnet_conditioning_scale=float(controlnet_scale),
43
+ generator=torch.manual_seed(0),
44
+ num_inference_steps=n_steps,
45
+ eta=eta)
46
+
47
+ return result.images[0]
48
+
49
+
50
+ @spaces.GPU
51
+ def get_second_result(img, prompt, negative_prompt,
52
+ g_scale=7.5, n_steps=25,
53
+ adapter_scale=0.9, adapter_factor=1.0):
54
+
55
+ preprocessed_img=DETECTOR(img,
56
+ detect_resolution=1024,
57
+ image_resolution=1024,
58
+ apply_filter=True).convert("L")
59
+
60
+ result=SECOND_PIPE(prompt=prompt,
61
+ negative_prompt=negative_prompt,
62
+ image=preprocessed_img,
63
+ guidance_scale=g_scale,
64
+ num_inference_steps=n_steps,
65
+ adapter_conditioning_scale=adapter_scale,
66
+ adapter_conditioning_factor=adapter_factor,
67
+ generator = torch.manual_seed(42))
68
+
69
+ return result.images[0]
70
+
71
+
72
+ """
73
+
74
# Entry point: the UI now delegates all inference to the zerogpu controller.
ui(controller)