Spaces: Running on Zero
tori29umai committed • 2421eec
1 Parent(s): c458cb3
app.py CHANGED
@@ -27,6 +27,12 @@ dl_cn_config(cn_dir)
 dl_tagger_model(tagger_dir)
 dl_lora_model(lora_dir)
 
+def make_line(img_path, sigma, gamma):
+    sigma = float(sigma )
+    gamma = float(gamma)
+    return line_process(img_path, sigma, gamma)
+
+
 def load_model(lora_dir, cn_dir):
     device = "cuda" if torch.cuda.is_available() else "cpu"
     dtype = torch.float16
@@ -44,13 +50,11 @@ def load_model(lora_dir, cn_dir):
 
 
 @spaces.GPU
-def predict(input_image_path, line_image, prompt, negative_prompt, controlnet_scale):
+def predict(input_image_path, prompt, negative_prompt, controlnet_scale):
     pipe = load_model(lora_dir, cn_dir)
-
-    base_size =
-    resize_image = resize_image_aspect_ratio(
-    white_base_pil = base_generation(resize_image.size, (255, 255, 255, 255)).convert("RGB")
-    line_image = line_image.resize(resize_image.size, Image.LANCZOS)
+    line_image =make_line(input_image_path, 1.4, 0.98)
+    base_size = line_image.size
+    resize_image = resize_image_aspect_ratio(resize_image)
     generator = torch.manual_seed(0)
     last_time = time.time()
     prompt = "masterpiece, best quality, monochrome, lineart, white background, " + prompt
@@ -61,7 +65,7 @@ def predict(input_image_path, line_image, prompt, negative_prompt, controlnet_scale):
     print(prompt)
 
     output_image = pipe(
-        image=
+        image=resize_image,
         control_image=resize_image,
         strength=1.0,
         prompt=prompt,
@@ -95,10 +99,7 @@ class Img2Img:
         tags_list = remove_color(tags)
         return tags_list
 
-
-        sigma = float(sigma )
-        gamma = float(gamma)
-        return line_process(img_path, sigma, gamma)
+
 
     def layout(self):
         css = """
@@ -113,11 +114,6 @@ class Img2Img:
             with gr.Column():
                 self.input_image_path = gr.Image(label="input_image", type='filepath')
                 self.line_image = gr.Image(label="line_image", type='pil')
-                with gr.Row():
-                    line_sigma = gr.Slider(label="sigma", minimum=0.1, value=1.4, maximum=3.0, show_label=False)
-                    line_gamma = gr.Slider(label="gamma", minimum=0.5, value=0.98, maximum=2.0, show_label=False)
-                line_generate_button = gr.Button("line_generate")
-
                 self.prompt = gr.Textbox(label="prompt", lines=3)
                 self.negative_prompt = gr.Textbox(label="negative_prompt", lines=3, value="lowres, error, extra digit, fewer digits, cropped, worst quality,low quality, normal quality, jpeg artifacts, blurry")
 
@@ -129,13 +125,6 @@ class Img2Img:
             with gr.Column():
                 self.output_image = gr.Image(type="pil", label="output_image")
 
-                line_generate_button.click(
-                    self._make_line,
-                    inputs=[self.input_image_path, line_sigma, line_gamma],
-                    outputs=self.line_image
-                )
-
-
                 prompt_analysis_button.click(
                     self.process_prompt_analysis,
                     inputs=[self.input_image_path],
@@ -148,7 +137,7 @@ class Img2Img:
 
                 generate_button.click(
                     fn=predict,
-                    inputs=[self.input_image_path, self.
+                    inputs=[self.input_image_path, self.prompt, self.negative_prompt, self.controlnet_scale],
                     outputs=self.output_image
                 )
         return demo
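In short, the commit moves line-art extraction out of the UI and into predict: a new module-level make_line(img_path, sigma, gamma) wraps line_process, predict now calls it with fixed values (1.4, 0.98), and the sigma/gamma sliders, the line_generate button, and its click handler are removed from layout(). Below is a minimal, self-contained sketch of the resulting call flow; line_process and resize_image_aspect_ratio are stand-ins for the Space's own helpers (names taken from the diff, bodies assumed), and predict_flow only mirrors the shape of the reworked predict, not the actual ControlNet pipeline call.

# Sketch only: stand-in helpers so the flow runs end to end.
from PIL import Image

def line_process(img_path, sigma, gamma):
    # stand-in; the Space's real helper extracts line art from the image at img_path
    return Image.open(img_path).convert("RGB")

def resize_image_aspect_ratio(img):
    # stand-in; the Space's real helper resizes while preserving aspect ratio
    return img

def make_line(img_path, sigma, gamma):
    # helper added by this commit: thin wrapper around line_process
    sigma = float(sigma)
    gamma = float(gamma)
    return line_process(img_path, sigma, gamma)

def predict_flow(input_image_path, prompt, negative_prompt, controlnet_scale):
    # shape of the reworked predict(): line extraction happens here with fixed
    # sigma/gamma (1.4 / 0.98) instead of values from the removed sliders
    line_image = make_line(input_image_path, 1.4, 0.98)
    base_size = line_image.size
    # note: the committed code passes resize_image to resize_image_aspect_ratio,
    # which is unbound at that point; line_image looks like the intended argument
    resize_image = resize_image_aspect_ratio(line_image)
    prompt = "masterpiece, best quality, monochrome, lineart, white background, " + prompt
    return resize_image, base_size, prompt, negative_prompt, controlnet_scale

With predict reduced to four arguments, the updated generate_button.click wiring passes [self.input_image_path, self.prompt, self.negative_prompt, self.controlnet_scale], matching the new signature.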