tori29umai committed on
Commit
a50b44f
1 Parent(s): 088a973
Files changed (1)
  1. app.py +94 -89
app.py CHANGED
@@ -72,94 +72,99 @@ pipe = pipe.to(device)
 
 
 
-@spaces.GPU
-def predict(
-    input_image_path,
-    prompt,
-    negative_prompt,
-    controlnet_conditioning_scale,
-):
-    input_image_pil = Image.open(input_image_path)
-    base_size = input_image_pil.size
-    resize_image = resize_image_aspect_ratio(input_image_pil)
-    resize_image_size = resize_image.size
-    width = resize_image_size[0]
-    height = resize_image_size[1]
-    white_base_pil = base_generation(resize_image.size, (255, 255, 255, 255)).convert("RGB")
-    conditioning, pooled = compel([prompt, negative_prompt])
-    generator = torch.manual_seed(0)
-    last_time = time.time()
-
-    output_image = pipe(
-        image=white_base_pil,
-        control_image=resize_image,
-        strength=1.0,
-        prompt_embeds=conditioning[0:1],
-        pooled_prompt_embeds=pooled[0:1],
-        negative_prompt_embeds=conditioning[1:2],
-        negative_pooled_prompt_embeds=pooled[1:2],
-        width=width,
-        height=height,
-        controlnet_conditioning_scale=float(controlnet_conditioning_scale),
-        controlnet_start=0.0,
-        controlnet_end=1.0,
-        generator=generator,
-        num_inference_steps=30,
-        guidance_scale=8.5,
-        eta=1.0,
-    )
-    print(f"Time taken: {time.time() - last_time}")
-    output_image = output_image.resize(base_size, Image.LANCZOS)
-    return output_image
-
-
-css = """
-#intro{
-    # max-width: 32rem;
-    # text-align: center;
-    # margin: 0 auto;
-}
-"""
-
-with gr.Blocks(css=css) as demo:
-    with gr.Row() as block:
-        with gr.Column():
-            # Row for image upload
-            with gr.Row():
-                with gr.Column():
-                    input_image_path = gr.Image(label="入力画像", type='filepath')
-
-            # Row for prompt input
-            with gr.Row():
-                prompt_analysis = PromptAnalysis(tagger_dir)
-                [prompt, nega] = prompt_analysis.layout(input_image_path)
-            # Slider row for detailed image settings
-            with gr.Row():
-                controlnet_conditioning_scale = gr.Slider(minimum=0.5, maximum=1.25, value=1.0, step=0.01, interactive=True, label="線画忠実度")
-
-            # Row for the image-generation button
-            with gr.Row():
-                generate_button = gr.Button("生成", interactive=False)
-
-        with gr.Column():
-            output_image = gr.Image(type="pil", label="Output Image")
-
-        # Input and output settings
-        inputs = [
-            input_image_path,
-            prompt,
-            nega,
-            controlnet_conditioning_scale,
-        ]
-        outputs = [output_image]
-
-        # Wire up the button's click event
-        generate_button.click(
-            fn=predict,
-            inputs=[input_image_path, prompt, nega, controlnet_conditioning_scale],
-            outputs=[output_image]
+class Img2Img:
+    def __init__(self):
+        self.input_image_path = None
+
+    @spaces.GPU
+    def predict(
+        self,
+        input_image_path,
+        prompt,
+        negative_prompt,
+        controlnet_conditioning_scale,
+    ):
+        input_image_pil = Image.open(input_image_path)
+        base_size = input_image_pil.size
+        resize_image = resize_image_aspect_ratio(input_image_pil)
+        resize_image_size = resize_image.size
+        width = resize_image_size[0]
+        height = resize_image_size[1]
+        white_base_pil = base_generation(resize_image.size, (255, 255, 255, 255)).convert("RGB")
+        conditioning, pooled = compel([prompt, negative_prompt])
+        generator = torch.manual_seed(0)
+        last_time = time.time()
+
+        output_image = pipe(
+            image=white_base_pil,
+            control_image=resize_image,
+            strength=1.0,
+            prompt_embeds=conditioning[0:1],
+            pooled_prompt_embeds=pooled[0:1],
+            negative_prompt_embeds=conditioning[1:2],
+            negative_pooled_prompt_embeds=pooled[1:2],
+            width=width,
+            height=height,
+            controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+            controlnet_start=0.0,
+            controlnet_end=1.0,
+            generator=generator,
+            num_inference_steps=30,
+            guidance_scale=8.5,
+            eta=1.0,
         )
+        print(f"Time taken: {time.time() - last_time}")
+        output_image = output_image.resize(base_size, Image.LANCZOS)
+        return output_image
+
+
+    css = """
+    #intro{
+        # max-width: 32rem;
+        # text-align: center;
+        # margin: 0 auto;
+    }
+    """
+    def layout(self):
+        with gr.Blocks(css=css) as demo:
+            with gr.Row() as block:
+                with gr.Column():
+                    # Row for image upload
+                    with gr.Row():
+                        with gr.Column():
+                            self.input_image_path = gr.Image(label="入力画像", type='filepath')
+
+                    # Row for prompt input
+                    with gr.Row():
+                        prompt_analysis = PromptAnalysis(tagger_dir)
+                        [prompt, nega] = prompt_analysis.layout(self.input_image_path)
+                    # Slider row for detailed image settings
+                    with gr.Row():
+                        controlnet_conditioning_scale = gr.Slider(minimum=0.5, maximum=1.25, value=1.0, step=0.01, interactive=True, label="線画忠実度")
+
+                    # Row for the image-generation button
+                    with gr.Row():
+                        generate_button = gr.Button("生成", interactive=False)
 
-# Configure and launch the demo
-demo.queue(api_open=True)
-demo.launch(show_api=True)
+                with gr.Column():
+                    output_image = gr.Image(type="pil", label="Output Image")
+
+                # Input and output settings
+                inputs = [
+                    input_image_path,
+                    prompt,
+                    nega,
+                    controlnet_conditioning_scale,
+                ]
+                outputs = [output_image]
+
+                # Wire up the button's click event
+                generate_button.click(
+                    fn=self.predict,
+                    inputs=[self.input_image_path, prompt, nega, controlnet_conditioning_scale],
+                    outputs=[output_image]
+                )
+
+        # Configure and launch the demo
+        demo.queue(api_open=True)
+        demo.launch(show_api=True)
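
For reference, here is a minimal sketch of how the refactored class might be driven from the bottom of app.py. This caller is an assumption, not part of the commit: the hunk ends at new line 170 and does not show how layout() is invoked. Because demo.queue() and demo.launch() now run inside layout(), a single call both builds the Gradio UI and starts the app. Note that css is now a class attribute, so gr.Blocks(css=css) inside layout() only resolves if a module-level css also exists above this hunk; otherwise it would need self.css or Img2Img.css.

# Hypothetical entry point (not part of this commit): instantiate the class
# and call layout(), which builds the Blocks UI, binds generate_button.click
# to self.predict, and then queues and launches the demo itself.
img2img = Img2Img()
img2img.layout()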