yeq6x committed
Commit f611314 · 1 Parent(s): ced2b28

load_model

Files changed (1): app.py +130 -184

app.py CHANGED
@@ -1,192 +1,138 @@
  import spaces
- import numpy as np
  from PIL import Image
- import gradio as gr
- import open3d as o3d
- import trimesh
- from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, EulerAncestralDiscreteScheduler
  import torch
- from collections import Counter
- import random
-
- pipe = None
- device = None
- torch_dtype = None
-
- def load_model():
-     global pipe, device, torch_dtype
-     device = "cuda" if torch.cuda.is_available() else "cpu"
-     torch_dtype = torch.float16 if device == "cuda" else torch.float32
-
-     pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
-         "yeq6x/animagine_position_map",
-         controlnet=ControlNetModel.from_pretrained("yeq6x/Image2PositionColor_v3"),
-     ).to(device)
-     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
-     return pipe
-
- def convert_pil_to_opencv(pil_image):
-     return np.array(pil_image)
-
- def inv_func(y,
-              c = -712.380100,
-              a = 137.375240,
-              b = 192.435866):
-     return (np.exp((y - c) / a) - np.exp(-c/a)) / 964.8468371292845
-
- def create_point_cloud(img1, img2):
-     if img1.shape != img2.shape:
-         raise ValueError("Both images must have the same dimensions.")
-
-     h, w, _ = img1.shape
-     points = []
-     colors = []
-     for y in range(h):
-         for x in range(w):
-             # Read the RGB value at pixel (x, y) and use it as XYZ
-             r, g, b = img1[y, x]
-             r = inv_func(r) * 0.9
-             g = inv_func(g) / 1.7 * 0.6
-             b = inv_func(b)
-             r *= 150
-             g *= 150
-             b *= 150
-             points.append([g, b, r])  # X, Y, Z
-             # Take the color of the corresponding pixel in image 2
-             colors.append(img2[y, x] / 255.0)  # scale colors to 0-1
-
-     return np.array(points), np.array(colors)
-
- def point_cloud_to_glb(points, colors):
-     # Build a point cloud with Open3D
-     pc = o3d.geometry.PointCloud()
-     pc.points = o3d.utility.Vector3dVector(points)
-     pc.colors = o3d.utility.Vector3dVector(colors)
-
-     # Save it temporarily in PLY format
-     temp_ply_file = "temp_output.ply"
-     o3d.io.write_point_cloud(temp_ply_file, pc)
-
-     # Convert the PLY to GLB
-     mesh = trimesh.load(temp_ply_file)
-     glb_file = "output.glb"
-     mesh.export(glb_file)
-
-     return glb_file
-
- def visualize_3d(image1, image2):
-     print("Processing...")
-     # Convert the PIL images to OpenCV (NumPy) format
-     img1 = convert_pil_to_opencv(image1)
-     img2 = convert_pil_to_opencv(image2)
-
-     # Generate the point cloud
-     points, colors = create_point_cloud(img1, img2)
-
-     # Convert it to GLB
-     glb_file = point_cloud_to_glb(points, colors)
-
-     return glb_file
-
- def scale_image(original_image):
-     aspect_ratio = original_image.width / original_image.height
-
-     if original_image.width > original_image.height:
-         new_width = 1024
-         new_height = round(new_width / aspect_ratio)
-     else:
-         new_height = 1024
-         new_width = round(new_height * aspect_ratio)
-
-     resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)
-
-     return resized_original
-
- def get_edge_mode_color(img, edge_width=10):
-     # Grab the 10-pixel band around the border
-     left = img.crop((0, 0, edge_width, img.height))  # left edge
-     right = img.crop((img.width - edge_width, 0, img.width, img.height))  # right edge
-     top = img.crop((0, 0, img.width, edge_width))  # top edge
-     bottom = img.crop((0, img.height - edge_width, img.width, img.height))  # bottom edge
-
-     # Collect and concatenate the pixel data from each band
-     colors = list(left.getdata()) + list(right.getdata()) + list(top.getdata()) + list(bottom.getdata())
-
-     # Compute the mode (the most frequently occurring color)
-     mode_color = Counter(colors).most_common(1)[0][0]
-
-     return mode_color
-
- def paste_image(resized_img):
-     # Use the mode color of the 10 px border as the background color
-     mode_color = get_edge_mode_color(resized_img, edge_width=10)
-     mode_background = Image.new("RGBA", (1024, 1024), mode_color)
-     mode_background = mode_background.convert('RGB')
-
-     x = (1024 - resized_img.width) // 2
-     y = (1024 - resized_img.height) // 2
-     mode_background.paste(resized_img, (x, y))
-
-     return mode_background

- def outpaint_image(image):
-     if type(image) == type(None):
-         return None
-     resized_img = scale_image(image)
-     image = paste_image(resized_img)
-
-     return image

  @spaces.GPU
- def predict_image(cond_image, prompt, negative_prompt):
-     print("predict position map")
-     global pipe
-     generator = torch.Generator()
-     generator.manual_seed(random.randint(0, 2147483647))
-     image = pipe(
-         prompt,
-         prompt,
-         cond_image,
-         negative_prompt=negative_prompt,
-         width=1024,
-         height=1024,
-         guidance_scale=8,
-         num_inference_steps=20,
-         generator=generator,
-         guess_mode=True,
-         controlnet_conditioning_scale=0.6,
-     ).images[0]
-
-     return image
-
- load_model()

- # Gradio application
- with gr.Blocks() as demo:
-     gr.Markdown("## Position Map Visualizer")

-     with gr.Row():
-         with gr.Column():
-             with gr.Row():
-                 img1 = gr.Image(type="pil", label="color Image", height=300)
-                 img2 = gr.Image(type="pil", label="map Image", height=300)
-             prompt = gr.Textbox("position map, 1girl, white background", label="Prompt")
-             negative_prompt = gr.Textbox("lowres, bad anatomy, bad hands, bad feet, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry", label="Negative Prompt")
-             predict_map_btn = gr.Button("Predict Position Map")
-             visualize_3d_btn = gr.Button("Generate 3D Point Cloud")
-         with gr.Column():
-             reconstruction_output = gr.Model3D(label="3D Viewer", height=600)
-             gr.Examples(
-                 examples=[
-                     ["resources/source/000006.png", "resources/target/000006.png"],
-                     ["resources/source/006420.png", "resources/target/006420.png"],
-                 ],
-                 inputs=[img1, img2]
-             )
-
-     img1.input(outpaint_image, inputs=img1, outputs=img1)
-     predict_map_btn.click(predict_image, inputs=[img1, prompt, negative_prompt], outputs=img2)
-     visualize_3d_btn.click(visualize_3d, inputs=[img2, img1], outputs=reconstruction_output)
-
- demo.launch()

  import spaces
+ from diffusers import ControlNetModel
+ from diffusers import StableDiffusionXLControlNetPipeline
+ from diffusers import EulerAncestralDiscreteScheduler
  from PIL import Image
  import torch
+ import numpy as np
+ import cv2
+ import gradio as gr
+ from torchvision import transforms
+ from controlnet_aux import OpenposeDetector
+
+ ratios_map = {
+     0.5: {"width": 704, "height": 1408},
+     0.57: {"width": 768, "height": 1344},
+     0.68: {"width": 832, "height": 1216},
+     0.72: {"width": 832, "height": 1152},
+     0.78: {"width": 896, "height": 1152},
+     0.82: {"width": 896, "height": 1088},
+     0.88: {"width": 960, "height": 1088},
+     0.94: {"width": 960, "height": 1024},
+     1.00: {"width": 1024, "height": 1024},
+     1.13: {"width": 1088, "height": 960},
+     1.21: {"width": 1088, "height": 896},
+     1.29: {"width": 1152, "height": 896},
+     1.38: {"width": 1152, "height": 832},
+     1.46: {"width": 1216, "height": 832},
+     1.67: {"width": 1280, "height": 768},
+     1.75: {"width": 1344, "height": 768},
+     2.00: {"width": 1408, "height": 704}
+ }
+ ratios = np.array(list(ratios_map.keys()))
+
+
+ openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
+
+ controlnet = ControlNetModel.from_pretrained(
+     "briaai/BRIA-2.3-ControlNet-Pose",
+     torch_dtype=torch.float16
+ ).to('cuda')
+
+ pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
+     "briaai/BRIA-2.3",
+     controlnet=controlnet,
+     torch_dtype=torch.float16,
+     low_cpu_mem_usage=True,
+     offload_state_dict=True,
+ ).to('cuda').to(torch.float16)
+
+ pipe.scheduler = EulerAncestralDiscreteScheduler(
+     beta_start=0.00085,
+     beta_end=0.012,
+     beta_schedule="scaled_linear",
+     num_train_timesteps=1000,
+     steps_offset=1
+ )
+ # pipe.enable_freeu(b1=1.1, b2=1.1, s1=0.5, s2=0.7)
+ # pipe.enable_xformers_memory_efficient_attention()
+ pipe.force_zeros_for_empty_prompt = False
+
+ def get_size(init_image):
+     w, h = init_image.size
+     curr_ratio = w / h
+     ind = np.argmin(np.abs(curr_ratio - ratios))
+     ratio = ratios[ind]
+     chosen_ratio = ratios_map[ratio]
+     w, h = chosen_ratio['width'], chosen_ratio['height']
+     return w, h
+
+ def resize_image(image):
+     image = image.convert('RGB')
+     w, h = get_size(image)
+     resized_image = image.resize((w, h))
+     return resized_image
+
+ def resize_image_old(image):
+     image = image.convert('RGB')
+     current_size = image.size
+     if current_size[0] > current_size[1]:
+         center_cropped_image = transforms.functional.center_crop(image, (current_size[1], current_size[1]))
+     else:
+         center_cropped_image = transforms.functional.center_crop(image, (current_size[0], current_size[0]))
+     resized_image = transforms.functional.resize(center_cropped_image, (1024, 1024))
+     return resized_image

  @spaces.GPU
+ def generate_(prompt, negative_prompt, pose_image, input_image, num_steps, controlnet_conditioning_scale, seed):
+     generator = torch.Generator("cuda").manual_seed(seed)
+     images = pipe(
+         prompt, negative_prompt=negative_prompt, image=pose_image, num_inference_steps=num_steps, controlnet_conditioning_scale=float(controlnet_conditioning_scale),
+         generator=generator, height=input_image.size[1], width=input_image.size[0],
+     ).images
+     return images
+
+ @spaces.GPU
+ def process(input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed):
+
+     # resize input_image to the nearest supported resolution bucket
+     input_image = resize_image(input_image)
+
+     pose_image = openpose(input_image, include_body=True, include_hand=True, include_face=True)

+     images = generate_(prompt, negative_prompt, pose_image, input_image, num_steps, controlnet_conditioning_scale, seed)
+
+     return [pose_image, images[0]]
+
+ block = gr.Blocks().queue()
+
+ with block:
+     gr.Markdown("## BRIA 2.3 ControlNet Pose")
+     gr.HTML('''
+     <p style="margin-bottom: 10px; font-size: 94%">
+     This is a demo for ControlNet Pose that uses the
+     <a href="https://huggingface.co/briaai/BRIA-2.3" target="_blank">BRIA 2.3 text-to-image model</a> as its backbone.
+     Trained on licensed data, BRIA 2.3 provides full legal liability coverage for copyright and privacy infringement.
+     </p>
+     ''')
+     with gr.Row():
+         with gr.Column():
+             input_image = gr.Image(sources=None, type="pil")  # None allows upload, paste (Ctrl+V) and webcam
+             prompt = gr.Textbox(label="Prompt")
+             negative_prompt = gr.Textbox(label="Negative prompt", value="Logo,Watermark,Text,Ugly,Morbid,Extra fingers,Poorly drawn hands,Mutation,Blurry,Extra limbs,Gross proportions,Missing arms,Mutated hands,Long neck,Duplicate,Mutilated,Mutilated hands,Poorly drawn face,Deformed,Bad anatomy,Cloned face,Malformed limbs,Missing legs,Too many fingers")
+             num_steps = gr.Slider(label="Number of steps", minimum=25, maximum=100, value=50, step=1)
+             controlnet_conditioning_scale = gr.Slider(label="ControlNet conditioning scale", minimum=0.1, maximum=2.0, value=1.0, step=0.05)
+             seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
+             run_button = gr.Button(value="Run")
+
+         with gr.Column():
+             with gr.Row():
+                 pose_image_output = gr.Image(label="Pose Image", type="pil", interactive=False)
+                 generated_image_output = gr.Image(label="Generated Image", type="pil", interactive=False)
+
+     ips = [input_image, prompt, negative_prompt, num_steps, controlnet_conditioning_scale, seed]
+     run_button.click(fn=process, inputs=ips, outputs=[pose_image_output, generated_image_output])
+
+ block.launch(debug=True)
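
For reference, a minimal standalone sketch (not part of the diff above) of the aspect-ratio bucketing that the added get_size helper performs; the bucket values are copied from ratios_map in the new app.py, abbreviated here to three entries:

import numpy as np

# Abbreviated copy of the ratios_map buckets (the full table in app.py has 17 entries).
ratios_map = {
    0.5: {"width": 704, "height": 1408},
    1.00: {"width": 1024, "height": 1024},
    2.00: {"width": 1408, "height": 704},
}
ratios = np.array(list(ratios_map.keys()))

def get_size(size):
    """Snap an input (width, height) to the nearest supported resolution bucket."""
    w, h = size
    ind = np.argmin(np.abs(w / h - ratios))    # closest supported aspect ratio
    bucket = ratios_map[float(ratios[ind])]    # look up the bucket for that ratio
    return bucket["width"], bucket["height"]

print(get_size((640, 1280)))  # 1:2 portrait input  -> (704, 1408)
print(get_size((800, 800)))   # square input        -> (1024, 1024)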