import spaces
import numpy as np
from PIL import Image
import gradio as gr
import open3d as o3d
import trimesh
from diffusers import ControlNetModel, StableDiffusionXLControlNetPipeline, EulerAncestralDiscreteScheduler
import torch
from collections import Counter
import random
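# Shared across Gradio callbacks; load_model() fills these in at startup.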
pipe = None
device = None
torch_dtype = None
def load_model():
    global pipe, device, torch_dtype
    device = "cuda" if torch.cuda.is_available() else "cpu"
    torch_dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
        "yeq6x/animagine_position_map",
        controlnet=ControlNetModel.from_pretrained("yeq6x/Image2PositionColor_v3", torch_dtype=torch_dtype),
        torch_dtype=torch_dtype,  # was computed but never passed; without it the pipeline loads in float32
    ).to(device)
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe
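# Note: np.array() on a PIL image yields an RGB array; despite the name,
# no RGB-to-BGR swap is performed, and none is needed downstream.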
def convert_pil_to_opencv(pil_image):
    return np.array(pil_image)
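# inv_func appears to invert a fitted log curve used to encode positions into
# pixel values, y = a * ln(964.8468371292845 * x + exp(-c / a)) + c, so
# inv_func(y) recovers x. The constants look empirically fitted; b is accepted
# but unused.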
def inv_func(y,
             c=-712.380100,
             a=137.375240,
             b=192.435866):
    return (np.exp((y - c) / a) - np.exp(-c / a)) / 964.8468371292845
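# create_point_cloud treats img1 as the position map (RGB encodes XYZ, decoded
# per channel by inv_func with what appear to be hand-tuned axis scales) and
# img2 as the texture that supplies each point's color.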
def create_point_cloud(img1, img2):
    if img1.shape != img2.shape:
        raise ValueError("Both images must have the same dimensions.")

    h, w, _ = img1.shape
    points = []
    colors = []
    for y in range(h):
        for x in range(w):
            # Read the RGB value at pixel (x, y) and decode it as an XYZ position
            r, g, b = img1[y, x]
            r = inv_func(r) * 0.9
            g = inv_func(g) / 1.7 * 0.6
            b = inv_func(b)
            r *= 150
            g *= 150
            b *= 150
            points.append([g, b, r])  # X, Y, Z
            # Take the color of the corresponding pixel in the second image
            colors.append(img2[y, x] / 255.0)  # scale colors to the 0-1 range
    return np.array(points), np.array(colors)
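# The PLY round-trip below is presumably because Open3D's write_point_cloud
# cannot emit GLB directly; trimesh reloads the PLY and handles the GLB export.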
def point_cloud_to_glb(points, colors):
    # Build an Open3D point cloud
    pc = o3d.geometry.PointCloud()
    pc.points = o3d.utility.Vector3dVector(points)
    pc.colors = o3d.utility.Vector3dVector(colors)

    # Save it temporarily in PLY format
    temp_ply_file = "temp_output.ply"
    o3d.io.write_point_cloud(temp_ply_file, pc)

    # Convert the PLY to GLB
    mesh = trimesh.load(temp_ply_file)
    glb_file = "output.glb"
    mesh.export(glb_file)

    return glb_file
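# Note the argument order: visualize_3d receives the position map first and the
# color image second, matching the [img2, img1] wiring of the click handler below.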
def visualize_3d(image1, image2):
    print("Processing...")
    # Convert the PIL images to OpenCV-style NumPy arrays
    img1 = convert_pil_to_opencv(image1)
    img2 = convert_pil_to_opencv(image2)
    # Generate the point cloud
    points, colors = create_point_cloud(img1, img2)
    # Convert it to GLB format
    glb_file = point_cloud_to_glb(points, colors)
    return glb_file
def scale_image(original_image):
    # Scale so the longer side becomes 1024 px, preserving the aspect ratio
    aspect_ratio = original_image.width / original_image.height
    if original_image.width > original_image.height:
        new_width = 1024
        new_height = round(new_width / aspect_ratio)
    else:
        new_height = 1024
        new_width = round(new_height * aspect_ratio)
    resized_original = original_image.resize((new_width, new_height), Image.LANCZOS)
    return resized_original
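# Using the most frequent border color as the fill means the padding blends
# into whatever background the input already has.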
def get_edge_mode_color(img, edge_width=10):
    # Grab the edge_width-pixel strips along the border
    left = img.crop((0, 0, edge_width, img.height))  # left edge
    right = img.crop((img.width - edge_width, 0, img.width, img.height))  # right edge
    top = img.crop((0, 0, img.width, edge_width))  # top edge
    bottom = img.crop((0, img.height - edge_width, img.width, img.height))  # bottom edge

    # Collect and combine the pixel data from each strip
    colors = list(left.getdata()) + list(right.getdata()) + list(top.getdata()) + list(bottom.getdata())

    # Compute the mode, i.e. the most frequently occurring color
    mode_color = Counter(colors).most_common(1)[0][0]
    return mode_color
def paste_image(resized_img):
    # Use the mode color of the 10 px border as the background color
    mode_color = get_edge_mode_color(resized_img, edge_width=10)

    mode_background = Image.new("RGBA", (1024, 1024), mode_color)
    mode_background = mode_background.convert('RGB')

    # Center the resized image on the 1024x1024 canvas
    x = (1024 - resized_img.width) // 2
    y = (1024 - resized_img.height) // 2
    mode_background.paste(resized_img, (x, y))

    return mode_background
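# "Outpainting" here is plain padding rather than generative outpainting: the
# image is scaled to fit 1024x1024 and centered on a canvas of the border color.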
def outpaint_image(image):
    if image is None:
        return None
    resized_img = scale_image(image)
    image = paste_image(resized_img)
    return image
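# @spaces.GPU asks Hugging Face ZeroGPU Spaces to attach a GPU for the
# duration of this call; on other hardware the decorator is effectively a no-op.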
@spaces.GPU
def predict_image(cond_image, prompt, negative_prompt):
    print("predict position map")
    global pipe
    generator = torch.Generator()
    generator.manual_seed(random.randint(0, 2147483647))
    # Positional arguments map to (prompt, prompt_2, image): the same prompt
    # feeds both SDXL text encoders, and cond_image conditions the ControlNet.
    image = pipe(
        prompt,
        prompt,
        cond_image,
        negative_prompt=negative_prompt,
        width=1024,
        height=1024,
        guidance_scale=8,
        num_inference_steps=20,
        generator=generator,
        guess_mode=True,
        controlnet_conditioning_scale=0.6,
    ).images[0]
    return image
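# Build the pipeline once at startup so every callback reuses the same instance.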
load_model()

# Gradio application
with gr.Blocks() as demo:
    gr.Markdown("## Position Map Visualizer")

    with gr.Row():
        with gr.Column():
            with gr.Row():
                img1 = gr.Image(type="pil", label="color Image", height=300)
                img2 = gr.Image(type="pil", label="map Image", height=300)
            prompt = gr.Textbox("position map, 1girl, white background", label="Prompt")
            negative_prompt = gr.Textbox("lowres, bad anatomy, bad hands, bad feet, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry", label="Negative Prompt")
            predict_map_btn = gr.Button("Predict Position Map")
            visualize_3d_btn = gr.Button("Generate 3D Point Cloud")
        with gr.Column():
            reconstruction_output = gr.Model3D(label="3D Viewer", height=600)

    gr.Examples(
        examples=[
            ["resources/source/000006.png", "resources/target/000006.png"],
            ["resources/source/006420.png", "resources/target/006420.png"],
        ],
        inputs=[img1, img2]
    )

    # Uploading a color image pads it to 1024x1024 in place; the buttons run
    # the ControlNet prediction and the point-cloud reconstruction.
    img1.input(outpaint_image, inputs=img1, outputs=img1)
    predict_map_btn.click(predict_image, inputs=[img1, prompt, negative_prompt], outputs=img2)
    visualize_3d_btn.click(visualize_3d, inputs=[img2, img1], outputs=reconstruction_output)

demo.launch()