import gradio as gr
# import spaces
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from model_module import AutoencoderModule
from dataset import MyDataset, load_filenames
from utils import DistanceMapLogger
import numpy as np
from PIL import Image
import base64
from io import BytesIO

# Load the model and data
def load_model():
    model_path = "checkpoints/autoencoder-epoch=49-train_loss=1.01.ckpt"
    feature_dim = 64
    model = AutoencoderModule(feature_dim=feature_dim)
    checkpoint = torch.load(model_path, map_location="cpu")
    # A Lightning checkpoint stores the weights under the 'state_dict' key
    model.load_state_dict(checkpoint['state_dict'])
    model.eval()
    
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    print("Model loaded successfully.")
    return model, device

def load_data(device, img_dir="resources/trainB/", image_size=112, batch_size=32):
    filenames = load_filenames(img_dir)
    train_X = filenames[:1000]
    train_ds = MyDataset(train_X, img_dir=img_dir, img_size=image_size)
    
    train_loader = DataLoader(
        train_ds,
        batch_size=batch_size,
        shuffle=True,
        num_workers=0,
    )
    
    iterator = iter(train_loader)
    x, _, _ = next(iterator)
    # Take the first element along dim 1 and move the batch to the device
    x = x[:, 0].to(device)
    print("Data loaded successfully.")
    return x

model, device = load_model()
image_size = 112
batch_size = 32
x = load_data(device)

# Preprocess an uploaded image into a model-ready tensor
def preprocess_uploaded_image(uploaded_image, image_size):
    # Convert ndarray input to a PIL image
    if isinstance(uploaded_image, np.ndarray):
        uploaded_image = Image.fromarray(uploaded_image)
    uploaded_image = uploaded_image.convert("RGB")
    uploaded_image = uploaded_image.resize((image_size, image_size))
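    # HWC uint8 in [0, 255] -> CHW float32 in [0, 1]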
    uploaded_image = np.array(uploaded_image).transpose(2, 0, 1) / 255.0
    uploaded_image = torch.tensor(uploaded_image, dtype=torch.float32).unsqueeze(0).to(device)
    return uploaded_image

# Generate the heatmap figure for the selected source image and coordinates
# @spaces.GPU
def get_heatmaps(source_num, x_coords, y_coords, uploaded_image):
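    # Inputs coming from gr.Examples arrive as strings; coerce them to the expected types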
    if isinstance(uploaded_image, str):
        uploaded_image = Image.open(uploaded_image)
    if isinstance(source_num, str):
        source_num = int(source_num)
    if isinstance(x_coords, str):
        x_coords = int(x_coords)
    if isinstance(y_coords, str):
        y_coords = int(y_coords)
    
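    # Encode the fixed training batch; no gradients are needed for visualization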
    with torch.no_grad():
        dec5, _ = model(x)
        img = x
        feature_map = dec5
        batch_size = feature_map.size(0)
        feature_dim = feature_map.size(1)
        
        # Preprocess and encode the uploaded image, then append it to the batch
        if uploaded_image is not None:
            uploaded_image = preprocess_uploaded_image(uploaded_image, image_size)
            target_feature_map, _ = model(uploaded_image)
            img = torch.cat((img, uploaded_image))
            feature_map = torch.cat((feature_map, target_feature_map))
            batch_size += 1
        else:
            uploaded_image = torch.zeros(1, 3, image_size, image_size, device=device)
            
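        # The target is always the last batch entry: the uploaded image if one
        # was given, otherwise the last training image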
        target_num = batch_size - 1

        x_coords = [x_coords] * batch_size
        y_coords = [y_coords] * batch_size

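        # Take the feature vector at (y, x) from every image in the batch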
        vectors = feature_map[torch.arange(feature_map.size(0)), :, y_coords, x_coords]
        vector = vectors[source_num]

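        # Flatten spatial dims, (B, C, H, W) -> (B, H*W, C), so each pixel's
        # feature vector can be compared against the query vector in one call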
        reshaped_feature_map = feature_map.permute(0, 2, 3, 1).view(feature_map.size(0), -1, feature_dim)
        batch_distance_map = F.pairwise_distance(reshaped_feature_map, vector).view(feature_map.size(0), image_size, image_size)
        
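        # Map distances to [0, 1] similarities with a sharp sech^2 bump:
        # the closest features approach 1 and fall off quickly toward 0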
        scaled = 20 * (batch_distance_map - batch_distance_map.min()) / (batch_distance_map.max() - batch_distance_map.min())
        norm_batch_distance_map = 1 / torch.cosh(scaled) ** 2

        source_map = norm_batch_distance_map[source_num]
        target_map = norm_batch_distance_map[target_num]

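        # Blend each map into its image's red channel (green and blue stay zero)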
        alpha = 0.7
        zeros_gb = torch.zeros(2, image_size, image_size, device=device)
        red_source = (source_map / source_map.max()).unsqueeze(0)
        red_target = (target_map / target_map.max()).unsqueeze(0)
        blended_source = (1 - alpha) * img[source_num] + alpha * torch.cat((red_source, zeros_gb))
        blended_target = (1 - alpha) * img[target_num] + alpha * torch.cat((red_target, zeros_gb))
        
        # Lay out the four panels with Matplotlib and return the figure
        fig, axs = plt.subplots(2, 2, figsize=(10, 10))
        axs[0, 0].imshow(source_map.cpu(), cmap='hot')
        axs[0, 0].set_title("Source Map")
        axs[0, 1].imshow(target_map.cpu(), cmap='hot')
        axs[0, 1].set_title("Target Map")
        axs[1, 0].imshow(blended_source.permute(1, 2, 0).cpu())
        axs[1, 0].set_title("Blended Source")
        axs[1, 1].imshow(blended_target.permute(1, 2, 0).cpu())
        axs[1, 1].set_title("Blended Target")
        for ax in axs.flat:
            ax.axis('off')
        
        plt.tight_layout()
        plt.close(fig)  # release the figure; Gradio renders the returned object
        return fig

def process_image(cropped_image_data):
    # Convert the cropped image from a Base64 data URL to a PIL image
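    # A data URL has the form "data:image/png;base64,<payload>"; split off the header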
    header, base64_data = cropped_image_data.split(',', 1)
    image_data = base64.b64decode(base64_data)
    image = Image.open(BytesIO(image_data))
    return image

# JavaScript: dynamically load Cropper.js from a CDN and wire up the in-browser crop UI
scripts = """
async () => {
    const script = document.createElement("script");
    script.src = "https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.5.13/cropper.min.js";
    document.head.appendChild(script);

    const style = document.createElement("link");
    style.rel = "stylesheet";
    style.href = "https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.5.13/cropper.min.css";
    document.head.appendChild(style);

    script.onload = () => {
        let cropper;
        
        document.getElementById("input_file_button").onclick = function() {
            document.querySelector("#input_file").click();
        };

        // Load the image chosen through the hidden file input
        document.querySelector("#input_file").addEventListener("change", function(e) {
            const files = e.target.files;
            console.log(files);
            if (files && files.length > 0) {
                console.log("File selected");
                document.querySelector("#input_file_button").style.display = "none";
                document.querySelector("#crop_view").style.display = "block";
                document.querySelector("#crop_button").style.display = "block";
                const url = URL.createObjectURL(files[0]);
                const crop_view = document.getElementById("crop_view");
                crop_view.src = url;

                if (cropper) {
                    cropper.destroy();
                }
                cropper = new Cropper(crop_view, {
                    aspectRatio: 1,
                    viewMode: 1,
                });
            }
        });

        // Attach the cropping logic to the Gradio "Crop" button
        document.getElementById("crop_button").onclick = function() {
            if (cropper) {
                const canvas = cropper.getCroppedCanvas();
                const croppedImageData = canvas.toDataURL();
                
                // Hand the cropped image to Gradio through the hidden textbox
                const textbox = document.querySelector("#cropped_image_data textarea");
                textbox.value = croppedImageData;
                textbox.dispatchEvent(new Event("input", { bubbles: true }));

                document.getElementById("crop_view").style.display = "none";
                document.getElementById("crop_button").style.display = "none";
                document.querySelector("#input_file_button").style.display = "block";
                
                cropper.destroy();
            }
        };
        document.getElementById("crop_view").style.display = "none";      
        document.getElementById("crop_button").style.display = "none";
    };
}
"""

with gr.Blocks() as demo:
    # title
    gr.Markdown("# TripletGeoEncoder Feature Map Visualization")
    # description
    gr.Markdown("This demo visualizes the feature maps of a TripletGeoEncoder trained on the CelebA dataset with self-supervised learning, using only 1,000 images and no annotations. "
                "The feature maps are shown as heatmaps: the source map shows how similar each pixel of the source image is, in feature space, to the selected pixel, and the target map shows the same for the target image. "
                "The blended source and target views overlay these maps on the corresponding images. "
                "For further information, please contact me on X (formerly Twitter): @Yeq6X.")

    with gr.Row():
        with gr.Column():
            source_num = gr.Slider(0, batch_size - 1, step=1, label="Source Image Index")
            x_coords = gr.Slider(0, image_size - 1, step=1, value=image_size // 2, label="X Coordinate")
            y_coords = gr.Slider(0, image_size - 1, step=1, value=image_size // 2, label="Y Coordinate")

            # Hidden HTML file input; the Gradio button below triggers it from JavaScript
            gr.HTML('<input type="file" id="input_file" style="display:none;">')
            input_file_button = gr.Button("Upload Image and Crop", elem_id="input_file_button", variant="primary")
            crop_button = gr.Button("Crop", elem_id="crop_button", variant="primary")
            # HTML <img> element that Cropper.js uses to display the image
            gr.HTML('<img id="crop_view" style="max-width:100%;">')
            # Hidden textbox that receives the cropped image as a Base64 data URL
            cropped_image_data = gr.Textbox(visible=False, elem_id="cropped_image_data")
            input_image = gr.Image(label="Cropped Image", elem_id="input_image")
            # Call process_image whenever cropped_image_data is updated
            cropped_image_data.change(process_image, inputs=cropped_image_data, outputs=input_image)

            # examples
            gr.Markdown("# Examples")
            gr.Examples(
                examples=[
                ["0", "50", "50", "resources/examples/2488.jpg"],
                ["0", "50", "50", "resources/examples/2899.jpg"]
            ],
                inputs=[source_num, x_coords, y_coords, input_image],
            )
        with gr.Column():
            output_plot = gr.Plot()


        # Recompute the plot whenever any input changes (in place of gr.Interface)
        for component in (source_num, x_coords, y_coords, input_image):
            component.change(get_heatmaps, inputs=[source_num, x_coords, y_coords, input_image], outputs=output_plot)

        # Inject the JavaScript once the app has loaded
        demo.load(None, None, None, js=scripts)
        
    demo.launch()