File size: 3,299 Bytes
4db9546
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cc28da2
4db9546
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
import gradio as gr
from torchvision import transforms
import torch
import torch.nn as nn


# Prefer GPU when one is visible to torch; otherwise run on CPU.
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(device)

class ResidualBlock(nn.Module):
    """Two 3x3 conv + InstanceNorm layers wrapped in an identity skip connection."""

    def __init__(self, channels):
        super().__init__()
        # NOTE: attribute must stay named `block` so checkpoint state_dict keys match.
        layers = [
            nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.InstanceNorm2d(channels),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        # Identity shortcut: output = input + residual mapping.
        residual = self.block(x)
        return x + residual

# ResNet-style generator: downsample, residual bottleneck, upsample.
class StrongGenerator(nn.Module):
    """CycleGAN-style image-to-image generator.

    Layout: 7x7 stem -> two stride-2 downsampling convs (64 -> 128 -> 256
    channels) -> `num_residual_blocks` residual blocks -> two transposed-conv
    upsampling stages -> 7x7 head with Tanh, so outputs lie in [-1, 1].
    """

    def __init__(self, num_residual_blocks=6):
        super().__init__()

        # Stem: 7x7 conv keeps spatial size (stride 1, padding 3).
        # NOTE: layer order and the `model` attribute name are frozen by the
        # pretrained checkpoint's state_dict keys — do not reorder.
        layers = [
            nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False),
            nn.InstanceNorm2d(64),
            nn.ReLU(inplace=True),
        ]

        # Downsampling: two stages, each halves resolution and doubles channels.
        channels = 64
        for _ in range(2):
            doubled = channels * 2
            layers += [
                nn.Conv2d(channels, doubled, kernel_size=3, stride=2, padding=1, bias=False),
                nn.InstanceNorm2d(doubled),
                nn.ReLU(inplace=True),
            ]
            channels = doubled

        # Bottleneck: residual blocks at the lowest resolution.
        layers.extend(ResidualBlock(channels) for _ in range(num_residual_blocks))

        # Upsampling: two transposed-conv stages restore the input resolution.
        for _ in range(2):
            halved = channels // 2
            layers += [
                nn.ConvTranspose2d(channels, halved, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
                nn.InstanceNorm2d(halved),
                nn.ReLU(inplace=True),
            ]
            channels = halved

        # Head: project back to 3 channels and squash to [-1, 1].
        layers += [
            nn.Conv2d(channels, 3, kernel_size=7, stride=1, padding=3),
            nn.Tanh(),
        ]

        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)

# Build the generator and move it to the selected device.
generator = StrongGenerator().to(device)


# Load the pretrained weights (epoch-100 checkpoint); map_location keeps the
# load working on CPU-only machines even if the checkpoint was saved on GPU.
generator.load_state_dict(torch.load("./generator_epoch_100.pth", map_location=device))
# Inference mode: fixes normalization/dropout behavior for evaluation.
generator.eval()

# The preprocessing pipeline is stateless, so build it once at import time
# instead of recreating it on every request.
_RESTORE_TRANSFORM = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])


def restore_image(mosaic_image):
    """Run the generator on a mosaic PIL image and return the restored image.

    Args:
        mosaic_image: input PIL image (any mode; converted to RGB below).

    Returns:
        A 256x256 RGB PIL image with values de-normalized to [0, 1].
    """
    # Uploaded PNGs may arrive as RGBA or grayscale; the model's first conv
    # expects exactly 3 channels, so force RGB before the transform.
    rgb = mosaic_image.convert("RGB")
    input_tensor = _RESTORE_TRANSFORM(rgb).unsqueeze(0).to(device)
    with torch.no_grad():
        restored_tensor = generator(input_tensor)
    restored_tensor = restored_tensor.squeeze(0).cpu()
    # Undo Normalize(mean=0.5, std=0.5): map Tanh output [-1, 1] back to [0, 1].
    restored_tensor = (restored_tensor * 0.5 + 0.5).clamp(0, 1)
    return transforms.ToPILImage()(restored_tensor)

# Gradio web UI: one image in, one image out.
iface = gr.Interface(
    fn=restore_image,
    inputs=gr.Image(type="pil"),  # deliver the upload to the callback as a PIL image
    outputs="image",
    title="Dog Image Mosaic Restoration",
    description="上傳打碼後的狗狗圖像,模型將嘗試還原原始圖像。"
)
# Start the local web server (blocks until shut down).
iface.launch()