BeanSamuel committed
Commit 307acfb · 1 Parent(s): 0041bd3

first push

Files changed (3)
  1. app.py +92 -0
  2. generator_epoch_10.pth +3 -0
  3. requirements.txt +0 -0
app.py ADDED
@@ -0,0 +1,92 @@
+ import gradio as gr
+ from torchvision import transforms
+ import torch
+ import torch.nn as nn
+
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ print(device)
+
+ class ResidualBlock(nn.Module):
+     def __init__(self, channels):
+         super(ResidualBlock, self).__init__()
+         self.block = nn.Sequential(
+             nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False),
+             nn.InstanceNorm2d(channels),
+             nn.ReLU(inplace=True),
+             nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1, bias=False),
+             nn.InstanceNorm2d(channels)
+         )
+
+     def forward(self, x):
+         return x + self.block(x)
+
+ class StrongGenerator(nn.Module):
+     def __init__(self, num_residual_blocks=6):
+         super(StrongGenerator, self).__init__()
+         model = [
+             nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3, bias=False),
+             nn.InstanceNorm2d(64),
+             nn.ReLU(inplace=True)
+         ]
+
+         in_channels = 64
+         for _ in range(2):
+             out_channels = in_channels * 2
+             model += [
+                 nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1, bias=False),
+                 nn.InstanceNorm2d(out_channels),
+                 nn.ReLU(inplace=True)
+             ]
+             in_channels = out_channels
+
+         for _ in range(num_residual_blocks):
+             model += [ResidualBlock(in_channels)]
+
+         for _ in range(2):
+             out_channels = in_channels // 2
+             model += [
+                 nn.ConvTranspose2d(in_channels, out_channels, kernel_size=3, stride=2, padding=1, output_padding=1, bias=False),
+                 nn.InstanceNorm2d(out_channels),
+                 nn.ReLU(inplace=True)
+             ]
+             in_channels = out_channels
+
+         model += [
+             nn.Conv2d(in_channels, 3, kernel_size=7, stride=1, padding=3),
+             nn.Tanh()
+         ]
+
+         self.model = nn.Sequential(*model)
+
+     def forward(self, x):
+         return self.model(x)
+
+ generator = StrongGenerator().to(device)
+
+
+ generator.load_state_dict(torch.load("./generator_epoch_10.pth", map_location=device))
+ generator.eval()
+
+ def restore_image(mosaic_image):
+     transform_in = transforms.Compose([
+         transforms.Resize((256, 256)),
+         transforms.ToTensor(),
+         transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))
+     ])
+     input_tensor = transform_in(mosaic_image).unsqueeze(0).to(device)
+     with torch.no_grad():
+         restored_tensor = generator(input_tensor)
+     restored_tensor = restored_tensor.squeeze(0).cpu()
+     restored_tensor = (restored_tensor * 0.5 + 0.5).clamp(0, 1)
+     restored_image = transforms.ToPILImage()(restored_tensor)
+     return restored_image
+
+ iface = gr.Interface(
+     fn=restore_image,
+     inputs=gr.Image(type="pil"),
+     outputs="image",
+     title="HorrorMovieStyleGAN",
+     description="Upload an image and the model will try to give it a horror-movie style."
+ )
+ iface.launch()
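
Usage note: once app.py is running (iface.launch() serves on http://127.0.0.1:7860 by default), the interface can also be called programmatically rather than through the browser. A minimal sketch, assuming a recent gradio_client is installed and using an illustrative local test image named mosaic.jpg:

from gradio_client import Client, handle_file

# Connect to the locally running Gradio app (or to the hosted Space URL).
client = Client("http://127.0.0.1:7860")

# gr.Interface exposes its function under the default "/predict" endpoint;
# the "image" output comes back as a path to the restored image file.
result_path = client.predict(handle_file("mosaic.jpg"), api_name="/predict")
print("Restored image saved at:", result_path)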
generator_epoch_10.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c6871d3a43eeee2b2590faac28e54a1ac9431572ed0c679181ffe57863fd6518
+ size 31344538
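
Note: this file is committed as a Git LFS pointer, so the actual ~31 MB checkpoint lives in LFS storage; a plain clone without git lfs pull keeps only the pointer, and the torch.load call in app.py would then fail. A minimal sketch of fetching the real weights with huggingface_hub, where the repo id is a placeholder for this Space's actual id:

from huggingface_hub import hf_hub_download

# Placeholder repo id -- replace with the real <owner>/<space-name> of this Space.
weights_path = hf_hub_download(
    repo_id="<owner>/<space-name>",
    filename="generator_epoch_10.pth",
    repo_type="space",
)
print(weights_path)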
requirements.txt ADDED
Binary file (2.06 kB).