Update app.py
Browse files
app.py
CHANGED
class Generator(nn.Module):
    """DCGAN-style text-conditioned image generator.

    Concatenates a noise vector with an encoded-text embedding along the
    channel dimension and upsamples it to an image. With the default
    arguments the output is (nc, 64, 64): layer1 projects the
    (nz + nt, 1, 1) input to a 4x4 feature map, and the four stride-2
    ConvTranspose2d stages (layers 5, 9, 10, 11) each double the spatial
    size (4 -> 8 -> 16 -> 32 -> 64). Output values are in [-1, 1] (Tanh).

    Args:
        nz: length of the input noise vector.
        ngf: base number of generator feature maps.
        nt: length of the text-embedding vector (768 matches xlnet-base).
        nc: number of output image channels.
    """

    def __init__(self, nz=100, ngf=64, nt=768, nc=3):
        super().__init__()
        # Project the (nz + nt, 1, 1) latent to an (ngf*8, 4, 4) map.
        # NOTE(review): unlike every other layer this one has no activation;
        # layer2's 1x1 conv acts directly on the BatchNorm output. Kept
        # as-is because the checkpoint's weights depend on this layout.
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(nz + nt, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8)
        )
        # Layers 2-4: bottleneck block at 4x4 resolution
        # (1x1 squeeze to ngf*2 -> 3x3 -> 3x3 expand back to ngf*8).
        # NOTE(review): Dropout2d placed BEFORE BatchNorm2d is unusual
        # (dropout normally follows normalization); preserved verbatim so
        # the module layout matches the trained checkpoint.
        self.layer2 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 2, 1, 1),
            nn.Dropout2d(inplace=True),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True)
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(ngf * 2, ngf * 2, 3, 1, 1),
            nn.Dropout2d(inplace=True),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True)
        )
        self.layer4 = nn.Sequential(
            nn.Conv2d(ngf * 2, ngf * 8, 3, 1, 1),
            nn.Dropout2d(inplace=True),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True)
        )
        # Upsample 4x4 -> 8x8.
        self.layer5 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True)
        )
        # Layers 6-8: bottleneck block at 8x8 resolution
        # (1x1 squeeze to ngf -> 3x3 -> 3x3 expand back to ngf*4).
        self.layer6 = nn.Sequential(
            nn.Conv2d(ngf * 4, ngf, 1, 1),
            nn.Dropout2d(inplace=True),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True)
        )
        self.layer7 = nn.Sequential(
            nn.Conv2d(ngf, ngf, 3, 1, 1),
            nn.Dropout2d(inplace=True),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True)
        )
        self.layer8 = nn.Sequential(
            nn.Conv2d(ngf, ngf * 4, 3, 1, 1),
            nn.Dropout2d(inplace=True),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True)
        )
        # Upsample 8x8 -> 16x16.
        self.layer9 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True)
        )
        # Upsample 16x16 -> 32x32.
        self.layer10 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True)
        )
        # Upsample 32x32 -> 64x64 and squash to [-1, 1].
        self.layer11 = nn.Sequential(
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
        )

    def forward(self, noise, encoded_text):
        """Generate images from noise conditioned on text embeddings.

        NOTE(review): the `def forward` line itself is not visible in this
        chunk; the signature is reconstructed from the torch.cat usage
        below — confirm against the full file. Both inputs are presumably
        shaped (N, C, 1, 1) so channel-wise concatenation feeds layer1's
        4x4 transposed conv — TODO confirm with the caller.
        """
        x = torch.cat([noise, encoded_text], dim=1)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        x = self.layer8(x)
        x = self.layer9(x)
        x = self.layer10(x)
        x = self.layer11(x)
        return x


# Load the model and tokenizer
# NOTE(review): XLNetTokenizer must be imported from `transformers` earlier
# in the file (the import is not visible in this chunk), and the actual
# loading of `model_path` into a Generator presumably follows below —
# confirm against the full file.
model_path = "checkpoint.pth" # Adjust as necessary
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased')