NeverlandPeter committed on
Commit
dd4d734
·
1 Parent(s): 784bfc9
img_demoAE.py CHANGED
@@ -14,8 +14,9 @@ print(f'loading...')
14
 
15
  ########################################################################################################
16
 
17
- model_prefix = 'out-v7c_d8_256-224-13bit-OB32x0.5-201'
18
- input_img = 'kodim24-modified.png'
 
19
 
20
  ########################################################################################################
21
 
@@ -133,20 +134,20 @@ class R_DECODER(nn.Module):
133
 
134
  ########################################################################################################
135
 
136
- print(f'building model...')
137
  args = types.SimpleNamespace()
138
  args.my_img_bit = 13
139
- encoder = R_ENCODER(args).eval().cuda()
140
- decoder = R_DECODER(args).eval().cuda()
141
 
142
- zpow = torch.tensor([2**i for i in range(0,13)]).reshape(13,1,1).cuda().long()
143
 
144
  encoder.load_state_dict(torch.load(f'{model_prefix}-E.pth'))
145
  decoder.load_state_dict(torch.load(f'{model_prefix}-D.pth'))
146
 
147
  ########################################################################################################
148
 
149
- print(f'test image...')
150
  img_transform = transforms.Compose([
151
  transforms.PILToTensor(),
152
  transforms.ConvertImageDtype(torch.float),
@@ -154,7 +155,7 @@ img_transform = transforms.Compose([
154
  ])
155
 
156
  with torch.no_grad():
157
- img = img_transform(Image.open(input_img)).unsqueeze(0).cuda()
158
  z = encoder(img)
159
  z = ToBinary.apply(z)
160
 
 
14
 
15
  ########################################################################################################
16
 
17
+ model_prefix = 'out-v7c_d8_256-224-13bit-OB32x0.5-226'
18
+ input_img = 'kodim19-modified.png'
19
+ device = 'cpu' # cpu cuda
20
 
21
  ########################################################################################################
22
 
 
134
 
135
  ########################################################################################################
136
 
137
+ print(f'building model {model_prefix}...')
138
  args = types.SimpleNamespace()
139
  args.my_img_bit = 13
140
+ encoder = R_ENCODER(args).eval().to(device)
141
+ decoder = R_DECODER(args).eval().to(device)
142
 
143
+ zpow = torch.tensor([2**i for i in range(0,13)]).reshape(13,1,1).to(device).long()
144
 
145
  encoder.load_state_dict(torch.load(f'{model_prefix}-E.pth'))
146
  decoder.load_state_dict(torch.load(f'{model_prefix}-D.pth'))
147
 
148
  ########################################################################################################
149
 
150
+ print(f'test image {input_img}...')
151
  img_transform = transforms.Compose([
152
  transforms.PILToTensor(),
153
  transforms.ConvertImageDtype(torch.float),
 
155
  ])
156
 
157
  with torch.no_grad():
158
+ img = img_transform(Image.open(input_img)).unsqueeze(0).to(device)
159
  z = encoder(img)
160
  z = ToBinary.apply(z)
161
 
kodim14-modified-out-13bit.png ADDED
kodim14-modified.png ADDED
kodim19-modified-out-13bit.png ADDED
kodim19-modified.png ADDED
kodim24-modified-out-13bit.png CHANGED
out-v7c_d8_256-224-13bit-OB32x0.5-201-D.pth → out-v7c_d8_256-224-13bit-OB32x0.5-226-D.pth RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:917ddad270353caf0243dbd09c2257414b9cb599ee43fe1b41b8e7af49bf03b8
3
  size 25068760
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ef336f14ec15ccaf4c1ef25b96b88ca2ee33e105c6275738eabf519e6aae85c
3
  size 25068760
out-v7c_d8_256-224-13bit-OB32x0.5-201-E.pth → out-v7c_d8_256-224-13bit-OB32x0.5-226-E.pth RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:65933944a19a00241ebfecce4e4b5e9bd2d7f1ac7d10f447b6b8c3e73a92093a
3
  size 25076297
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce5f83d8f6b35f84a2db5ca29b5502f22e2ce4af7265d18e476e20a79a366406
3
  size 25076297