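# Upscales a grayscale (black-and-white) image 4x with an RRDB (ESRGAN-style)
# super-resolution network, assuming the helper modules `architecture` and `util`
# from the original repository are on the import path.
# Usage (inferred from the sys.argv reads below):
#     python <this_script>.py <input_image> <output_file>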
import sys
import os.path
import glob
import cv2
import numpy as np
import torch
import architecture as arch
import multiprocessing
import util
def is_cuda():
    # Use CUDA when a GPU is available, or whenever the script is not running on
    # Google Colab (the original check assumes a non-Colab host has a usable GPU;
    # on a CPU-only machine this still selects 'cuda' and will fail later).
    return torch.cuda.is_available() or not util.is_google_colab()
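# Model weights (4x scale, single grayscale channel) and the input/output paths
# passed on the command line.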
model_path = '4x_eula_digimanga_bw_v2_nc1_307k.pth'
img_path = sys.argv[1]
output_dir = sys.argv[2]
device = torch.device('cuda' if is_cuda() else 'cpu')
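# RRDB generator: 1 input / 1 output channel (grayscale), 64 features,
# 23 residual-in-residual dense blocks, 4x upscaling.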
model = arch.RRDB_Net(1, 1, 64, 23, gc=32, upscale=4, norm_type=None, act_type='leakyrelu', mode='CNA', res_scale=1, upsample_mode='upconv')
model.load_state_dict(torch.load(model_path, map_location=device), strict=True)
model.eval()
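# Inference only: freeze every parameter.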
for k, v in model.named_parameters():
    v.requires_grad = False
model = model.to(device)
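# Base name of the input image (computed but not used further in this script).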
base = os.path.splitext(os.path.basename(img_path))[0]
# read image
print(img_path)
img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
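# Scale to [0, 1], add channel and batch dimensions (1 x 1 x H x W),
# and move the tensor to the selected device.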
img = img * 1.0 / 255
img = torch.from_numpy(img[np.newaxis, :, :]).float()
img_LR = img.unsqueeze(0)
img_LR = img_LR.to(device)
print('Start upscaling...')
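# Forward pass without gradient tracking; drop the batch dimension and
# clamp the result to [0, 1] before converting back to a NumPy array.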
with torch.no_grad():
    output = model(img_LR).squeeze(dim=0).float().cpu().clamp_(0, 1).numpy()
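# Convert CHW back to HWC and rescale to the 0-255 range expected when saving.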
output = np.transpose(output, (1, 2, 0))
output = (output * 255.0).round()
print('Finished upscaling, saving image.')
print(output_dir)
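# Despite its name, output_dir is the full output file path; the IMWRITE_JPEG_QUALITY
# flag only applies when the image is saved as a JPEG (format follows the extension).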
cv2.imwrite(output_dir, output, [int(cv2.IMWRITE_JPEG_QUALITY), 90])