0x90e committed on
Commit
dc5de93
·
1 Parent(s): 1f467d3

Support for more models.

Browse files
app.py CHANGED
def inference(img, size, type):
    """Upscale a PIL image with the manga super-resolution model.

    Parameters:
        img:  PIL.Image input picture (saved to disk for the CLI model runner).
        size: "x4" or "x2" — the model always upscales 4x; "x2" is produced
              by halving the 4x output afterwards.
        type: picture category selected in the UI (unused in this version;
              name shadows the builtin but is kept for interface compatibility).

    Returns:
        A one-element list containing the upscaled PIL.Image (gallery format).
    """
    # Portable, shell-free directory creation (was: run_cmd("mkdir " + OUTPUT_DIR)).
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    img.save(INPUT_DIR + "1.jpg", "JPEG")

    run_cmd("python inference_manga.py " + os.path.join(INPUT_DIR, "1.jpg")
            + " " + os.path.join(OUTPUT_DIR, "1_out.jpg"))

    img_out = Image.open(os.path.join(OUTPUT_DIR, "1_out.jpg"))

    # BUG FIX: `size is "x2"` compared string identity (relies on CPython
    # interning); use equality.  Also, PIL's resize() requires a 2-tuple —
    # the original passed a generator expression, which raises at runtime.
    if size == "x2":
        img_out = img_out.resize(tuple(s // 2 for s in img_out.size))

    return [img_out]


input_image = gr.Image(type="pil", label="Input")
upscale_type = gr.Radio(["Manga", "Anime", "General"], label="Select the type of picture you want to upscale:", value="Manga")
upscale_size = gr.Radio(["x4", "x2"], label="Upscale by:", value="x4")
output_image = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
 
def inference(img, size, type):
    """Upscale a PIL image with the model matching the selected picture type.

    Parameters:
        img:  PIL.Image input picture (saved to disk for the CLI model runners).
        size: "x4" or "x2" — models upscale 4x; "x2" halves the 4x output.
        type: one of "Manga v2", "Manga v1", "Anime", "General"; dispatches
              to the matching inference script (name shadows the builtin but
              is kept for interface compatibility with the Gradio callback).

    Returns:
        A one-element list containing the upscaled PIL.Image (gallery format).
    """
    # Portable, shell-free directory creation (was: run_cmd("mkdir " + OUTPUT_DIR)).
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    img.save(INPUT_DIR + "1.jpg", "JPEG")

    # BUG FIX: `type is "Manga v1"` compared string identity, which is not
    # guaranteed for runtime-built strings — use equality.
    if type == "Manga v1":
        run_cmd("python inference_manga_v2.py " + os.path.join(INPUT_DIR, "1.jpg")
                + " " + os.path.join(OUTPUT_DIR, "1_out.jpg"))
    else:
        run_cmd("python inference.py " + os.path.join(INPUT_DIR, "1.jpg")
                + " " + os.path.join(OUTPUT_DIR, "1_out.jpg") + " " + type)

    img_out = Image.open(os.path.join(OUTPUT_DIR, "1_out.jpg"))

    # BUG FIX: identity comparison again, and PIL's resize() requires a
    # 2-tuple — the original passed a generator, which raises at runtime.
    if size == "x2":
        img_out = img_out.resize(tuple(s // 2 for s in img_out.size))

    return [img_out]


input_image = gr.Image(type="pil", label="Input")
upscale_type = gr.Radio(["Manga v2", "Manga v1", "Anime", "General"], label="Select the type of picture you want to upscale:", value="Manga v2")
upscale_size = gr.Radio(["x4", "x2"], label="Upscale by:", value="x4")
output_image = gr.Gallery(label="Generated images", show_label=False, elem_id="gallery").style(grid=[2], height="auto")
 
inference.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import os.path
3
+ import cv2
4
+ import numpy as np
5
+ import torch
6
+ import architecture as arch
7
+
8
def is_cuda():
    """Return True when a CUDA-capable GPU is available to torch."""
    # `if cond: return True else: return False` collapsed to the
    # boolean expression itself — same result, idiomatic form.
    return torch.cuda.is_available()
13
+
14
# Command-line driver: upscale one image with an ESRGAN-style 4x model.
# Usage: python inference.py <input_image> <output_path> <model_type>

model_type = sys.argv[3]

# Map the UI label to its checkpoint; any unrecognized label falls back
# to the general-purpose model.
# BUG FIX: the original compared strings with `is` (identity), which is
# not guaranteed to hold for argv strings — use `==`.
if model_type == "Manga v1":
    model_path = "4x_eula_digimanga_bw_v1_860k.pth"
elif model_type == "Anime":
    model_path = "4x-AnimeSharp.pth"
else:
    model_path = "4x-UniScaleV2_Sharp.pth"

img_path = sys.argv[1]
output_dir = sys.argv[2]
device = torch.device('cuda' if is_cuda() else 'cpu')

# 3-in/3-out RRDB (ESRGAN) generator, fixed 4x upscale.
model = arch.RRDB_Net(3, 3, 64, 23, gc=32, upscale=4, norm_type=None,
                      act_type='leakyrelu', mode='CNA', res_scale=1,
                      upsample_mode='upconv')

# map_location lets CUDA-trained weights load on a CPU-only host.
if is_cuda():
    model.load_state_dict(torch.load(model_path), strict=True)
else:
    model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')), strict=True)

model.eval()

# Inference only: freeze all weights.  (Iterate values directly instead of
# named_parameters(); the names were unused.)
for param in model.parameters():
    param.requires_grad = False
model = model.to(device)

# Read the image in color: the network takes 3 channels, and the BGR->RGB
# shuffle below indexes axis 2.  BUG FIX: the original used
# IMREAD_GRAYSCALE, which returns a 2-D array and crashes on
# img[:, :, [2, 1, 0]].  (Unused `base` local removed.)
print(img_path)
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = img * 1.0 / 255
# HWC BGR -> CHW RGB float tensor, with a leading batch dimension.
img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
img_LR = img.unsqueeze(0)
img_LR = img_LR.to(device)

print('Start upscaling...')
with torch.no_grad():
    output = model(img_LR).data.squeeze().float().cpu().clamp_(0, 1).numpy()
# CHW RGB -> HWC BGR, back to 8-bit range for OpenCV.
output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
output = (output * 255.0).round()
print('Finished upscaling, saving image.')
cv2.imwrite(output_dir, output, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
inference_manga.py → inference_manga_v2.py RENAMED
File without changes