ai-art w11wo committed
Commit efd1556 · 0 Parent(s)

Duplicate from bookbot/Image-Upscaling-Playground

Co-authored-by: Wilson Wongso <[email protected]>

.gitattributes ADDED
@@ -0,0 +1,28 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
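
These are the stock LFS rules that Hugging Face generates for new repositories, routing archives, checkpoints, and other binary formats through Git LFS; note that the `*.ort` models added below do not match any of these patterns.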
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Image Upscaling Playground
+ emoji: 🦆
+ colorFrom: green
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 3.4.1
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ duplicated_from: bookbot/Image-Upscaling-Playground
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
app.py ADDED
@@ -0,0 +1,85 @@
+ import numpy as np
+ import cv2
+ import onnxruntime
+ import gradio as gr
+
+
+ def pre_process(img: np.array) -> np.array:
+     # H, W, C -> C, H, W
+     img = np.transpose(img[:, :, 0:3], (2, 0, 1))
+     # C, H, W -> 1, C, H, W
+     img = np.expand_dims(img, axis=0).astype(np.float32)
+     return img
+
+
+ def post_process(img: np.array) -> np.array:
+     # 1, C, H, W -> C, H, W
+     img = np.squeeze(img)
+     # C, H, W -> H, W, C
+     img = np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8)
+     return img
+
+
+ def inference(model_path: str, img_array: np.array) -> np.array:
+     options = onnxruntime.SessionOptions()
+     options.intra_op_num_threads = 1
+     options.inter_op_num_threads = 1
+     ort_session = onnxruntime.InferenceSession(model_path, options)
+     ort_inputs = {ort_session.get_inputs()[0].name: img_array}
+     ort_outs = ort_session.run(None, ort_inputs)
+
+     return ort_outs[0]
+
+
+ def convert_pil_to_cv2(image):
+     # pil_image = image.convert("RGB")
+     open_cv_image = np.array(image)
+     # RGB to BGR
+     open_cv_image = open_cv_image[:, :, ::-1].copy()
+     return open_cv_image
+
+
+ def upscale(image, model):
+     model_path = f"models/{model}.ort"
+     img = convert_pil_to_cv2(image)
+     if img.ndim == 2:
+         img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+     if img.shape[2] == 4:
+         alpha = img[:, :, 3]  # GRAY
+         alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2BGR)  # BGR
+         alpha_output = post_process(inference(model_path, pre_process(alpha)))  # BGR
+         alpha_output = cv2.cvtColor(alpha_output, cv2.COLOR_BGR2GRAY)  # GRAY
+
+         img = img[:, :, 0:3]  # BGR
+         image_output = post_process(inference(model_path, pre_process(img)))  # BGR
+         image_output = cv2.cvtColor(image_output, cv2.COLOR_BGR2BGRA)  # BGRA
+         image_output[:, :, 3] = alpha_output
+
+     elif img.shape[2] == 3:
+         image_output = post_process(inference(model_path, pre_process(img)))  # BGR
+
+     return image_output
+
+
+ css = ".output-image, .input-image, .image-preview {height: 480px !important} "
+ model_choices = ["modelx2", "modelx2 25 JXL", "modelx4", "minecraft_modelx4"]
+
+ gr.Interface(
+     fn=upscale,
+     inputs=[
+         gr.inputs.Image(type="pil", label="Input Image"),
+         gr.inputs.Radio(
+             model_choices,
+             type="value",
+             default=None,
+             label="Choose Upscaler",
+             optional=False,
+         ),
+     ],
+     outputs="image",
+     title="Image Upscaling 🦆",
+     description="Model: [Anchor-based Plain Net for Mobile Image Super-Resolution](https://arxiv.org/abs/2105.09750). Repository: [SR Mobile PyTorch](https://github.com/w11wo/sr_mobile_pytorch)",
+     allow_flagging="never",
+     css=css,
+ ).launch()
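
For reference, here is a minimal sketch (not part of this commit) of driving the same `.ort` models without the Gradio UI. It mirrors `pre_process`, `inference`, and `post_process` from `app.py`; `upscale_file`, `input.png`, and `upscaled.png` are illustrative names, and it assumes the bundled `models/modelx2.ort` plus the packages pinned in `requirements.txt`:

```python
# Standalone sketch of the app.py pipeline; file and model names are assumptions.
import numpy as np
import cv2
import onnxruntime


def upscale_file(in_path: str, out_path: str,
                 model_path: str = "models/modelx2.ort") -> None:
    # Load as H, W, C in BGR order, matching what convert_pil_to_cv2 produces
    img = cv2.imread(in_path, cv2.IMREAD_COLOR)
    # H, W, C -> 1, C, H, W float32, mirroring pre_process
    blob = np.expand_dims(np.transpose(img, (2, 0, 1)), axis=0).astype(np.float32)
    session = onnxruntime.InferenceSession(model_path)
    out = session.run(None, {session.get_inputs()[0].name: blob})[0]
    # 1, C, H, W -> H, W, C uint8, still BGR. post_process additionally flips
    # BGR -> RGB because Gradio expects RGB, but cv2.imwrite wants BGR.
    out = np.transpose(np.squeeze(out), (1, 2, 0)).astype(np.uint8)
    cv2.imwrite(out_path, out)


if __name__ == "__main__":
    upscale_file("input.png", "upscaled.png")
```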
examples/example_1.png ADDED
examples/example_2.png ADDED
examples/example_3.png ADDED
examples/example_4.png ADDED
examples/example_5.png ADDED
examples_x2/example_1.png ADDED
examples_x2/example_2.png ADDED
examples_x2/example_3.png ADDED
examples_x2/example_4.png ADDED
examples_x2/example_5.png ADDED
examples_x2_25/example_1.png ADDED
examples_x2_25/example_2.png ADDED
examples_x2_25/example_3.png ADDED
examples_x2_25/example_4.png ADDED
examples_x2_25/example_5.png ADDED
minecraft_examples/minecraft-1.png ADDED
minecraft_examples/minecraft-2.png ADDED
minecraft_examples/minecraft-3.png ADDED
minecraft_examples/minecraft-4.png ADDED
minecraft_examples/minecraft-5.png ADDED
models/minecraft_modelx4.ort ADDED
Binary file (261 kB).

models/modelx2 25 JXL.ort ADDED
Binary file (147 kB).

models/modelx2.ort ADDED
Binary file (147 kB).

models/modelx4.ort ADDED
Binary file (261 kB).
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ numpy
+ onnxruntime==1.12.0
+ opencv-python-headless
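
Note that `gradio` and `Pillow` are absent from `requirements.txt`: on Spaces the `gradio` package is installed automatically from the `sdk_version` declared in the README front matter, and the PIL images that `upscale` receives are constructed by Gradio itself.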