Update app.py
app.py CHANGED
@@ -18,25 +18,29 @@ os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefV

sys.path.append("RefVSR")

- ##
+ ## I/O setup (creates folders and places inputs corresponding to the original RefVSR code)
+ # HD input
HR_LR_path = "test/RealMCVSR/test/HR/UW/0000"
HR_Ref_path = "test/RealMCVSR/test/HR/W/0000"
HR_Ref_path_T = "test/RealMCVSR/test/HR/T/0000"
+ os.makedirs(HR_LR_path)
+ os.makedirs(HR_Ref_path)
+ os.makedirs(HR_Ref_path_T)
+ os.system("wget https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png -O HR_LR.png")
+ os.system("wget https://www.dropbox.com/s/abydd1oczs1163l/Ref.png -O HR_Ref.png")
+
+ # 4x downsampled input
LR_path = "test/RealMCVSR/test/LRx4/UW/0000"
Ref_path = "test/RealMCVSR/test/LRx4/W/0000"
Ref_path_T = "test/RealMCVSR/test/LRx4/T/0000"
os.makedirs(LR_path)
os.makedirs(Ref_path)
os.makedirs(Ref_path_T)
- os.makedirs(HR_LR_path)
- os.makedirs(HR_Ref_path)
- os.makedirs(HR_Ref_path_T)
- os.makedirs('result')
os.system("wget https://www.dropbox.com/s/vqekqdz80d85gi4/UW.png -O LR.png")
os.system("wget https://www.dropbox.com/s/lsopmquhpm87v83/W.png -O Ref.png")
- os.system("wget https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png -O HR_LR.png")
- os.system("wget https://www.dropbox.com/s/abydd1oczs1163l/Ref.png -O HR_Ref.png")

+ # output directory
+ os.makedirs('result')

## resize if necessary (not used)
def resize(img):
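Note: the setup above calls os.makedirs() on fixed paths, which raises FileExistsError if a directory already exists (for example when the Space restarts with leftover files). A minimal sketch of an idempotent variant using the same paths from this hunk; the exist_ok flag is not part of this commit:

import os

# Same directories as above; exist_ok=True makes re-runs a no-op instead of an error.
for d in ["test/RealMCVSR/test/HR/UW/0000",
          "test/RealMCVSR/test/HR/W/0000",
          "test/RealMCVSR/test/HR/T/0000",
          "test/RealMCVSR/test/LRx4/UW/0000",
          "test/RealMCVSR/test/LRx4/W/0000",
          "test/RealMCVSR/test/LRx4/T/0000",
          "result"]:
    os.makedirs(d, exist_ok=True)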
@@ -50,9 +54,11 @@ def resize(img):
img = img.resize((wsize,hsize), Image.ANTIALIAS)
return img

-
+
+
+ #################### low res ##################
## inference
- def inference_8K(LR, Ref):
+ def inference(LR, Ref):
## resize for user selected input (not used)
#LR = resize(LR)
#Ref = resize(Ref)
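Note: the unchanged resize() helper above (marked "not used") resamples with Image.ANTIALIAS, which recent Pillow releases deprecate and later remove in favour of the LANCZOS filter. A small, hypothetical replacement sketch, assuming the helper's intent is to scale to a fixed width while keeping the aspect ratio; the actual wsize/hsize computation and the 1080 default are outside this hunk and are assumptions:

from PIL import Image

def resize(img, base_width=1080):
    # Scale to base_width, keep the aspect ratio.
    # LANCZOS replaces the deprecated ANTIALIAS (Image.Resampling.LANCZOS on newer Pillow).
    ratio = base_width / float(img.size[0])
    hsize = int(img.size[1] * ratio)
    return img.resize((base_width, hsize), Image.LANCZOS)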
@@ -67,35 +73,35 @@ def inference_8K(LR, Ref):

## Run RefVSR model
os.system("python -B run.py \
- --mode
- --config
+ --mode RefVSR_MFID \
+ --config config_RefVSR_MFID \
--data RealMCVSR \
- --ckpt_abs_name ckpt/
+ --ckpt_abs_name ckpt/RefVSR_MFID.pytorch \
--data_offset ./test \
--output_offset ./result \
--qualitative_only \
--cpu \
- --is_gradio")
+ --is_gradio")
return "result/0000.png"
-
+
title="RefVSR"
- description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively. The demo runs on CPUs and takes about
+ description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively. The demo runs on CPUs and takes about 150s."

- article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is trained by the proposed
+ article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is trained by the proposed pre-training strategy only. The sample frames are in 430x270 resolution and saved in the PNG format. </p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"

## resize for sample (not used)
#LR = resize(Image.open('LR.png')).save('LR.png')
#Ref = resize(Image.open('Ref.png')).save('Ref.png')

## input
- examples=[['
+ examples=[['LR.png', 'Ref.png']]

## interface
- gr.Interface(
-
- ####################
+ gr.Interface(inference,[gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],gr.outputs.Image(type="file"),title=title,description=description,article=article,theme ="peach",examples=examples).launch(enable_queue=True)
+
+ #################### 8K ##################
## inference
- def inference(LR, Ref):
+ def inference_8K(LR, Ref):
## resize for user selected input (not used)
#LR = resize(LR)
#Ref = resize(Ref)
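The inference() handler added here shells out to run.py through os.system with a fixed set of flags. For reference, a sketch of the same invocation through subprocess, which keeps the flags as a list and raises if RefVSR exits with an error; the helper name and parameters are illustrative, and only the flag values come from this commit:

import subprocess

def run_refvsr(mode, config, ckpt_abs_name):
    # Mirrors the run.py call above: python -B run.py --mode ... --cpu --is_gradio
    subprocess.run(
        ["python", "-B", "run.py",
         "--mode", mode,
         "--config", config,
         "--data", "RealMCVSR",
         "--ckpt_abs_name", ckpt_abs_name,
         "--data_offset", "./test",
         "--output_offset", "./result",
         "--qualitative_only",
         "--cpu",
         "--is_gradio"],
        check=True)  # raise CalledProcessError if run.py fails
    return "result/0000.png"

For the low-res demo this would be called as run_refvsr("RefVSR_MFID", "config_RefVSR_MFID", "ckpt/RefVSR_MFID.pytorch"), matching the flags in this hunk.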
@@ -110,29 +116,28 @@ def inference(LR, Ref):

## Run RefVSR model
os.system("python -B run.py \
- --mode
- --config
+ --mode RefVSR_MFID_8K \
+ --config config_RefVSR_MFID_8K \
--data RealMCVSR \
- --ckpt_abs_name ckpt/
+ --ckpt_abs_name ckpt/RefVSR_MFID_8K.pytorch \
--data_offset ./test \
--output_offset ./result \
--qualitative_only \
--cpu \
- --is_gradio")
+ --is_gradio")
return "result/0000.png"
-
+
title="RefVSR"
- description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively. The demo runs on CPUs and takes about
+ description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively. The demo runs on CPUs and takes about 120s."

- article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is trained by the proposed
+ article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is trained by the proposed two-stage training strategy. The sample frames are in HD resolution (1920x1080) and saved in the PNG format. </p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"

## resize for sample (not used)
#LR = resize(Image.open('LR.png')).save('LR.png')
#Ref = resize(Image.open('Ref.png')).save('Ref.png')

## input
- examples=[['
+ examples=[['HR_LR.png', 'HR_Ref.png']]

## interface
- gr.Interface(
-
+ gr.Interface(inference_8K,[gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],gr.outputs.Image(type="file"),title=title,description=description,article=article,theme ="peach",examples=examples).launch(enable_queue=True)
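Note: both interfaces are wired with gr.inputs.Image / gr.outputs.Image, theme ="peach" and launch(enable_queue=True), which is the older Gradio 2.x-style API; later Gradio versions expose components at the top level and handle queuing via .queue(). A hedged sketch of roughly equivalent wiring on a newer Gradio, assuming such a version is installed (this is not what the commit targets):

import gradio as gr

demo = gr.Interface(
    fn=inference_8K,                        # same handler as in the hunk above
    inputs=[gr.Image(type="pil"), gr.Image(type="pil")],
    outputs=gr.Image(type="filepath"),      # newer spelling of the old "file" output type
    title=title,
    description=description,
    article=article,
    examples=[["HR_LR.png", "HR_Ref.png"]],
)
demo.queue().launch()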
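Finally, the sample frames in the first hunk are fetched by shelling out to wget via os.system, whose return code is ignored, so a failed download only surfaces later when inference cannot find the files. A purely illustrative standard-library alternative with the same URLs; whether these Dropbox links serve the raw PNG or an HTML preview page depends on the link settings, so treat this as a sketch only:

import urllib.request

SAMPLES = {
    "LR.png":     "https://www.dropbox.com/s/vqekqdz80d85gi4/UW.png",
    "Ref.png":    "https://www.dropbox.com/s/lsopmquhpm87v83/W.png",
    "HR_LR.png":  "https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png",
    "HR_Ref.png": "https://www.dropbox.com/s/abydd1oczs1163l/Ref.png",
}
for filename, url in SAMPLES.items():
    # urlretrieve raises on HTTP errors instead of failing silently like an unchecked wget call.
    urllib.request.urlretrieve(url, filename)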