import os
import sys
import gradio as gr
from PIL import Image
## environment setup
os.system("git clone https://github.com/codeslake/RefVSR.git")
os.chdir("RefVSR")
os.system("./install/install_cudnn113.sh")
os.mkdir("ckpt")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID_8K.pytorch -O ckpt/RefVSR_small_MFID_8K.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID_8K.pytorch -O ckpt/RefVSR_MFID_8K.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID.pytorch -O ckpt/RefVSR_MFID.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/SPyNet.pytorch -O ckpt/SPyNet.pytorch")
sys.path.append("RefVSR")
## RefVSR
#LR_path = "test/RealMCVSR/test/HR/UW/0000"
#Ref_path = "test/RealMCVSR/test/HR/W/0000"
#Ref_path_T = "test/RealMCVSR/test/HR/T/0000"
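# 4x-downsampled frames from the triple-camera setup: ultra-wide (UW) as LR,
# wide (W) and telephoto (T) as references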
LR_path = "test/RealMCVSR/test/LRx4/UW/0000"
Ref_path = "test/RealMCVSR/test/LRx4/W/0000"
Ref_path_T = "test/RealMCVSR/test/LRx4/T/0000"
os.makedirs(LR_path, exist_ok=True)
os.makedirs(Ref_path, exist_ok=True)
os.makedirs(Ref_path_T, exist_ok=True)
os.makedirs('result', exist_ok=True)
#os.system("wget https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png -O LR.png")
#os.system("wget https://www.dropbox.com/s/abydd1oczs1163l/Ref.png -O Ref.png")
os.system("wget https://www.dropbox.com/s/vqekqdz80d85gi4/UW.png -O LR.png")
os.system("wget https://www.dropbox.com/s/lsopmquhpm87v83/W.png -O Ref.png")
def resize(img):
    # cap the longer side at 512 px to keep CPU inference tractable
    max_side = 512
    w = img.size[0]
    h = img.size[1]
    if max(h, w) > max_side:
        scale_ratio = max_side / max(h, w)
        wsize = int(w * scale_ratio)
        hsize = int(h * scale_ratio)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter
        img = img.resize((wsize, hsize), Image.LANCZOS)
    return img
def inference(LR, Ref):
    #LR = resize(LR)
    #Ref = resize(Ref)
    # run.py reads frames from the RealMCVSR directory layout prepared above
    LR.save(os.path.join(LR_path, '0000.png'))
    Ref.save(os.path.join(Ref_path, '0000.png'))
    Ref.save(os.path.join(Ref_path_T, '0000.png'))
    os.system("python -B run.py \
        --mode RefVSR_MFID \
        --config config_RefVSR_MFID \
        --data RealMCVSR \
        --ckpt_abs_name ckpt/RefVSR_MFID.pytorch \
        --data_offset ./test \
        --output_offset ./result \
        --qualitative_only \
        --cpu \
        --is_gradio")
    return "result/0000.png"
title="RefVSR (under construction)"
description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively."
article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is trained proposed two-stage training strategy, and the sample frames are in 430x270 resolution and saved in the PNG format. </p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"
#LR = resize(Image.open('LR.png')).save('LR.png')
#Ref = resize(Image.open('Ref.png')).save('Ref.png')
examples=[['LR.png', 'Ref.png']]
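# note: gr.inputs/gr.outputs and launch(enable_queue=...) follow the legacy
# Gradio interface API (deprecated in Gradio 3.x and removed in 4.x)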
gr.Interface(inference, [gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")], gr.outputs.Image(type="file"), title=title, description=description, article=article, theme="peach", examples=examples).launch(enable_queue=True)