#origin
from seg import U2NETP
from GeoTr import GeoTr
from IllTr import IllTr
from inference_ill import rec_ill
import torch
import torch.nn as nn
import torch.nn.functional as F
import skimage.io as io
import numpy as np
import cv2
#import glob
import os
from PIL import Image
#import argparse
import warnings
warnings.filterwarnings('ignore')
import gradio as gr
class GeoTr_Seg(nn.Module):
    def __init__(self):
        super(GeoTr_Seg, self).__init__()
        self.msk = U2NETP(3, 1)
        self.GeoTr = GeoTr(num_attn_layers=6)

    def forward(self, x):
        # predict the document mask, binarize it and mask out the background
        msk, _1, _2, _3, _4, _5, _6 = self.msk(x)
        msk = (msk > 0.5).float()
        x = msk * x

        # backward map from GeoTr, normalized to [-0.99, 0.99] for grid_sample
        bm = self.GeoTr(x)
        bm = (2 * (bm / 286.8) - 1) * 0.99

        return bm
def reload_model(model, path=""):
if not bool(path):
return model
else:
model_dict = model.state_dict()
pretrained_dict = torch.load(path, map_location='cuda:0')
print(len(pretrained_dict.keys()))
pretrained_dict = {k[7:]: v for k, v in pretrained_dict.items() if k[7:] in model_dict}
print(len(pretrained_dict.keys()))
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
def reload_segmodel(model, path=""):
if not bool(path):
return model
else:
model_dict = model.state_dict()
pretrained_dict = torch.load(path, map_location='cuda:0')
print(len(pretrained_dict.keys()))
pretrained_dict = {k[6:]: v for k, v in pretrained_dict.items() if k[6:] in model_dict}
print(len(pretrained_dict.keys()))
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
def rec(opt):
    # print(torch.__version__) # 1.5.1
    img_list = os.listdir(opt.distorrted_path)  # distorted images list

    if not os.path.exists(opt.gsave_path):  # create save path
        os.mkdir(opt.gsave_path)
    if not os.path.exists(opt.isave_path):  # create save path
        os.mkdir(opt.isave_path)

    GeoTr_Seg_model = GeoTr_Seg().cuda()
    # reload segmentation model
    reload_segmodel(GeoTr_Seg_model.msk, opt.Seg_path)
    # reload geometric unwarping model
    reload_model(GeoTr_Seg_model.GeoTr, opt.GeoTr_path)

    IllTr_model = IllTr().cuda()
    # reload illumination rectification model
    reload_model(IllTr_model, opt.IllTr_path)

    # To eval mode
    GeoTr_Seg_model.eval()
    IllTr_model.eval()

    for img_path in img_list:
        name = img_path.split('.')[-2]  # image name

        img_path = opt.distorrted_path + img_path  # read image and to tensor
        im_ori = np.array(Image.open(img_path))[:, :, :3] / 255.
        h, w, _ = im_ori.shape
        im = cv2.resize(im_ori, (288, 288))
        im = im.transpose(2, 0, 1)
        im = torch.from_numpy(im).float().unsqueeze(0)

        with torch.no_grad():
            # geometric unwarping
            bm = GeoTr_Seg_model(im.cuda())
            bm = bm.cpu()
            bm0 = cv2.resize(bm[0, 0].numpy(), (w, h))  # x flow
            bm1 = cv2.resize(bm[0, 1].numpy(), (w, h))  # y flow
            bm0 = cv2.blur(bm0, (3, 3))
            bm1 = cv2.blur(bm1, (3, 3))
            lbl = torch.from_numpy(np.stack([bm0, bm1], axis=2)).unsqueeze(0)  # h * w * 2

            out = F.grid_sample(torch.from_numpy(im_ori).permute(2, 0, 1).unsqueeze(0).float(), lbl, align_corners=True)
            img_geo = ((out[0] * 255).permute(1, 2, 0).numpy())[:, :, ::-1].astype(np.uint8)
            cv2.imwrite(opt.gsave_path + name + '_geo' + '.png', img_geo)  # save

            # illumination rectification
            if opt.ill_rec:
                ill_savep = opt.isave_path + name + '_ill' + '.png'
                rec_ill(IllTr_model, img_geo, saveRecPath=ill_savep)

        print('Done: ', img_path)
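
# Note: rec() above is the batch entry point kept from the original DocTr
# inference script and is never called by this Gradio app. A minimal sketch of
# the options object it expects (attribute names are taken from the accesses
# above; the argparse defaults shown are assumptions, not this Space's config):
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   parser.add_argument('--distorrted_path', default='./distorted/')
#   parser.add_argument('--gsave_path', default='./geo_rec/')
#   parser.add_argument('--isave_path', default='./ill_rec/')
#   parser.add_argument('--Seg_path', default='./model_pretrained/seg.pth')
#   parser.add_argument('--GeoTr_path', default='./model_pretrained/geotr.pth')
#   parser.add_argument('--IllTr_path', default='./model_pretrained/illtr.pth')
#   parser.add_argument('--ill_rec', type=bool, default=False)
#   rec(parser.parse_args())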
def process_image(input_image):
    # build and load the segmentation + geometric unwarping model
    GeoTr_Seg_model = GeoTr_Seg().cuda()
    reload_segmodel(GeoTr_Seg_model.msk, './model_pretrained/seg.pth')
    reload_model(GeoTr_Seg_model.GeoTr, './model_pretrained/geotr.pth')

    # build and load the illumination rectification model
    IllTr_model = IllTr().cuda()
    reload_model(IllTr_model, './model_pretrained/illtr.pth')

    GeoTr_Seg_model.eval()
    IllTr_model.eval()

    im_ori = np.array(input_image)[:, :, :3] / 255.
    h, w, _ = im_ori.shape
    im = cv2.resize(im_ori, (288, 288))
    im = im.transpose(2, 0, 1)
    im = torch.from_numpy(im).float().unsqueeze(0)

    with torch.no_grad():
        # geometric unwarping: predict the backward map and resample the input
        bm = GeoTr_Seg_model(im.cuda())
        bm = bm.cpu()
        bm0 = cv2.resize(bm[0, 0].numpy(), (w, h))
        bm1 = cv2.resize(bm[0, 1].numpy(), (w, h))
        bm0 = cv2.blur(bm0, (3, 3))
        bm1 = cv2.blur(bm1, (3, 3))
        lbl = torch.from_numpy(np.stack([bm0, bm1], axis=2)).unsqueeze(0)
        out = F.grid_sample(torch.from_numpy(im_ori).permute(2, 0, 1).unsqueeze(0).float(), lbl, align_corners=True)
        img_geo = ((out[0] * 255).permute(1, 2, 0).numpy()).astype(np.uint8)

    # illumination rectification is disabled here; return the unwarped image
    ill_rec = False
    if ill_rec:
        img_ill = rec_ill(IllTr_model, img_geo)
        return Image.fromarray(img_ill)
    else:
        return Image.fromarray(img_geo)
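
# Hypothetical smoke test of process_image() outside the Gradio UI, assuming a
# CUDA device and a local test image (the path below is an assumption):
#
#   test_img = Image.open('./images/sample.png').convert('RGB')
#   rectified = process_image(test_img)        # returns a PIL.Image
#   rectified.save('./sample_rectified.png')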
# Define Gradio interface
# (gr.inputs / gr.outputs were removed in recent Gradio releases; using the
#  component classes directly keeps the same behaviour.)
input_image = gr.Image()
output_image = gr.Image(type='pil')

iface = gr.Interface(fn=process_image, inputs=input_image, outputs=output_image, title="Image Correction")
iface.launch()