ohayonguy committed on
Commit • a8bd6f6
Parent(s): cd4eb22

removed unnecessary files

Browse files:
- utils/basicsr_custom.py +0 -954
- utils/create_degradation.py +0 -144
- utils/img_utils.py +0 -5
utils/basicsr_custom.py
DELETED
@@ -1,954 +0,0 @@
# https://github.com/XPixelGroup/BasicSR/blob/master/basicsr/data/degradations.py
# Copyright (c) OpenMMLab. All rights reserved.
# https://github.com/open-mmlab/mmcv/blob/master/mmcv/fileio/file_client.py

import math
import random
import re
from abc import ABCMeta, abstractmethod
from pathlib import Path
from typing import List, Dict
from typing import Mapping, Any
from typing import Optional, Union

import cv2
import numpy as np
import torch
from PIL import Image
from scipy import special
from scipy.stats import multivariate_normal
from torch import Tensor
# from torchvision.transforms.functional_tensor import rgb_to_grayscale
from torchvision.transforms._functional_tensor import rgb_to_grayscale


# -------------------------------------------------------------------- #
# --------------------------- blur kernels --------------------------- #
# -------------------------------------------------------------------- #


# --------------------------- util functions --------------------------- #
def sigma_matrix2(sig_x, sig_y, theta):
    """Calculate the rotated sigma matrix (two dimensional matrix).

    Args:
        sig_x (float):
        sig_y (float):
        theta (float): Radian measurement.

    Returns:
        ndarray: Rotated sigma matrix.
    """
    d_matrix = np.array([[sig_x ** 2, 0], [0, sig_y ** 2]])
    u_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
    return np.dot(u_matrix, np.dot(d_matrix, u_matrix.T))


def mesh_grid(kernel_size):
    """Generate the mesh grid, centering at zero.

    Args:
        kernel_size (int):

    Returns:
        xy (ndarray): with the shape (kernel_size, kernel_size, 2)
        xx (ndarray): with the shape (kernel_size, kernel_size)
        yy (ndarray): with the shape (kernel_size, kernel_size)
    """
    ax = np.arange(-kernel_size // 2 + 1., kernel_size // 2 + 1.)
    xx, yy = np.meshgrid(ax, ax)
    xy = np.hstack((xx.reshape((kernel_size * kernel_size, 1)), yy.reshape(kernel_size * kernel_size,
                                                                           1))).reshape(kernel_size, kernel_size, 2)
    return xy, xx, yy


def pdf2(sigma_matrix, grid):
    """Calculate PDF of the bivariate Gaussian distribution.

    Args:
        sigma_matrix (ndarray): with the shape (2, 2)
        grid (ndarray): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size.

    Returns:
        kernel (ndarrray): un-normalized kernel.
    """
    inverse_sigma = np.linalg.inv(sigma_matrix)
    kernel = np.exp(-0.5 * np.sum(np.dot(grid, inverse_sigma) * grid, 2))
    return kernel


def cdf2(d_matrix, grid):
    """Calculate the CDF of the standard bivariate Gaussian distribution.
        Used in skewed Gaussian distribution.

    Args:
        d_matrix (ndarrasy): skew matrix.
        grid (ndarray): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size.

    Returns:
        cdf (ndarray): skewed cdf.
    """
    rv = multivariate_normal([0, 0], [[1, 0], [0, 1]])
    grid = np.dot(grid, d_matrix)
    cdf = rv.cdf(grid)
    return cdf


def bivariate_Gaussian(kernel_size, sig_x, sig_y, theta, grid=None, isotropic=True):
    """Generate a bivariate isotropic or anisotropic Gaussian kernel.

    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` is ignored.

    Args:
        kernel_size (int):
        sig_x (float):
        sig_y (float):
        theta (float): Radian measurement.
        grid (ndarray, optional): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size. Default: None
        isotropic (bool):

    Returns:
        kernel (ndarray): normalized kernel.
    """
    if grid is None:
        grid, _, _ = mesh_grid(kernel_size)
    if isotropic:
        sigma_matrix = np.array([[sig_x ** 2, 0], [0, sig_x ** 2]])
    else:
        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
    kernel = pdf2(sigma_matrix, grid)
    kernel = kernel / np.sum(kernel)
    return kernel


def bivariate_generalized_Gaussian(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
    """Generate a bivariate generalized Gaussian kernel.

    ``Paper: Parameter Estimation For Multivariate Generalized Gaussian Distributions``

    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` is ignored.

    Args:
        kernel_size (int):
        sig_x (float):
        sig_y (float):
        theta (float): Radian measurement.
        beta (float): shape parameter, beta = 1 is the normal distribution.
        grid (ndarray, optional): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size. Default: None

    Returns:
        kernel (ndarray): normalized kernel.
    """
    if grid is None:
        grid, _, _ = mesh_grid(kernel_size)
    if isotropic:
        sigma_matrix = np.array([[sig_x ** 2, 0], [0, sig_x ** 2]])
    else:
        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
    inverse_sigma = np.linalg.inv(sigma_matrix)
    kernel = np.exp(-0.5 * np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta))
    kernel = kernel / np.sum(kernel)
    return kernel


def bivariate_plateau(kernel_size, sig_x, sig_y, theta, beta, grid=None, isotropic=True):
    """Generate a plateau-like anisotropic kernel.

    1 / (1+x^(beta))

    Reference: https://stats.stackexchange.com/questions/203629/is-there-a-plateau-shaped-distribution

    In the isotropic mode, only `sig_x` is used. `sig_y` and `theta` is ignored.

    Args:
        kernel_size (int):
        sig_x (float):
        sig_y (float):
        theta (float): Radian measurement.
        beta (float): shape parameter, beta = 1 is the normal distribution.
        grid (ndarray, optional): generated by :func:`mesh_grid`,
            with the shape (K, K, 2), K is the kernel size. Default: None

    Returns:
        kernel (ndarray): normalized kernel.
    """
    if grid is None:
        grid, _, _ = mesh_grid(kernel_size)
    if isotropic:
        sigma_matrix = np.array([[sig_x ** 2, 0], [0, sig_x ** 2]])
    else:
        sigma_matrix = sigma_matrix2(sig_x, sig_y, theta)
    inverse_sigma = np.linalg.inv(sigma_matrix)
    kernel = np.reciprocal(np.power(np.sum(np.dot(grid, inverse_sigma) * grid, 2), beta) + 1)
    kernel = kernel / np.sum(kernel)
    return kernel


def random_bivariate_Gaussian(kernel_size,
                              sigma_x_range,
                              sigma_y_range,
                              rotation_range,
                              noise_range=None,
                              isotropic=True):
    """Randomly generate bivariate isotropic or anisotropic Gaussian kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` is ignored.

    Args:
        kernel_size (int):
        sigma_x_range (tuple): [0.6, 5]
        sigma_y_range (tuple): [0.6, 5]
        rotation range (tuple): [-math.pi, math.pi]
        noise_range(tuple, optional): multiplicative kernel noise,
            [0.75, 1.25]. Default: None

    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    kernel = bivariate_Gaussian(kernel_size, sigma_x, sigma_y, rotation, isotropic=isotropic)

    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    return kernel


def random_bivariate_generalized_Gaussian(kernel_size,
                                          sigma_x_range,
                                          sigma_y_range,
                                          rotation_range,
                                          beta_range,
                                          noise_range=None,
                                          isotropic=True):
    """Randomly generate bivariate generalized Gaussian kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` is ignored.

    Args:
        kernel_size (int):
        sigma_x_range (tuple): [0.6, 5]
        sigma_y_range (tuple): [0.6, 5]
        rotation range (tuple): [-math.pi, math.pi]
        beta_range (tuple): [0.5, 8]
        noise_range(tuple, optional): multiplicative kernel noise,
            [0.75, 1.25]. Default: None

    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    # assume beta_range[0] < 1 < beta_range[1]
    if np.random.uniform() < 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])

    kernel = bivariate_generalized_Gaussian(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)

    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)
    return kernel


def random_bivariate_plateau(kernel_size,
                             sigma_x_range,
                             sigma_y_range,
                             rotation_range,
                             beta_range,
                             noise_range=None,
                             isotropic=True):
    """Randomly generate bivariate plateau kernels.

    In the isotropic mode, only `sigma_x_range` is used. `sigma_y_range` and `rotation_range` is ignored.

    Args:
        kernel_size (int):
        sigma_x_range (tuple): [0.6, 5]
        sigma_y_range (tuple): [0.6, 5]
        rotation range (tuple): [-math.pi/2, math.pi/2]
        beta_range (tuple): [1, 4]
        noise_range(tuple, optional): multiplicative kernel noise,
            [0.75, 1.25]. Default: None

    Returns:
        kernel (ndarray):
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    assert sigma_x_range[0] < sigma_x_range[1], 'Wrong sigma_x_range.'
    sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
    if isotropic is False:
        assert sigma_y_range[0] < sigma_y_range[1], 'Wrong sigma_y_range.'
        assert rotation_range[0] < rotation_range[1], 'Wrong rotation_range.'
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        rotation = np.random.uniform(rotation_range[0], rotation_range[1])
    else:
        sigma_y = sigma_x
        rotation = 0

    # TODO: this may be not proper
    if np.random.uniform() < 0.5:
        beta = np.random.uniform(beta_range[0], 1)
    else:
        beta = np.random.uniform(1, beta_range[1])

    kernel = bivariate_plateau(kernel_size, sigma_x, sigma_y, rotation, beta, isotropic=isotropic)
    # add multiplicative noise
    if noise_range is not None:
        assert noise_range[0] < noise_range[1], 'Wrong noise range.'
        noise = np.random.uniform(noise_range[0], noise_range[1], size=kernel.shape)
        kernel = kernel * noise
    kernel = kernel / np.sum(kernel)

    return kernel


def random_mixed_kernels(kernel_list,
                         kernel_prob,
                         kernel_size=21,
                         sigma_x_range=(0.6, 5),
                         sigma_y_range=(0.6, 5),
                         rotation_range=(-math.pi, math.pi),
                         betag_range=(0.5, 8),
                         betap_range=(0.5, 8),
                         noise_range=None):
    """Randomly generate mixed kernels.

    Args:
        kernel_list (tuple): a list name of kernel types,
            support ['iso', 'aniso', 'skew', 'generalized', 'plateau_iso',
            'plateau_aniso']
        kernel_prob (tuple): corresponding kernel probability for each
            kernel type
        kernel_size (int):
        sigma_x_range (tuple): [0.6, 5]
        sigma_y_range (tuple): [0.6, 5]
        rotation range (tuple): [-math.pi, math.pi]
        beta_range (tuple): [0.5, 8]
        noise_range(tuple, optional): multiplicative kernel noise,
            [0.75, 1.25]. Default: None

    Returns:
        kernel (ndarray):
    """
    kernel_type = random.choices(kernel_list, kernel_prob)[0]
    if kernel_type == 'iso':
        kernel = random_bivariate_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=True)
    elif kernel_type == 'aniso':
        kernel = random_bivariate_Gaussian(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, noise_range=noise_range, isotropic=False)
    elif kernel_type == 'generalized_iso':
        kernel = random_bivariate_generalized_Gaussian(
            kernel_size,
            sigma_x_range,
            sigma_y_range,
            rotation_range,
            betag_range,
            noise_range=noise_range,
            isotropic=True)
    elif kernel_type == 'generalized_aniso':
        kernel = random_bivariate_generalized_Gaussian(
            kernel_size,
            sigma_x_range,
            sigma_y_range,
            rotation_range,
            betag_range,
            noise_range=noise_range,
            isotropic=False)
    elif kernel_type == 'plateau_iso':
        kernel = random_bivariate_plateau(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=True)
    elif kernel_type == 'plateau_aniso':
        kernel = random_bivariate_plateau(
            kernel_size, sigma_x_range, sigma_y_range, rotation_range, betap_range, noise_range=None, isotropic=False)
    return kernel


np.seterr(divide='ignore', invalid='ignore')


def circular_lowpass_kernel(cutoff, kernel_size, pad_to=0):
    """2D sinc filter

    Reference: https://dsp.stackexchange.com/questions/58301/2-d-circularly-symmetric-low-pass-filter

    Args:
        cutoff (float): cutoff frequency in radians (pi is max)
        kernel_size (int): horizontal and vertical size, must be odd.
        pad_to (int): pad kernel size to desired size, must be odd or zero.
    """
    assert kernel_size % 2 == 1, 'Kernel size must be an odd number.'
    kernel = np.fromfunction(
        lambda x, y: cutoff * special.j1(cutoff * np.sqrt(
            (x - (kernel_size - 1) / 2) ** 2 + (y - (kernel_size - 1) / 2) ** 2)) / (2 * np.pi * np.sqrt(
            (x - (kernel_size - 1) / 2) ** 2 + (y - (kernel_size - 1) / 2) ** 2)), [kernel_size, kernel_size])
    kernel[(kernel_size - 1) // 2, (kernel_size - 1) // 2] = cutoff ** 2 / (4 * np.pi)
    kernel = kernel / np.sum(kernel)
    if pad_to > kernel_size:
        pad_size = (pad_to - kernel_size) // 2
        kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
    return kernel


# ------------------------------------------------------------- #
# --------------------------- noise --------------------------- #
# ------------------------------------------------------------- #

# ----------------------- Gaussian Noise ----------------------- #

def instantiate_from_config(config: Mapping[str, Any]) -> Any:
    if not "target" in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", dict()))


class BaseStorageBackend(metaclass=ABCMeta):
    """Abstract class of storage backends.

    All backends need to implement two apis: ``get()`` and ``get_text()``.
    ``get()`` reads the file as a byte stream and ``get_text()`` reads the file
    as texts.
    """

    @property
    def name(self) -> str:
        return self.__class__.__name__

    @abstractmethod
    def get(self, filepath: str) -> bytes:
        pass


class PetrelBackend(BaseStorageBackend):
    """Petrel storage backend (for internal use).

    PetrelBackend supports reading and writing data to multiple clusters.
    If the file path contains the cluster name, PetrelBackend will read data
    from specified cluster or write data to it. Otherwise, PetrelBackend will
    access the default cluster.

    Args:
        path_mapping (dict, optional): Path mapping dict from local path to
            Petrel path. When ``path_mapping={'src': 'dst'}``, ``src`` in
            ``filepath`` will be replaced by ``dst``. Default: None.
        enable_mc (bool, optional): Whether to enable memcached support.
            Default: True.
        conf_path (str, optional): Config path of Petrel client. Default: None.
            `New in version 1.7.1`.

    Examples:
        >>> filepath1 = 's3://path/of/file'
        >>> filepath2 = 'cluster-name:s3://path/of/file'
        >>> client = PetrelBackend()
        >>> client.get(filepath1)  # get data from default cluster
        >>> client.get(filepath2)  # get data from 'cluster-name' cluster
    """

    def __init__(self,
                 path_mapping: Optional[dict] = None,
                 enable_mc: bool = False,
                 conf_path: str = None):
        try:
            from petrel_client import client
        except ImportError:
            raise ImportError('Please install petrel_client to enable '
                              'PetrelBackend.')

        self._client = client.Client(conf_path=conf_path, enable_mc=enable_mc)
        assert isinstance(path_mapping, dict) or path_mapping is None
        self.path_mapping = path_mapping

    def _map_path(self, filepath: Union[str, Path]) -> str:
        """Map ``filepath`` to a string path whose prefix will be replaced by
        :attr:`self.path_mapping`.

        Args:
            filepath (str): Path to be mapped.
        """
        filepath = str(filepath)
        if self.path_mapping is not None:
            for k, v in self.path_mapping.items():
                filepath = filepath.replace(k, v, 1)
        return filepath

    def _format_path(self, filepath: str) -> str:
        """Convert a ``filepath`` to standard format of petrel oss.

        If the ``filepath`` is concatenated by ``os.path.join``, in a Windows
        environment, the ``filepath`` will be the format of
        's3://bucket_name\\image.jpg'. By invoking :meth:`_format_path`, the
        above ``filepath`` will be converted to 's3://bucket_name/image.jpg'.

        Args:
            filepath (str): Path to be formatted.
        """
        return re.sub(r'\\+', '/', filepath)

    def get(self, filepath: Union[str, Path]) -> bytes:
        """Read data from a given ``filepath`` with 'rb' mode.

        Args:
            filepath (str or Path): Path to read data.

        Returns:
            bytes: The loaded bytes.
        """
        filepath = self._map_path(filepath)
        filepath = self._format_path(filepath)
        value = self._client.Get(filepath)
        return value


class HardDiskBackend(BaseStorageBackend):
    """Raw hard disks storage backend."""

    def get(self, filepath: Union[str, Path]) -> bytes:
        """Read data from a given ``filepath`` with 'rb' mode.

        Args:
            filepath (str or Path): Path to read data.

        Returns:
            bytes: Expected bytes object.
        """
        with open(filepath, 'rb') as f:
            value_buf = f.read()
        return value_buf


def generate_gaussian_noise(img, sigma=10, gray_noise=False):
    """Generate Gaussian noise.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        sigma (float): Noise scale (measured in range 255). Default: 10.

    Returns:
        (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
            float32.
    """
    if gray_noise:
        noise = np.float32(np.random.randn(*(img.shape[0:2]))) * sigma / 255.
        noise = np.expand_dims(noise, axis=2).repeat(3, axis=2)
    else:
        noise = np.float32(np.random.randn(*(img.shape))) * sigma / 255.
    return noise


def add_gaussian_noise(img, sigma=10, clip=True, rounds=False, gray_noise=False):
    """Add Gaussian noise.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        sigma (float): Noise scale (measured in range 255). Default: 10.

    Returns:
        (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
            float32.
    """
    noise = generate_gaussian_noise(img, sigma, gray_noise)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


def generate_gaussian_noise_pt(img, sigma=10, gray_noise=0):
    """Add Gaussian noise (PyTorch version).

    Args:
        img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
        scale (float | Tensor): Noise scale. Default: 1.0.

    Returns:
        (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
            float32.
    """
    b, _, h, w = img.size()
    if not isinstance(sigma, (float, int)):
        sigma = sigma.view(img.size(0), 1, 1, 1)
    if isinstance(gray_noise, (float, int)):
        cal_gray_noise = gray_noise > 0
    else:
        gray_noise = gray_noise.view(b, 1, 1, 1)
        cal_gray_noise = torch.sum(gray_noise) > 0

    if cal_gray_noise:
        noise_gray = torch.randn(*img.size()[2:4], dtype=img.dtype, device=img.device) * sigma / 255.
        noise_gray = noise_gray.view(b, 1, h, w)

    # always calculate color noise
    noise = torch.randn(*img.size(), dtype=img.dtype, device=img.device) * sigma / 255.

    if cal_gray_noise:
        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
    return noise


def add_gaussian_noise_pt(img, sigma=10, gray_noise=0, clip=True, rounds=False):
    """Add Gaussian noise (PyTorch version).

    Args:
        img (Tensor): Shape (b, c, h, w), range[0, 1], float32.
        scale (float | Tensor): Noise scale. Default: 1.0.

    Returns:
        (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
            float32.
    """
    noise = generate_gaussian_noise_pt(img, sigma, gray_noise)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


# ----------------------- Random Gaussian Noise ----------------------- #
def random_generate_gaussian_noise(img, sigma_range=(0, 10), gray_prob=0):
    sigma = np.random.uniform(sigma_range[0], sigma_range[1])
    if np.random.uniform() < gray_prob:
        gray_noise = True
    else:
        gray_noise = False
    return generate_gaussian_noise(img, sigma, gray_noise)


def random_add_gaussian_noise(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    noise = random_generate_gaussian_noise(img, sigma_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


def random_generate_gaussian_noise_pt(img, sigma_range=(0, 10), gray_prob=0):
    sigma = torch.rand(
        img.size(0), dtype=img.dtype, device=img.device) * (sigma_range[1] - sigma_range[0]) + sigma_range[0]
    gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
    gray_noise = (gray_noise < gray_prob).float()
    return generate_gaussian_noise_pt(img, sigma, gray_noise)


def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


# ----------------------- Poisson (Shot) Noise ----------------------- #


def generate_poisson_noise(img, scale=1.0, gray_noise=False):
    """Generate poisson noise.

    Reference: https://github.com/scikit-image/scikit-image/blob/main/skimage/util/noise.py#L37-L219

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        scale (float): Noise scale. Default: 1.0.
        gray_noise (bool): Whether generate gray noise. Default: False.

    Returns:
        (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
            float32.
    """
    if gray_noise:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # round and clip image for counting vals correctly
    img = np.clip((img * 255.0).round(), 0, 255) / 255.
    vals = len(np.unique(img))
    vals = 2 ** np.ceil(np.log2(vals))
    out = np.float32(np.random.poisson(img * vals) / float(vals))
    noise = out - img
    if gray_noise:
        noise = np.repeat(noise[:, :, np.newaxis], 3, axis=2)
    return noise * scale


def add_poisson_noise(img, scale=1.0, clip=True, rounds=False, gray_noise=False):
    """Add poisson noise.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        scale (float): Noise scale. Default: 1.0.
        gray_noise (bool): Whether generate gray noise. Default: False.

    Returns:
        (Numpy array): Returned noisy image, shape (h, w, c), range[0, 1],
            float32.
    """
    noise = generate_poisson_noise(img, scale, gray_noise)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


def generate_poisson_noise_pt(img, scale=1.0, gray_noise=0):
    """Generate a batch of poisson noise (PyTorch version)

    Args:
        img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
        scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
            Default: 1.0.
        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
            0 for False, 1 for True. Default: 0.

    Returns:
        (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
            float32.
    """
    b, _, h, w = img.size()
    if isinstance(gray_noise, (float, int)):
        cal_gray_noise = gray_noise > 0
    else:
        gray_noise = gray_noise.view(b, 1, 1, 1)
        cal_gray_noise = torch.sum(gray_noise) > 0
    if cal_gray_noise:
        img_gray = rgb_to_grayscale(img, num_output_channels=1)
        # round and clip image for counting vals correctly
        img_gray = torch.clamp((img_gray * 255.0).round(), 0, 255) / 255.
        # use for-loop to get the unique values for each sample
        vals_list = [len(torch.unique(img_gray[i, :, :, :])) for i in range(b)]
        vals_list = [2 ** np.ceil(np.log2(vals)) for vals in vals_list]
        vals = img_gray.new_tensor(vals_list).view(b, 1, 1, 1)
        out = torch.poisson(img_gray * vals) / vals
        noise_gray = out - img_gray
        noise_gray = noise_gray.expand(b, 3, h, w)

    # always calculate color noise
    # round and clip image for counting vals correctly
    img = torch.clamp((img * 255.0).round(), 0, 255) / 255.
    # use for-loop to get the unique values for each sample
    vals_list = [len(torch.unique(img[i, :, :, :])) for i in range(b)]
    vals_list = [2 ** np.ceil(np.log2(vals)) for vals in vals_list]
    vals = img.new_tensor(vals_list).view(b, 1, 1, 1)
    out = torch.poisson(img * vals) / vals
    noise = out - img
    if cal_gray_noise:
        noise = noise * (1 - gray_noise) + noise_gray * gray_noise
    if not isinstance(scale, (float, int)):
        scale = scale.view(b, 1, 1, 1)
    return noise * scale


def add_poisson_noise_pt(img, scale=1.0, clip=True, rounds=False, gray_noise=0):
    """Add poisson noise to a batch of images (PyTorch version).

    Args:
        img (Tensor): Input image, shape (b, c, h, w), range [0, 1], float32.
        scale (float | Tensor): Noise scale. Number or Tensor with shape (b).
            Default: 1.0.
        gray_noise (float | Tensor): 0-1 number or Tensor with shape (b).
            0 for False, 1 for True. Default: 0.

    Returns:
        (Tensor): Returned noisy image, shape (b, c, h, w), range[0, 1],
            float32.
    """
    noise = generate_poisson_noise_pt(img, scale, gray_noise)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


# ----------------------- Random Poisson (Shot) Noise ----------------------- #


def random_generate_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0):
    scale = np.random.uniform(scale_range[0], scale_range[1])
    if np.random.uniform() < gray_prob:
        gray_noise = True
    else:
        gray_noise = False
    return generate_poisson_noise(img, scale, gray_noise)


def random_add_poisson_noise(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    noise = random_generate_poisson_noise(img, scale_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = np.clip((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = np.clip(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


def random_generate_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0):
    scale = torch.rand(
        img.size(0), dtype=img.dtype, device=img.device) * (scale_range[1] - scale_range[0]) + scale_range[0]
    gray_noise = torch.rand(img.size(0), dtype=img.dtype, device=img.device)
    gray_noise = (gray_noise < gray_prob).float()
    return generate_poisson_noise_pt(img, scale, gray_noise)


def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):
    noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)
    out = img + noise
    if clip and rounds:
        out = torch.clamp((out * 255.0).round(), 0, 255) / 255.
    elif clip:
        out = torch.clamp(out, 0, 1)
    elif rounds:
        out = (out * 255.0).round() / 255.
    return out


# ------------------------------------------------------------------------ #
# --------------------------- JPEG compression --------------------------- #
# ------------------------------------------------------------------------ #


def add_jpg_compression(img, quality=90):
    """Add JPG compression artifacts.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        quality (float): JPG compression quality. 0 for lowest quality, 100 for
            best quality. Default: 90.

    Returns:
        (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
            float32.
    """
    img = np.clip(img, 0, 1)
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
    _, encimg = cv2.imencode('.jpg', img * 255., encode_param)
    img = np.float32(cv2.imdecode(encimg, 1)) / 255.
    return img


def random_add_jpg_compression(img, quality_range=(90, 100)):
    """Randomly add JPG compression artifacts.

    Args:
        img (Numpy array): Input image, shape (h, w, c), range [0, 1], float32.
        quality_range (tuple[float] | list[float]): JPG compression quality
            range. 0 for lowest quality, 100 for best quality.
            Default: (90, 100).

    Returns:
        (Numpy array): Returned image after JPG, shape (h, w, c), range[0, 1],
            float32.
    """
    quality = np.random.uniform(quality_range[0], quality_range[1])
    return add_jpg_compression(img, int(quality))


def load_file_list(file_list_path: str) -> List[Dict[str, str]]:
    files = []
    with open(file_list_path, "r") as fin:
        for line in fin:
            path = line.strip()
            if path:
                files.append({"image_path": path, "prompt": ""})
    return files


# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/image_datasets.py
def center_crop_arr(pil_image, image_size):
    # We are not on a new enough PIL to support the `reducing_gap`
    # argument, which uses BOX downsampling at powers of two first.
    # Thus, we do it by hand to improve downsample quality.
    while min(*pil_image.size) >= 2 * image_size:
        pil_image = pil_image.resize(
            tuple(x // 2 for x in pil_image.size), resample=Image.BOX
        )

    scale = image_size / min(*pil_image.size)
    pil_image = pil_image.resize(
        tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
    )

    arr = np.array(pil_image)
    crop_y = (arr.shape[0] - image_size) // 2
    crop_x = (arr.shape[1] - image_size) // 2
    return arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size]


# https://github.com/openai/guided-diffusion/blob/main/guided_diffusion/image_datasets.py
def random_crop_arr(pil_image, image_size, min_crop_frac=0.8, max_crop_frac=1.0):
    min_smaller_dim_size = math.ceil(image_size / max_crop_frac)
    max_smaller_dim_size = math.ceil(image_size / min_crop_frac)
    smaller_dim_size = random.randrange(min_smaller_dim_size, max_smaller_dim_size + 1)

    # We are not on a new enough PIL to support the `reducing_gap`
    # argument, which uses BOX downsampling at powers of two first.
    # Thus, we do it by hand to improve downsample quality.
    while min(*pil_image.size) >= 2 * smaller_dim_size:
        pil_image = pil_image.resize(
            tuple(x // 2 for x in pil_image.size), resample=Image.BOX
        )

    scale = smaller_dim_size / min(*pil_image.size)
    pil_image = pil_image.resize(
        tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC
    )

    arr = np.array(pil_image)
    crop_y = random.randrange(arr.shape[0] - image_size + 1)
    crop_x = random.randrange(arr.shape[1] - image_size + 1)
    return arr[crop_y: crop_y + image_size, crop_x: crop_x + image_size]
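For orientation, these helpers were consumed elsewhere in the repository (see utils/create_degradation.py below) roughly in the following way. This is only an illustrative sketch: the image path and parameter ranges are placeholders, not values taken from this codebase.

import cv2
import numpy as np
from utils.basicsr_custom import random_mixed_kernels, random_add_gaussian_noise, random_add_jpg_compression

# Hypothetical blur -> noise -> JPEG chain on a float32 BGR image in [0, 1].
img = cv2.imread('example.png').astype(np.float32) / 255.  # placeholder path
kernel = random_mixed_kernels(['iso', 'aniso'], [0.5, 0.5], kernel_size=21,
                              sigma_x_range=(0.6, 5), sigma_y_range=(0.6, 5))
img_lq = cv2.filter2D(img, -1, kernel)                               # blur with a random mixed kernel
img_lq = random_add_gaussian_noise(img_lq, sigma_range=(0, 20))      # sigma is measured in the 0-255 range
img_lq = random_add_jpg_compression(img_lq, quality_range=(30, 100)) # JPEG artifacts at a random quality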
utils/create_degradation.py
DELETED
@@ -1,144 +0,0 @@
import math
from functools import partial

import cv2
import numpy as np
import torch
from basicsr.data import degradations as degradations
from basicsr.data.transforms import augment
from basicsr.utils import img2tensor
from torch.nn.functional import interpolate
from torchvision.transforms import Compose
from utils.basicsr_custom import (
    random_mixed_kernels,
    random_add_gaussian_noise,
    random_add_jpg_compression,
)


def create_degradation(degradation):
    if degradation == 'sr_bicubic_x8_gaussian_noise_005':
        return Compose([
            partial(down_scale, scale_factor=1.0 / 8.0, mode='bicubic'),
            partial(add_gaussian_noise, std=0.05),
            partial(interpolate, scale_factor=8.0, mode='nearest-exact'),
            partial(torch.clip, min=0, max=1),
            partial(torch.squeeze, dim=0),
            lambda x: (x, None)
        ])
    elif degradation == 'gaussian_noise_035':
        return Compose([
            partial(add_gaussian_noise, std=0.35),
            partial(torch.clip, min=0, max=1),
            partial(torch.squeeze, dim=0),
            lambda x: (x, None)
        ])
    elif degradation == 'colorization_gaussian_noise_025':
        return Compose([
            lambda x: torch.mean(x, dim=0, keepdim=True),
            partial(add_gaussian_noise, std=0.25),
            partial(torch.clip, min=0, max=1),
            lambda x: (x, None)
        ])
    elif degradation == 'random_inpainting_gaussian_noise_01':
        def inpainting_dps(x):
            total = x.shape[1] ** 2
            # random pixel sampling
            l, h = [0.9, 0.9]
            prob = np.random.uniform(l, h)
            mask_vec = torch.ones([1, x.shape[1] * x.shape[1]])
            samples = np.random.choice(x.shape[1] * x.shape[1], int(total * prob), replace=False)
            mask_vec[:, samples] = 0
            mask_b = mask_vec.view(1, x.shape[1], x.shape[1])
            mask_b = mask_b.repeat(3, 1, 1)
            mask = torch.ones_like(x, device=x.device)
            mask[:, ...] = mask_b
            return add_gaussian_noise(x * mask, 0.1).clip(0, 1), None

        return inpainting_dps
    elif degradation == 'difface':
        def deg(x):
            blur_kernel_size = 41
            kernel_list = ['iso', 'aniso']
            kernel_prob = [0.5, 0.5]
            blur_sigma = [0.1, 15]
            downsample_range = [0.8, 32]
            noise_range = [0, 20]
            jpeg_range = [30, 100]
            gt_gray = True
            gray_prob = 0.01
            x = x.permute(1, 2, 0).numpy()[..., ::-1].astype(np.float32)
            # random horizontal flip
            img_gt = augment(x.copy(), hflip=True, rotation=False)
            h, w, _ = img_gt.shape

            # ------------------------ generate lq image ------------------------ #
            # blur
            kernel = degradations.random_mixed_kernels(
                kernel_list,
                kernel_prob,
                blur_kernel_size,
                blur_sigma,
                blur_sigma, [-math.pi, math.pi],
                noise_range=None)
            img_lq = cv2.filter2D(img_gt, -1, kernel)
            # downsample
            scale = np.random.uniform(downsample_range[0], downsample_range[1])
            img_lq = cv2.resize(img_lq, (int(w // scale), int(h // scale)), interpolation=cv2.INTER_LINEAR)
            # noise
            if noise_range is not None:
                img_lq = random_add_gaussian_noise(img_lq, noise_range)
            # jpeg compression
            if jpeg_range is not None:
                img_lq = random_add_jpg_compression(img_lq, jpeg_range)

            # resize to original size
            img_lq = cv2.resize(img_lq, (w, h), interpolation=cv2.INTER_LINEAR)

            # random color jitter (only for lq)
            # if self.color_jitter_prob is not None and (np.random.uniform() < self.color_jitter_prob):
            #     img_lq = self.color_jitter(img_lq, self.color_jitter_shift)
            # random to gray (only for lq)
            if np.random.uniform() < gray_prob:
                img_lq = cv2.cvtColor(img_lq, cv2.COLOR_BGR2GRAY)
                img_lq = np.tile(img_lq[:, :, None], [1, 1, 3])
                if gt_gray:  # whether convert GT to gray images
                    img_gt = cv2.cvtColor(img_gt, cv2.COLOR_BGR2GRAY)
                    img_gt = np.tile(img_gt[:, :, None], [1, 1, 3])  # repeat the color channels

            # BGR to RGB, HWC to CHW, numpy to tensor
            img_gt, img_lq = img2tensor([img_gt, img_lq], bgr2rgb=True, float32=True)

            # random color jitter (pytorch version) (only for lq)
            # if self.color_jitter_pt_prob is not None and (np.random.uniform() < self.color_jitter_pt_prob):
            #     brightness = self.opt.get('brightness', (0.5, 1.5))
            #     contrast = self.opt.get('contrast', (0.5, 1.5))
            #     saturation = self.opt.get('saturation', (0, 1.5))
            #     hue = self.opt.get('hue', (-0.1, 0.1))
            #     img_lq = self.color_jitter_pt(img_lq, brightness, contrast, saturation, hue)

            # round and clip
            img_lq = torch.clamp((img_lq * 255.0).round(), 0, 255) / 255.

            return img_lq, img_gt.clip(0, 1)

        return deg
    else:
        raise NotImplementedError()


def down_scale(x, scale_factor, mode):
    with torch.no_grad():
        return interpolate(x.unsqueeze(0),
                           scale_factor=scale_factor,
                           mode=mode,
                           antialias=True,
                           align_corners=False).clip(0, 1)


def add_gaussian_noise(x, std):
    with torch.no_grad():
        x = x + torch.randn_like(x) * std
        return x
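Before this commit, the factory above could be exercised along these lines; the degradation name and tensor shape here are arbitrary choices for illustration, not values prescribed by the repository.

import torch
from utils.create_degradation import create_degradation  # module removed in this commit

clean = torch.rand(3, 512, 512)                                  # hypothetical clean image in [0, 1]
degrade = create_degradation('colorization_gaussian_noise_025')  # returns a callable pipeline
lq, gt = degrade(clean)                                          # gt is None for this branch; 'difface' also returns a ground-truth image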
utils/img_utils.py
DELETED
@@ -1,5 +0,0 @@
from torchvision.utils import make_grid


def create_grid(img, normalize=False, num_images=5):
    return make_grid(img[:num_images], padding=0, normalize=normalize, nrow=16)
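The removed helper was a thin wrapper around torchvision's make_grid; a minimal usage sketch follows, where the batch shape and the normalize flag are assumptions for illustration only.

import torch
from utils.img_utils import create_grid  # module removed in this commit

batch = torch.rand(8, 3, 64, 64)           # hypothetical batch of RGB images
grid = create_grid(batch, normalize=True)  # 3 x 64 x 320 grid built from the first 5 images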