code (string, 26–870k chars)
docstring (string, 1–65.6k chars)
func_name (string, 1–194 chars)
language (string, 1 class)
repo (string, 8–68 chars)
path (string, 5–194 chars)
url (string, 46–254 chars)
license (string, 4 classes)
def __init__(self, features, activation, bn):
    """Init.

    Args:
        features (int): number of features
    """
    super().__init__()

    self.bn = bn
    self.groups = 1

    self.conv1 = nn.Conv2d(
        features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
    )
    self.conv2 = nn.Conv2d(
        features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
    )

    if self.bn:
        self.bn1 = nn.BatchNorm2d(features)
        self.bn2 = nn.BatchNorm2d(features)

    self.activation = activation
    self.skip_add = nn.quantized.FloatFunctional()
Init. Args: features (int): number of features
__init__
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/blocks.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/blocks.py
MIT
def forward(self, x):
    """Forward pass.

    Args:
        x (tensor): input

    Returns:
        tensor: output
    """
    out = self.activation(x)
    out = self.conv1(out)
    if self.bn:
        out = self.bn1(out)

    out = self.activation(out)
    out = self.conv2(out)
    if self.bn:
        out = self.bn2(out)

    if self.groups > 1:
        out = self.conv_merge(out)

    return self.skip_add.add(out, x)
Forward pass. Args: x (tensor): input Returns: tensor: output
forward
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/blocks.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/blocks.py
MIT
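A minimal usage sketch for the residual unit above, assuming the AnyDoor repo is on PYTHONPATH; shapes are illustrative:

import torch
import torch.nn as nn
from ldm.modules.midas.midas.blocks import ResidualConvUnit_custom

unit = ResidualConvUnit_custom(features=64, activation=nn.ReLU(False), bn=True)
x = torch.randn(1, 64, 32, 32)   # NCHW feature map
out = unit(x)                    # residual output, same shape as the input
assert out.shape == x.shape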
def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True):
    """Init.

    Args:
        features (int): number of features
    """
    super(FeatureFusionBlock_custom, self).__init__()

    self.deconv = deconv
    self.align_corners = align_corners
    self.groups = 1
    self.expand = expand

    out_features = features
    if self.expand:
        out_features = features // 2

    self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)

    self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
    self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)

    self.skip_add = nn.quantized.FloatFunctional()
Init. Args: features (int): number of features
__init__
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/blocks.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/blocks.py
MIT
def forward(self, *xs):
    """Forward pass.

    Returns:
        tensor: output
    """
    output = xs[0]

    if len(xs) == 2:
        res = self.resConfUnit1(xs[1])
        output = self.skip_add.add(output, res)  # output += res

    output = self.resConfUnit2(output)

    output = nn.functional.interpolate(
        output, scale_factor=2, mode="bilinear", align_corners=self.align_corners
    )

    output = self.out_conv(output)

    return output
Forward pass. Returns: tensor: output
forward
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/blocks.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/blocks.py
MIT
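A small sketch of how the fusion block combines a decoder feature map with a skip connection and upsamples by 2x, assuming the repo is importable; sizes are dummies:

import torch
import torch.nn as nn
from ldm.modules.midas.midas.blocks import FeatureFusionBlock_custom

block = FeatureFusionBlock_custom(features=64, activation=nn.ReLU(False), bn=False)
deep = torch.randn(1, 64, 16, 16)   # coarser decoder path
skip = torch.randn(1, 64, 16, 16)   # encoder skip connection
out = block(deep, skip)             # fused, then upsampled bilinearly
assert out.shape == (1, 64, 32, 32)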
def load(self, path):
    """Load model from file.

    Args:
        path (str): file path
    """
    parameters = torch.load(path, map_location=torch.device('cpu'))

    if "optimizer" in parameters:
        parameters = parameters["model"]

    self.load_state_dict(parameters)
Load model from file. Args: path (str): file path
load
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/base_model.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/base_model.py
MIT
def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True,
             channels_last=False, align_corners=True, blocks={'expand': True}):
    """Init.

    Args:
        path (str, optional): Path to saved model. Defaults to None.
        features (int, optional): Number of features. Defaults to 64.
        backbone (str, optional): Backbone network for encoder. Defaults to efficientnet_lite3.
    """
    print("Loading weights: ", path)

    super(MidasNet_small, self).__init__()

    use_pretrained = False if path else True

    self.channels_last = channels_last
    self.blocks = blocks
    self.backbone = backbone

    self.groups = 1

    features1 = features
    features2 = features
    features3 = features
    features4 = features
    self.expand = False
    if "expand" in self.blocks and self.blocks['expand'] == True:
        self.expand = True
        features1 = features
        features2 = features * 2
        features3 = features * 4
        features4 = features * 8

    self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained,
                                                  groups=self.groups, expand=self.expand, exportable=exportable)

    self.scratch.activation = nn.ReLU(False)

    self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False,
                                                        bn=False, expand=self.expand, align_corners=align_corners)
    self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False,
                                                        bn=False, expand=self.expand, align_corners=align_corners)
    self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False,
                                                        bn=False, expand=self.expand, align_corners=align_corners)
    self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False,
                                                        bn=False, align_corners=align_corners)

    self.scratch.output_conv = nn.Sequential(
        nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups),
        Interpolate(scale_factor=2, mode="bilinear"),
        nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
        self.scratch.activation,
        nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
        nn.ReLU(True) if non_negative else nn.Identity(),
        nn.Identity(),
    )

    if path:
        self.load(path)
Init. Args: path (str, optional): Path to saved model. Defaults to None. features (int, optional): Number of features. Defaults to 64. backbone (str, optional): Backbone network for encoder. Defaults to efficientnet_lite3
__init__
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/midas_net_custom.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/midas_net_custom.py
MIT
def forward(self, x):
    """Forward pass.

    Args:
        x (tensor): input data (image)

    Returns:
        tensor: depth
    """
    if self.channels_last:
        print("self.channels_last = ", self.channels_last)
        # contiguous() is not in-place; the original code dropped the result
        x = x.contiguous(memory_format=torch.channels_last)

    layer_1 = self.pretrained.layer1(x)
    layer_2 = self.pretrained.layer2(layer_1)
    layer_3 = self.pretrained.layer3(layer_2)
    layer_4 = self.pretrained.layer4(layer_3)

    layer_1_rn = self.scratch.layer1_rn(layer_1)
    layer_2_rn = self.scratch.layer2_rn(layer_2)
    layer_3_rn = self.scratch.layer3_rn(layer_3)
    layer_4_rn = self.scratch.layer4_rn(layer_4)

    path_4 = self.scratch.refinenet4(layer_4_rn)
    path_3 = self.scratch.refinenet3(path_4, layer_3_rn)
    path_2 = self.scratch.refinenet2(path_3, layer_2_rn)
    path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

    out = self.scratch.output_conv(path_1)

    return torch.squeeze(out, dim=1)
Forward pass. Args: x (tensor): input data (image) Returns: tensor: depth
forward
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/midas_net_custom.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/midas_net_custom.py
MIT
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Resize the sample to ensure the given size. Keeps aspect ratio.

    Args:
        sample (dict): sample
        size (tuple): image size

    Returns:
        tuple: new size
    """
    shape = list(sample["disparity"].shape)

    if shape[0] >= size[0] and shape[1] >= size[1]:
        # already large enough; return the current size (the original returned
        # the sample dict here, contradicting the docstring)
        return tuple(shape)

    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize
    sample["image"] = cv2.resize(
        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
    )

    sample["disparity"] = cv2.resize(
        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
    )
    sample["mask"] = cv2.resize(
        sample["mask"].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    sample["mask"] = sample["mask"].astype(bool)

    return tuple(shape)
Resize the sample to ensure the given size. Keeps aspect ratio. Args: sample (dict): sample size (tuple): image size Returns: tuple: new size
apply_min_size
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/transforms.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/transforms.py
MIT
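A quick sketch of calling apply_min_size on a sample dict, assuming the repo is on PYTHONPATH; the arrays are dummies:

import numpy as np
from ldm.modules.midas.midas.transforms import apply_min_size

sample = {
    "image": np.zeros((240, 320, 3), dtype=np.float32),
    "disparity": np.zeros((240, 320), dtype=np.float32),
    "mask": np.ones((240, 320), dtype=bool),
}
new_hw = apply_min_size(sample, size=(384, 384))  # upscales so both dims >= 384
print(new_hw)  # (384, 512): scale 1.6 applied to both dims, aspect ratio kept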
def __init__(
    self,
    width,
    height,
    resize_target=True,
    keep_aspect_ratio=False,
    ensure_multiple_of=1,
    resize_method="lower_bound",
    image_interpolation_method=cv2.INTER_AREA,
):
    """Init.

    Args:
        width (int): desired output width
        height (int): desired output height
        resize_target (bool, optional):
            True: Resize the full sample (image, mask, target).
            False: Resize image only.
            Defaults to True.
        keep_aspect_ratio (bool, optional):
            True: Keep the aspect ratio of the input sample. Output sample might
            not have the given width and height, and resize behaviour depends on
            the parameter 'resize_method'. Defaults to False.
        ensure_multiple_of (int, optional):
            Output width and height is constrained to be multiple of this
            parameter. Defaults to 1.
        resize_method (str, optional):
            "lower_bound": Output will be at least as large as the given size.
            "upper_bound": Output will be at max as large as the given size.
                (Output size might be smaller than given size.)
            "minimal": Scale as least as possible. (Output size might be smaller
                than given size.)
            Defaults to "lower_bound".
    """
    self.__width = width
    self.__height = height

    self.__resize_target = resize_target
    self.__keep_aspect_ratio = keep_aspect_ratio
    self.__multiple_of = ensure_multiple_of
    self.__resize_method = resize_method
    self.__image_interpolation_method = image_interpolation_method
Init. Args: width (int): desired output width height (int): desired output height resize_target (bool, optional): True: Resize the full sample (image, mask, target). False: Resize image only. Defaults to True. keep_aspect_ratio (bool, optional): True: Keep the aspect ratio of the input sample. Output sample might not have the given width and height, and resize behaviour depends on the parameter 'resize_method'. Defaults to False. ensure_multiple_of (int, optional): Output width and height is constrained to be multiple of this parameter. Defaults to 1. resize_method (str, optional): "lower_bound": Output will be at least as large as the given size. "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) "minimal": Scale as least as possible. (Output size might be smaller than given size.) Defaults to "lower_bound".
__init__
python
ali-vilab/AnyDoor
ldm/modules/midas/midas/transforms.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/midas/midas/transforms.py
MIT
def imssave(imgs, img_path):
    """
    imgs: list, N images of size WxHxC
    """
    img_name, ext = os.path.splitext(os.path.basename(img_path))
    for i, img in enumerate(imgs):
        if img.ndim == 3:
            img = img[:, :, [2, 1, 0]]  # RGB -> BGR for cv2.imwrite
        new_path = os.path.join(os.path.dirname(img_path), img_name + '_s{:04d}'.format(i) + '.png')
        cv2.imwrite(new_path, img)
imgs: list, N images of size WxHxC
imssave
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
    """
    Split the large images from original_dataroot into small overlapped images
    of size (p_size)x(p_size), and save them into taget_dataroot; only images
    larger than (p_max)x(p_max) will be split.

    Args:
        original_dataroot:
        taget_dataroot:
        p_size: size of small images
        p_overlap: overlap between patches; the training patch size is a good choice
        p_max: images smaller than (p_max)x(p_max) are kept unchanged
    """
    paths = get_image_paths(original_dataroot)
    for img_path in paths:
        # img_name, ext = os.path.splitext(os.path.basename(img_path))
        img = imread_uint(img_path, n_channels=n_channels)
        patches = patches_from_image(img, p_size, p_overlap, p_max)
        imssave(patches, os.path.join(taget_dataroot, os.path.basename(img_path)))
Split the large images from original_dataroot into small overlapped images of size (p_size)x(p_size), and save them into taget_dataroot; only images larger than (p_max)x(p_max) will be split. Args: original_dataroot: taget_dataroot: p_size: size of small images p_overlap: overlap between patches; the training patch size is a good choice p_max: images smaller than (p_max)x(p_max) are kept unchanged.
split_imageset
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    '''
    Converts a torch Tensor into an image Numpy array of BGR channel order
    Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
    Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    '''
    tensor = tensor.squeeze().float().cpu().clamp_(*min_max)  # squeeze first, then clamp
    tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0])  # to range [0,1]
    n_dim = tensor.dim()
    if n_dim == 4:
        n_img = len(tensor)
        img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 3:
        img_np = tensor.numpy()
        img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))  # HWC, BGR
    elif n_dim == 2:
        img_np = tensor.numpy()
    else:
        raise TypeError(
            'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
    if out_type == np.uint8:
        # Important. Unlike MATLAB, numpy.uint8() WILL NOT round by default.
        img_np = (img_np * 255.0).round()
    return img_np.astype(out_type)
Converts a torch Tensor into an image Numpy array of BGR channel order Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
tensor2img
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
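A short usage sketch for tensor2img, assuming the repo is importable; the input tensor is a dummy:

import torch
from ldm.modules.image_degradation.utils_image import tensor2img

t = torch.rand(3, 64, 64)     # CHW, RGB, values in [0, 1]
img = tensor2img(t)           # HWC, BGR, uint8 in [0, 255]
print(img.shape, img.dtype)   # (64, 64, 3) uint8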
def augment_img(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    '''
    if mode == 0:
        return img
    elif mode == 1:
        return np.flipud(np.rot90(img))
    elif mode == 2:
        return np.flipud(img)
    elif mode == 3:
        return np.rot90(img, k=3)
    elif mode == 4:
        return np.flipud(np.rot90(img, k=2))
    elif mode == 5:
        return np.rot90(img)
    elif mode == 6:
        return np.rot90(img, k=2)
    elif mode == 7:
        return np.flipud(np.rot90(img, k=3))
Kai Zhang (github: https://github.com/cszn)
augment_img
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
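A tiny demo of the eight augmentation modes (identity, the three rotations, and flipped versions of each), assuming the repo is importable; the array is a dummy:

import numpy as np
from ldm.modules.image_degradation.utils_image import augment_img

img = np.arange(12).reshape(3, 4)
views = [augment_img(img, mode=m) for m in range(8)]
print([v.shape for v in views])  # rotations swap H and W: (3, 4) or (4, 3)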
def augment_img_tensor4(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    '''
    if mode == 0:
        return img
    elif mode == 1:
        return img.rot90(1, [2, 3]).flip([2])
    elif mode == 2:
        return img.flip([2])
    elif mode == 3:
        return img.rot90(3, [2, 3])
    elif mode == 4:
        return img.rot90(2, [2, 3]).flip([2])
    elif mode == 5:
        return img.rot90(1, [2, 3])
    elif mode == 6:
        return img.rot90(2, [2, 3])
    elif mode == 7:
        return img.rot90(3, [2, 3]).flip([2])
Kai Zhang (github: https://github.com/cszn)
augment_img_tensor4
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
def augment_img_tensor(img, mode=0):
    '''Kai Zhang (github: https://github.com/cszn)
    '''
    img_size = img.size()
    img_np = img.data.cpu().numpy()
    if len(img_size) == 3:
        img_np = np.transpose(img_np, (1, 2, 0))
    elif len(img_size) == 4:
        img_np = np.transpose(img_np, (2, 3, 1, 0))
    img_np = augment_img(img_np, mode=mode)
    img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
    if len(img_size) == 3:
        img_tensor = img_tensor.permute(2, 0, 1)
    elif len(img_size) == 4:
        img_tensor = img_tensor.permute(3, 2, 0, 1)

    return img_tensor.type_as(img)
Kai Zhang (github: https://github.com/cszn)
augment_img_tensor
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
def rgb2ycbcr(img, only_y=True):
    '''same as matlab rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)  # astype() returns a copy; the original code dropped the result
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
                              [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
same as matlab rgb2ycbcr only_y: only return Y channel Input: uint8, [0, 255] float, [0, 1]
rgb2ycbcr
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
def ycbcr2rgb(img):
    '''same as matlab ycbcr2rgb
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)  # astype() returns a copy; the original code dropped the result
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
                          [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
same as matlab ycbcr2rgb Input: uint8, [0, 255] float, [0, 1]
ycbcr2rgb
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
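A round-trip sketch for the two color conversions above, assuming the repo is importable; exactness is limited only by float error:

import numpy as np
from ldm.modules.image_degradation.utils_image import rgb2ycbcr, ycbcr2rgb

rgb = np.random.rand(8, 8, 3).astype(np.float32)  # float image in [0, 1]
ycbcr = rgb2ycbcr(rgb, only_y=False)
back = ycbcr2rgb(ycbcr)
print(np.abs(back - rgb).max())  # small residual, from float round-off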
def bgr2ycbcr(img, only_y=True):
    '''bgr version of rgb2ycbcr
    only_y: only return Y channel
    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    in_img_type = img.dtype
    img = img.astype(np.float32)  # astype() returns a copy; the original code dropped the result
    if in_img_type != np.uint8:
        img *= 255.
    # convert
    if only_y:
        rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
                              [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
    if in_img_type == np.uint8:
        rlt = rlt.round()
    else:
        rlt /= 255.
    return rlt.astype(in_img_type)
bgr version of rgb2ycbcr only_y: only return Y channel Input: uint8, [0, 255] float, [0, 1]
bgr2ycbcr
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
def calculate_ssim(img1, img2, border=0):
    '''calculate SSIM; produces the same output as MATLAB's
    img1, img2: [0, 255]
    '''
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    h, w = img1.shape[:2]
    img1 = img1[border:h - border, border:w - border]
    img2 = img2[border:h - border, border:w - border]

    if img1.ndim == 2:
        return ssim(img1, img2)
    elif img1.ndim == 3:
        if img1.shape[2] == 3:
            ssims = []
            for i in range(3):
                ssims.append(ssim(img1[:, :, i], img2[:, :, i]))
            return np.array(ssims).mean()
        elif img1.shape[2] == 1:
            return ssim(np.squeeze(img1), np.squeeze(img2))
    else:
        raise ValueError('Wrong input image dimensions.')
calculate SSIM; produces the same output as MATLAB's img1, img2: [0, 255]
calculate_ssim
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/utils_image.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/utils_image.py
MIT
def modcrop_np(img, sf):
    '''
    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor

    Return:
        cropped image
    '''
    w, h = img.shape[:2]
    im = np.copy(img)
    return im[:w - w % sf, :h - h % sf, ...]
Args: img: numpy image, WxH or WxHxC sf: scale factor Return: cropped image
modcrop_np
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    k_size = k.shape[0]
    # Calculate the big kernel's size
    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
    # Loop over the small kernel to fill the big one
    for r in range(k_size):
        for c in range(k_size):
            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
    # Crop the edges of the big kernel to ignore very small values and increase run time of SR
    crop = k_size // 2
    cropped_big_k = big_k[crop:-crop, crop:-crop]
    # Normalize to 1
    return cropped_big_k / cropped_big_k.sum()
Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)
analytic_kernel
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """ generate an anisotropic Gaussian kernel
    Args:
        ksize : e.g., 15, kernel size
        theta : [0, pi], rotation angle range
        l1    : [0.1, 50], scaling of eigenvalues
        l2    : [0.1, l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.

    Returns:
        k     : kernel
    """
    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]),
               np.array([1., 0.]))
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)

    return k
generate an anisotropic Gaussian kernel Args: ksize : e.g., 15, kernel size theta : [0, pi], rotation angle range l1 : [0.1,50], scaling of eigenvalues l2 : [0.1,l1], scaling of eigenvalues If l1 = l2, will get an isotropic Gaussian kernel. Returns: k : kernel
anisotropic_Gaussian
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
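A quick sketch of generating a rotated anisotropic kernel, assuming the repo (and its gm_blur_kernel helper) is importable:

import numpy as np
from ldm.modules.image_degradation.bsrgan_light import anisotropic_Gaussian

k = anisotropic_Gaussian(ksize=15, theta=np.pi / 4, l1=6, l2=1)  # elongated at 45 degrees
print(k.shape, np.isclose(k.sum(), 1.0))  # (15, 15) True -- kernel is normalized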
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH
        sf: scale factor
        upper_left: shift direction
    """
    h, w = x.shape[:2]
    shift = (sf - 1) * 0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift

    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)

    # NOTE: scipy.interpolate.interp2d is deprecated and was removed in SciPy 1.14;
    # running this requires an older SciPy (or a port to RegularGridInterpolator).
    if x.ndim == 2:
        x = interp2d(xv, yv, x)(x1, y1)
    if x.ndim == 3:
        for i in range(x.shape[-1]):
            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)

    return x
shift pixel for super-resolution with different scale factors Args: x: WxHxC or WxH sf: scale factor upper_left: shift direction
shift_pixel
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def blur(x, k):
    '''
    x: image, NxcxHxW
    k: kernel, Nx1xhxw
    '''
    n, c = x.shape[:2]
    p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
    k = k.repeat(1, c, 1, 1)
    k = k.view(-1, 1, k.shape[2], k.shape[3])
    x = x.view(1, -1, x.shape[2], x.shape[3])
    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
    x = x.view(n, c, x.shape[2], x.shape[3])

    return x
x: image, NxcxHxW k: kernel, Nx1xhxw
blur
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
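A minimal sketch of the per-sample blur above (each image in the batch gets its own kernel via grouped convolution), assuming the repo is importable; tensors are dummies:

import torch
from ldm.modules.image_degradation.bsrgan_light import blur

x = torch.rand(2, 3, 32, 32)        # batch of images
k = torch.ones(2, 1, 5, 5) / 25.0   # one 5x5 box kernel per sample
y = blur(x, k)                      # each image blurred by its own kernel
print(y.shape)                      # torch.Size([2, 3, 32, 32]) -- replicate padding keeps size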
def fspecial(filter_type, *args, **kwargs):
    '''
    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
    '''
    if filter_type == 'gaussian':
        return fspecial_gaussian(*args, **kwargs)
    if filter_type == 'laplacian':
        return fspecial_laplacian(*args, **kwargs)
python code from: https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
fspecial
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def bicubic_degradation(x, sf=3):
    '''
    Args:
        x: HxWxC image, [0, 1]
        sf: down-scale factor

    Return:
        bicubicly downsampled LR image
    '''
    x = util.imresize_np(x, scale=1 / sf)
    return x
Args: x: HxWxC image, [0, 1] sf: down-scale factor Return: bicubicly downsampled LR image
bicubic_degradation
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def srmd_degradation(x, k, sf=3):
    ''' blur + bicubic downsampling
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image

    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    '''
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    x = bicubic_degradation(x, sf=sf)
    return x
blur + bicubic downsampling Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2018learning, title={Learning a single convolutional super-resolution network for multiple degradations}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={3262--3271}, year={2018} }
srmd_degradation
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def dpsr_degradation(x, k, sf=3):
    ''' bicubic downsampling + blur
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image

    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    '''
    x = bicubic_degradation(x, sf=sf)
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
bicubic downsampling + blur Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2019deep, title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={1671--1681}, year={2019} }
dpsr_degradation
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def classical_degradation(x, k, sf=3):
    ''' blur + downsampling
    Args:
        x: HxWxC image, [0, 1]/[0, 255]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image
    '''
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    st = 0
    return x[st::sf, st::sf, ...]
blur + downsampling Args: x: HxWxC image, [0, 1]/[0, 255] k: hxw, double sf: down-scale factor Return: downsampled LR image
classical_degradation
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
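A small sketch chaining a blur kernel with the degradations above, assuming the repo is importable; the image is a dummy:

import numpy as np
from ldm.modules.image_degradation.bsrgan_light import (
    anisotropic_Gaussian, srmd_degradation, classical_degradation)

hq = np.random.rand(96, 96, 3)                     # HxWxC, [0, 1]
k = anisotropic_Gaussian(ksize=15, theta=0.3, l1=4, l2=2)
lr_srmd = srmd_degradation(hq, k, sf=3)            # blur, then bicubic x1/3
lr_classical = classical_degradation(hq, k, sf=3)  # blur, then strided pick
print(lr_srmd.shape, lr_classical.shape)           # (32, 32, 3) (32, 32, 3)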
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    """USM sharpening. borrowed from real-ESRGAN
    Input image: I; Blurry image: B.
    1. K = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur mask:
    4. Out = Mask * K + (1 - Mask) * I

    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 0.5.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int): Mask threshold (on a [0, 255] scale). Default: 10.
    """
    if radius % 2 == 0:
        radius += 1
    blur = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blur
    mask = np.abs(residual) * 255 > threshold
    mask = mask.astype('float32')
    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)

    K = img + weight * residual
    K = np.clip(K, 0, 1)
    return soft_mask * K + (1 - soft_mask) * img
USM sharpening. borrowed from real-ESRGAN Input image: I; Blurry image: B. 1. K = I + weight * (I - B) 2. Mask = 1 if abs(I - B) > threshold, else: 0 3. Blur mask: 4. Out = Mask * K + (1 - Mask) * I Args: img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. weight (float): Sharp weight. Default: 0.5. radius (float): Kernel size of Gaussian blur. Default: 50. threshold (int): Mask threshold (on a [0, 255] scale). Default: 10.
add_sharpening
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
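A usage sketch for the USM sharpening above, assuming the repo is importable; the image is a dummy:

import numpy as np
from ldm.modules.image_degradation.bsrgan_light import add_sharpening

img = np.random.rand(64, 64, 3).astype(np.float32)  # HWC, [0, 1]
sharp = add_sharpening(img, weight=0.5, radius=49, threshold=10)
print(sharp.shape, sharp.min() >= 0, sharp.max() <= 1)  # output stays in [0, 1]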
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
    sf: scale factor
    isp_model: camera ISP model

    Returns
    -------
    img: low-quality patch, size: lq_patchsize X lq_patchsize X C, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsize x sf) X (lq_patchsize x sf) X C, range: [0, 1]
    """
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf

    h1, w1 = img.shape[:2]
    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')

    hq = img.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
                             interpolation=random.choice([1, 2, 3]))
        else:
            img = util.imresize_np(img, 1 / 2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2

    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            img = add_blur(img, sf=sf)
        elif i == 2:
            a, b = img.shape[1], img.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1, 2 * sf)
                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
                                 interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)
        elif i == 3:
            # downsample3
            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            img = np.clip(img, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)
        elif i == 6:
            # add processed camera sensor noise
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)

    return img, hq
This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- img: HXWXC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf) sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsize X lq_patchsize X C, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsize x sf) X (lq_patchsize x sf) X C, range: [0, 1]
degradation_bsrgan
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def degradation_bsrgan_variant(image, sf=4, isp_model=None, up=False):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    sf: scale factor
    isp_model: camera ISP model

    Returns
    -------
    example: dict with key "image": the degraded low-quality image (uint8)
    """
    image = util.uint2single(image)
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf

    h1, w1 = image.shape[:2]
    image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = image.shape[:2]

    hq = image.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
                               interpolation=random.choice([1, 2, 3]))
        else:
            image = util.imresize_np(image, 1 / 2, True)
        image = np.clip(image, 0.0, 1.0)
        sf = 2

    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:
        if i == 0:
            image = add_blur(image, sf=sf)
        # elif i == 1:
        #     image = add_blur(image, sf=sf)
        elif i == 2:
            a, b = image.shape[1], image.shape[0]
            # downsample2
            if random.random() < 0.8:
                sf1 = random.uniform(1, 2 * sf)
                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
                                   interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
                image = image[0::sf, 0::sf, ...]  # nearest downsampling
            image = np.clip(image, 0.0, 1.0)
        elif i == 3:
            # downsample3
            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            image = np.clip(image, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                image = add_JPEG_noise(image)
        # elif i == 6:
        #     # add processed camera sensor noise
        #     if random.random() < isp_prob and isp_model is not None:
        #         with torch.no_grad():
        #             image, hq = isp_model.forward(image.copy(), hq)

    # add final JPEG compression noise
    image = add_JPEG_noise(image)
    image = util.single2uint(image)
    if up:
        # todo: random interpolation, as above? want to condition on it then
        image = cv2.resize(image, (w1, h1), interpolation=cv2.INTER_CUBIC)
    example = {"image": image}
    return example
This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- sf: scale factor isp_model: camera ISP model Returns ------- example: dict with key "image": the degraded low-quality image (uint8)
degradation_bsrgan_variant
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan_light.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan_light.py
MIT
def modcrop_np(img, sf):
    '''
    Args:
        img: numpy image, WxH or WxHxC
        sf: scale factor

    Return:
        cropped image
    '''
    w, h = img.shape[:2]
    im = np.copy(img)
    return im[:w - w % sf, :h - h % sf, ...]
Args: img: numpy image, WxH or WxHxC sf: scale factor Return: cropped image
modcrop_np
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def analytic_kernel(k):
    """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
    k_size = k.shape[0]
    # Calculate the big kernel's size
    big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
    # Loop over the small kernel to fill the big one
    for r in range(k_size):
        for c in range(k_size):
            big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
    # Crop the edges of the big kernel to ignore very small values and increase run time of SR
    crop = k_size // 2
    cropped_big_k = big_k[crop:-crop, crop:-crop]
    # Normalize to 1
    return cropped_big_k / cropped_big_k.sum()
Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)
analytic_kernel
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
    """ generate an anisotropic Gaussian kernel
    Args:
        ksize : e.g., 15, kernel size
        theta : [0, pi], rotation angle range
        l1    : [0.1, 50], scaling of eigenvalues
        l2    : [0.1, l1], scaling of eigenvalues
        If l1 = l2, will get an isotropic Gaussian kernel.

    Returns:
        k     : kernel
    """
    v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]),
               np.array([1., 0.]))
    V = np.array([[v[0], v[1]], [v[1], -v[0]]])
    D = np.array([[l1, 0], [0, l2]])
    Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
    k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)

    return k
generate an anisotropic Gaussian kernel Args: ksize : e.g., 15, kernel size theta : [0, pi], rotation angle range l1 : [0.1,50], scaling of eigenvalues l2 : [0.1,l1], scaling of eigenvalues If l1 = l2, will get an isotropic Gaussian kernel. Returns: k : kernel
anisotropic_Gaussian
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def shift_pixel(x, sf, upper_left=True):
    """shift pixel for super-resolution with different scale factors
    Args:
        x: WxHxC or WxH
        sf: scale factor
        upper_left: shift direction
    """
    h, w = x.shape[:2]
    shift = (sf - 1) * 0.5
    xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
    if upper_left:
        x1 = xv + shift
        y1 = yv + shift
    else:
        x1 = xv - shift
        y1 = yv - shift

    x1 = np.clip(x1, 0, w - 1)
    y1 = np.clip(y1, 0, h - 1)

    # NOTE: scipy.interpolate.interp2d is deprecated and was removed in SciPy 1.14;
    # running this requires an older SciPy (or a port to RegularGridInterpolator).
    if x.ndim == 2:
        x = interp2d(xv, yv, x)(x1, y1)
    if x.ndim == 3:
        for i in range(x.shape[-1]):
            x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)

    return x
shift pixel for super-resolution with different scale factors Args: x: WxHxC or WxH sf: scale factor upper_left: shift direction
shift_pixel
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def blur(x, k):
    '''
    x: image, NxcxHxW
    k: kernel, Nx1xhxw
    '''
    n, c = x.shape[:2]
    p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
    x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
    k = k.repeat(1, c, 1, 1)
    k = k.view(-1, 1, k.shape[2], k.shape[3])
    x = x.view(1, -1, x.shape[2], x.shape[3])
    x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
    x = x.view(n, c, x.shape[2], x.shape[3])

    return x
x: image, NxcxHxW k: kernel, Nx1xhxw
blur
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def fspecial(filter_type, *args, **kwargs):
    '''
    python code from:
    https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
    '''
    if filter_type == 'gaussian':
        return fspecial_gaussian(*args, **kwargs)
    if filter_type == 'laplacian':
        return fspecial_laplacian(*args, **kwargs)
python code from: https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
fspecial
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def bicubic_degradation(x, sf=3):
    '''
    Args:
        x: HxWxC image, [0, 1]
        sf: down-scale factor

    Return:
        bicubicly downsampled LR image
    '''
    x = util.imresize_np(x, scale=1 / sf)
    return x
Args: x: HxWxC image, [0, 1] sf: down-scale factor Return: bicubicly downsampled LR image
bicubic_degradation
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def srmd_degradation(x, k, sf=3):
    ''' blur + bicubic downsampling
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image

    Reference:
        @inproceedings{zhang2018learning,
          title={Learning a single convolutional super-resolution network for multiple degradations},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={3262--3271},
          year={2018}
        }
    '''
    # ndimage.filters.convolve is deprecated in recent SciPy; ndimage.convolve is the same call
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')  # 'nearest' | 'mirror'
    x = bicubic_degradation(x, sf=sf)
    return x
blur + bicubic downsampling Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2018learning, title={Learning a single convolutional super-resolution network for multiple degradations}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={3262--3271}, year={2018} }
srmd_degradation
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def dpsr_degradation(x, k, sf=3):
    ''' bicubic downsampling + blur
    Args:
        x: HxWxC image, [0, 1]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image

    Reference:
        @inproceedings{zhang2019deep,
          title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
          author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
          booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
          pages={1671--1681},
          year={2019}
        }
    '''
    x = bicubic_degradation(x, sf=sf)
    # ndimage.filters.convolve is deprecated in recent SciPy; ndimage.convolve is the same call
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    return x
bicubic downsampling + blur Args: x: HxWxC image, [0, 1] k: hxw, double sf: down-scale factor Return: downsampled LR image Reference: @inproceedings{zhang2019deep, title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={1671--1681}, year={2019} }
dpsr_degradation
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def classical_degradation(x, k, sf=3):
    ''' blur + downsampling
    Args:
        x: HxWxC image, [0, 1]/[0, 255]
        k: hxw, double
        sf: down-scale factor

    Return:
        downsampled LR image
    '''
    # ndimage.filters.convolve is deprecated in recent SciPy; ndimage.convolve is the same call
    x = ndimage.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
    # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
    st = 0
    return x[st::sf, st::sf, ...]
blur + downsampling Args: x: HxWxC image, [0, 1]/[0, 255] k: hxw, double sf: down-scale factor Return: downsampled LR image
classical_degradation
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
    """USM sharpening. borrowed from real-ESRGAN
    Input image: I; Blurry image: B.
    1. K = I + weight * (I - B)
    2. Mask = 1 if abs(I - B) > threshold, else: 0
    3. Blur mask:
    4. Out = Mask * K + (1 - Mask) * I

    Args:
        img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
        weight (float): Sharp weight. Default: 0.5.
        radius (float): Kernel size of Gaussian blur. Default: 50.
        threshold (int): Mask threshold (on a [0, 255] scale). Default: 10.
    """
    if radius % 2 == 0:
        radius += 1
    blur = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blur
    mask = np.abs(residual) * 255 > threshold
    mask = mask.astype('float32')
    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)

    K = img + weight * residual
    K = np.clip(K, 0, 1)
    return soft_mask * K + (1 - soft_mask) * img
USM sharpening. borrowed from real-ESRGAN Input image: I; Blurry image: B. 1. K = I + weight * (I - B) 2. Mask = 1 if abs(I - B) > threshold, else: 0 3. Blur mask: 4. Out = Mask * K + (1 - Mask) * I Args: img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. weight (float): Sharp weight. Default: 0.5. radius (float): Kernel size of Gaussian blur. Default: 50. threshold (int): Mask threshold (on a [0, 255] scale). Default: 10.
add_sharpening
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    img: HXWXC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
    sf: scale factor
    isp_model: camera ISP model

    Returns
    -------
    img: low-quality patch, size: lq_patchsize X lq_patchsize X C, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsize x sf) X (lq_patchsize x sf) X C, range: [0, 1]
    """
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf

    h1, w1 = img.shape[:2]
    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')

    hq = img.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
                             interpolation=random.choice([1, 2, 3]))
        else:
            img = util.imresize_np(img, 1 / 2, True)
        img = np.clip(img, 0.0, 1.0)
        sf = 2

    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            img = add_blur(img, sf=sf)
        elif i == 2:
            a, b = img.shape[1], img.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1, 2 * sf)
                img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
                                 interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                # ndimage.filters.convolve is deprecated in recent SciPy; ndimage.convolve is the same call
                img = ndimage.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
                img = img[0::sf, 0::sf, ...]  # nearest downsampling
            img = np.clip(img, 0.0, 1.0)
        elif i == 3:
            # downsample3
            img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            img = np.clip(img, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                img = add_JPEG_noise(img)
        elif i == 6:
            # add processed camera sensor noise
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop
    img, hq = random_crop(img, hq, sf_ori, lq_patchsize)

    return img, hq
This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- img: HXWXC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf) sf: scale factor isp_model: camera ISP model Returns ------- img: low-quality patch, size: lq_patchsize X lq_patchsize X C, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsize x sf) X (lq_patchsize x sf) X C, range: [0, 1]
degradation_bsrgan
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
    """
    This is the degradation model of BSRGAN from the paper
    "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
    ----------
    sf: scale factor
    isp_model: camera ISP model

    Returns
    -------
    example: dict with key "image": the degraded low-quality image (uint8)
    """
    image = util.uint2single(image)
    isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
    sf_ori = sf

    h1, w1 = image.shape[:2]
    image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = image.shape[:2]

    hq = image.copy()

    if sf == 4 and random.random() < scale2_prob:  # downsample1
        if np.random.rand() < 0.5:
            image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
                               interpolation=random.choice([1, 2, 3]))
        else:
            image = util.imresize_np(image, 1 / 2, True)
        image = np.clip(image, 0.0, 1.0)
        sf = 2

    shuffle_order = random.sample(range(7), 7)
    idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
    if idx1 > idx2:  # keep downsample3 last
        shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]

    for i in shuffle_order:
        if i == 0:
            image = add_blur(image, sf=sf)
        elif i == 1:
            image = add_blur(image, sf=sf)
        elif i == 2:
            a, b = image.shape[1], image.shape[0]
            # downsample2
            if random.random() < 0.75:
                sf1 = random.uniform(1, 2 * sf)
                image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
                                   interpolation=random.choice([1, 2, 3]))
            else:
                k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
                k_shifted = shift_pixel(k, sf)
                k_shifted = k_shifted / k_shifted.sum()  # blur with shifted kernel
                # ndimage.filters.convolve is deprecated in recent SciPy; ndimage.convolve is the same call
                image = ndimage.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
                image = image[0::sf, 0::sf, ...]  # nearest downsampling
            image = np.clip(image, 0.0, 1.0)
        elif i == 3:
            # downsample3
            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
            image = np.clip(image, 0.0, 1.0)
        elif i == 4:
            # add Gaussian noise
            image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
        elif i == 5:
            # add JPEG noise
            if random.random() < jpeg_prob:
                image = add_JPEG_noise(image)
        # elif i == 6:
        #     # add processed camera sensor noise
        #     if random.random() < isp_prob and isp_model is not None:
        #         with torch.no_grad():
        #             image, hq = isp_model.forward(image.copy(), hq)

    # add final JPEG compression noise
    image = add_JPEG_noise(image)
    image = util.single2uint(image)
    example = {"image": image}
    return example
This is the degradation model of BSRGAN from the paper "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" ---------- sf: scale factor isp_model: camera ISP model Returns ------- example: dict with key "image": the degraded low-quality image (uint8)
degradation_bsrgan_variant
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
    """
    This is an extended degradation model by combining
    the degradation models of BSRGAN and Real-ESRGAN
    ----------
    img: HXWXC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
    sf: scale factor
    shuffle_prob: the probability of shuffling the degradation order
    use_sharp: sharpening the img

    Returns
    -------
    img: low-quality patch, size: lq_patchsize X lq_patchsize X C, range: [0, 1]
    hq: corresponding high-quality patch, size: (lq_patchsize x sf) X (lq_patchsize x sf) X C, range: [0, 1]
    """
    h1, w1 = img.shape[:2]
    img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...]  # mod crop
    h, w = img.shape[:2]

    if h < lq_patchsize * sf or w < lq_patchsize * sf:
        raise ValueError(f'img size ({h1}X{w1}) is too small!')

    if use_sharp:
        img = add_sharpening(img)
    hq = img.copy()

    if random.random() < shuffle_prob:
        shuffle_order = random.sample(range(13), 13)
    else:
        shuffle_order = list(range(13))
        # local shuffle for noise, JPEG is always the last one
        shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
        shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))

    poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1

    for i in shuffle_order:
        if i == 0:
            img = add_blur(img, sf=sf)
        elif i == 1:
            img = add_resize(img, sf=sf)
        elif i == 2:
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 3:
            if random.random() < poisson_prob:
                img = add_Poisson_noise(img)
        elif i == 4:
            if random.random() < speckle_prob:
                img = add_speckle_noise(img)
        elif i == 5:
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)
        elif i == 6:
            img = add_JPEG_noise(img)
        elif i == 7:
            img = add_blur(img, sf=sf)
        elif i == 8:
            img = add_resize(img, sf=sf)
        elif i == 9:
            img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
        elif i == 10:
            if random.random() < poisson_prob:
                img = add_Poisson_noise(img)
        elif i == 11:
            if random.random() < speckle_prob:
                img = add_speckle_noise(img)
        elif i == 12:
            if random.random() < isp_prob and isp_model is not None:
                with torch.no_grad():
                    img, hq = isp_model.forward(img.copy(), hq)
        else:
            print('check the shuffle!')

    # resize to desired size
    img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
                     interpolation=random.choice([1, 2, 3]))

    # add final JPEG compression noise
    img = add_JPEG_noise(img)

    # random crop
    img, hq = random_crop(img, hq, sf, lq_patchsize)

    return img, hq
This is an extended degradation model by combining the degradation models of BSRGAN and Real-ESRGAN ---------- img: HXWXC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf) sf: scale factor shuffle_prob: the probability of shuffling the degradation order use_sharp: sharpening the img Returns ------- img: low-quality patch, size: lq_patchsize X lq_patchsize X C, range: [0, 1] hq: corresponding high-quality patch, size: (lq_patchsize x sf) X (lq_patchsize x sf) X C, range: [0, 1]
degradation_bsrgan_plus
python
ali-vilab/AnyDoor
ldm/modules/image_degradation/bsrgan.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/image_degradation/bsrgan.py
MIT
def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode
    does not change anymore."""
    return self
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
disabled_train
python
ali-vilab/AnyDoor
ldm/modules/encoders/modules.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/encoders/modules.py
MIT
def get_timestep_embedding(timesteps, embedding_dim):
    """
    This matches the implementation in Denoising Diffusion Probabilistic Models:
    From Fairseq.
    Build sinusoidal embeddings.
    This matches the implementation in tensor2tensor, but differs slightly
    from the description in Section 3.5 of "Attention Is All You Need".
    """
    assert len(timesteps.shape) == 1

    half_dim = embedding_dim // 2
    emb = math.log(10000) / (half_dim - 1)
    emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
    emb = emb.to(device=timesteps.device)
    emb = timesteps.float()[:, None] * emb[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad
        emb = torch.nn.functional.pad(emb, (0, 1, 0, 0))
    return emb
This matches the implementation in Denoising Diffusion Probabilistic Models: From Fairseq. Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need".
get_timestep_embedding
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/model.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/model.py
MIT
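A short sketch of the sinusoidal timestep embedding, assuming the repo is importable; the timesteps are dummies:

import torch
from ldm.modules.diffusionmodules.model import get_timestep_embedding

t = torch.tensor([0, 10, 999])                      # 1-D batch of timesteps
emb = get_timestep_embedding(t, embedding_dim=128)  # sin half, then cos half
print(emb.shape)                                    # torch.Size([3, 128])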
def forward(self, x, emb):
    """
    Apply the module to `x` given `emb` timestep embeddings.
    """
Apply the module to `x` given `emb` timestep embeddings.
forward
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/openaimodel.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/openaimodel.py
MIT
def forward(self, x, emb):
    """
    Apply the block to a Tensor, conditioned on a timestep embedding.

    :param x: an [N x C x ...] Tensor of features.
    :param emb: an [N x emb_channels] Tensor of timestep embeddings.
    :return: an [N x C x ...] Tensor of outputs.
    """
    return checkpoint(
        self._forward, (x, emb), self.parameters(), self.use_checkpoint
    )
Apply the block to a Tensor, conditioned on a timestep embedding. :param x: an [N x C x ...] Tensor of features. :param emb: an [N x emb_channels] Tensor of timestep embeddings. :return: an [N x C x ...] Tensor of outputs.
forward
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/openaimodel.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/openaimodel.py
MIT
def count_flops_attn(model, _x, y):
    """
    A counter for the `thop` package to count the operations in an
    attention operation.
    Meant to be used like:
        macs, params = thop.profile(
            model,
            inputs=(inputs, timestamps),
            custom_ops={QKVAttention: QKVAttention.count_flops},
        )
    """
    b, c, *spatial = y[0].shape
    num_spatial = int(np.prod(spatial))
    # We perform two matmuls with the same number of ops.
    # The first computes the weight matrix, the second computes
    # the combination of the value vectors.
    matmul_ops = 2 * b * (num_spatial ** 2) * c
    model.total_ops += th.DoubleTensor([matmul_ops])
A counter for the `thop` package to count the operations in an
attention operation.

Meant to be used like:

    macs, params = thop.profile(
        model,
        inputs=(inputs, timestamps),
        custom_ops={QKVAttention: QKVAttention.count_flops},
    )
count_flops_attn
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/openaimodel.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/openaimodel.py
MIT
def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum(
            "bct,bcs->bts", q * scale, k * scale
        )  # More stable with f16 than dividing afterwards
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum("bts,bcs->bct", weight, v)
        return a.reshape(bs, -1, length)
Apply QKV attention.

:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
forward
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/openaimodel.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/openaimodel.py
MIT
def forward(self, qkv):
        """
        Apply QKV attention.

        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        bs, width, length = qkv.shape
        assert width % (3 * self.n_heads) == 0
        ch = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        scale = 1 / math.sqrt(math.sqrt(ch))
        weight = th.einsum(
            "bct,bcs->bts",
            (q * scale).view(bs * self.n_heads, ch, length),
            (k * scale).view(bs * self.n_heads, ch, length),
        )  # More stable with f16 than dividing afterwards
        weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
        a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
        return a.reshape(bs, -1, length)
Apply QKV attention.

:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
forward
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/openaimodel.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/openaimodel.py
MIT
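The two forward variants above differ only in the qkv memory layout they expect. A small sketch, assuming the enclosing classes are `QKVAttentionLegacy` (first variant) and `QKVAttention` (second), each constructed with `n_heads` as in this file:

import torch

heads, ch, T = 4, 16, 10
legacy = QKVAttentionLegacy(heads)   # expects [N, H*3*C, T]
split = QKVAttention(heads)          # expects [N, 3*H*C, T]
# same flat width; the two classes just interpret the layout differently
out1 = legacy(torch.randn(2, heads * 3 * ch, T))
out2 = split(torch.randn(2, 3 * heads * ch, T))
assert out1.shape == out2.shape == (2, heads * ch, T)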
def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)
Convert the torso of the model to float16.
convert_to_fp16
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/openaimodel.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/openaimodel.py
MIT
def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)
Convert the torso of the model to float32.
convert_to_fp32
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/openaimodel.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/openaimodel.py
MIT
def forward(self, x, timesteps=None, context=None, y=None, **kwargs):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"
        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
        emb = self.time_embed(t_emb)

        if self.num_classes is not None:
            assert y.shape[0] == x.shape[0]
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb, context)
            hs.append(h)
        h = self.middle_block(h, emb, context)
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, context)
        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)
Apply the model to an input batch.

:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param context: conditioning plugged in via crossattn
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
forward
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/openaimodel.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/openaimodel.py
MIT
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function,
    which defines the cumulative product of (1-beta) over time from t = [0,1].

    :param num_diffusion_timesteps: the number of betas to produce.
    :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
                      produces the cumulative product of (1-beta) up to that
                      part of the diffusion process.
    :param max_beta: the maximum beta to use; use values lower than 1 to
                     prevent singularities.
    """
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return np.array(betas)
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].

:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
    produces the cumulative product of (1-beta) up to that part of the
    diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
    prevent singularities.
betas_for_alpha_bar
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
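For instance, the cosine schedule from Nichol & Dhariwal's improved DDPM is typically produced with this helper (a sketch; the 0.008 offset follows that paper):

import math

betas = betas_for_alpha_bar(
    1000,
    lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
assert betas.shape == (1000,) and betas.max() <= 0.999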
def checkpoint(func, inputs, params, flag):
    """
    Evaluate a function without caching intermediate activations, allowing for
    reduced memory at the expense of extra compute in the backward pass.

    :param func: the function to evaluate.
    :param inputs: the argument sequence to pass to `func`.
    :param params: a sequence of parameters `func` depends on but does not
                   explicitly take as arguments.
    :param flag: if False, disable gradient checkpointing.
    """
    if flag:
        args = tuple(inputs) + tuple(params)
        return CheckpointFunction.apply(func, len(inputs), *args)
    else:
        return func(*inputs)
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.

:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
    explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
checkpoint
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
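A small sketch of the documented contract: `params` covers tensors the function closes over rather than receives as arguments.

import torch

w = torch.randn(64, 64, requires_grad=True)

def _forward(x):
    # w is captured from the enclosing scope rather than passed in
    return (x @ w).relu()

x = torch.randn(8, 64, requires_grad=True)
y = checkpoint(_forward, (x,), (w,), True)
y.sum().backward()   # gradients reach both x and the captured w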
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
    """
    Create sinusoidal timestep embeddings.

    :param timesteps: a 1-D Tensor of N indices, one per batch element.
                      These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :return: an [N x dim] Tensor of positional embeddings.
    """
    if not repeat_only:
        half = dim // 2
        freqs = torch.exp(
            -math.log(max_period)
            * torch.arange(start=0, end=half, dtype=torch.float32)
            / half
        ).to(device=timesteps.device)
        args = timesteps[:, None].float() * freqs[None]
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if dim % 2:
            embedding = torch.cat(
                [embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    else:
        embedding = repeat(timesteps, 'b -> b d', d=dim)
    return embedding
Create sinusoidal timestep embeddings.

:param timesteps: a 1-D Tensor of N indices, one per batch element.
    These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
timestep_embedding
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
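A sketch of both modes, assuming the module-level imports (`torch`, `einops.repeat`) are in place:

t = torch.tensor([0.0, 10.5, 999.0])              # fractional timesteps are allowed
emb = timestep_embedding(t, 256)                  # -> [3, 256] sinusoidal features
rep = timestep_embedding(t, 4, repeat_only=True)  # each row is the raw timestep repeated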
def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module
Zero out the parameters of a module and return it.
zero_module
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
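This is how zero-initialized output layers are usually built in this codebase; a sketch using `conv_nd` from the same file (the channel numbers are arbitrary):

out_conv = zero_module(conv_nd(2, 320, 4, 3, padding=1))
# the layer contributes nothing at initialization; gradients still flow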
def scale_module(module, scale):
    """
    Scale the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().mul_(scale)
    return module
Scale the parameters of a module and return it.
scale_module
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    return tensor.mean(dim=list(range(1, len(tensor.shape))))
Take the mean over all non-batch dimensions.
mean_flat
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
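Typical use is a per-sample loss (sketch):

import torch

pred = torch.randn(8, 3, 32, 32)
target = torch.randn(8, 3, 32, 32)
loss = mean_flat((target - pred) ** 2)   # shape [8]: one MSE per batch element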
def normalization(channels):
    """
    Make a standard normalization layer.

    :param channels: number of input channels.
    :return: an nn.Module for normalization.
    """
    return GroupNorm32(32, channels)
Make a standard normalization layer.

:param channels: number of input channels.
:return: an nn.Module for normalization.
normalization
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
def conv_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D convolution module.
    """
    if dims == 1:
        return nn.Conv1d(*args, **kwargs)
    elif dims == 2:
        return nn.Conv2d(*args, **kwargs)
    elif dims == 3:
        return nn.Conv3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")
Create a 1D, 2D, or 3D convolution module.
conv_nd
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
def linear(*args, **kwargs):
    """
    Create a linear module.
    """
    return nn.Linear(*args, **kwargs)
Create a linear module.
linear
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
def avg_pool_nd(dims, *args, **kwargs):
    """
    Create a 1D, 2D, or 3D average pooling module.
    """
    if dims == 1:
        return nn.AvgPool1d(*args, **kwargs)
    elif dims == 2:
        return nn.AvgPool2d(*args, **kwargs)
    elif dims == 3:
        return nn.AvgPool3d(*args, **kwargs)
    raise ValueError(f"unsupported dimensions: {dims}")
Create a 1D, 2D, or 3D average pooling module.
avg_pool_nd
python
ali-vilab/AnyDoor
ldm/modules/diffusionmodules/util.py
https://github.com/ali-vilab/AnyDoor/blob/master/ldm/modules/diffusionmodules/util.py
MIT
def process_pairs(self, ref_image, ref_mask, tar_image, tar_mask, max_ratio=0.8):
        assert mask_score(ref_mask) > 0.90
        assert self.check_mask_area(ref_mask) == True
        assert self.check_mask_area(tar_mask) == True

        # ========= Reference ===========
        '''
        # Simulate the case that the mask for the reference object is coarse.
        # Seems useless :(
        if np.random.uniform(0, 1) < 0.7:
            ref_mask_clean = ref_mask.copy()
            ref_mask_clean = np.stack([ref_mask_clean, ref_mask_clean, ref_mask_clean], -1)
            ref_mask = perturb_mask(ref_mask, 0.6, 0.9)

            # Select a fake background to avoid background leakage.
            fake_target = tar_image.copy()
            h, w = ref_image.shape[0], ref_image.shape[1]
            fake_target = cv2.resize(fake_target, (w, h))
            fake_back = np.fliplr(np.flipud(fake_target))
            fake_back = self.aug_data_back(fake_back)
            ref_image = ref_mask_clean * ref_image + (1 - ref_mask_clean) * fake_back
        '''

        # Get the outline box of the reference object
        ref_box_yyxx = get_bbox_from_mask(ref_mask)
        assert self.check_region_size(ref_mask, ref_box_yyxx, ratio=0.10, mode='min') == True

        # Filter out the background of the reference image
        ref_mask_3 = np.stack([ref_mask, ref_mask, ref_mask], -1)
        masked_ref_image = ref_image * ref_mask_3 + np.ones_like(ref_image) * 255 * (1 - ref_mask_3)

        y1, y2, x1, x2 = ref_box_yyxx
        masked_ref_image = masked_ref_image[y1:y2, x1:x2, :]
        ref_mask = ref_mask[y1:y2, x1:x2]

        ratio = np.random.randint(11, 15) / 10
        masked_ref_image, ref_mask = expand_image_mask(masked_ref_image, ref_mask, ratio=ratio)
        ref_mask_3 = np.stack([ref_mask, ref_mask, ref_mask], -1)

        # Pad the reference image to a square and resize to 224
        masked_ref_image = pad_to_square(masked_ref_image, pad_value=255, random=False)
        masked_ref_image = cv2.resize(masked_ref_image.astype(np.uint8), (224, 224)).astype(np.uint8)

        ref_mask_3 = pad_to_square(ref_mask_3 * 255, pad_value=0, random=False)
        ref_mask_3 = cv2.resize(ref_mask_3.astype(np.uint8), (224, 224)).astype(np.uint8)
        ref_mask = ref_mask_3[:, :, 0]

        # Augment the reference image
        # masked_ref_image_aug = self.aug_data(masked_ref_image)

        # Build the high-frequency map
        masked_ref_image_compose, ref_mask_compose = self.aug_data_mask(masked_ref_image, ref_mask)
        masked_ref_image_aug = masked_ref_image_compose.copy()
        ref_mask_3 = np.stack([ref_mask_compose, ref_mask_compose, ref_mask_compose], -1)
        ref_image_collage = sobel(masked_ref_image_compose, ref_mask_compose / 255)

        # ========= Training Target ===========
        tar_box_yyxx = get_bbox_from_mask(tar_mask)
        tar_box_yyxx = expand_bbox(tar_mask, tar_box_yyxx, ratio=[1.1, 1.2])  # 1.1 1.3
        assert self.check_region_size(tar_mask, tar_box_yyxx, ratio=max_ratio, mode='max') == True

        # Crop around the target object
        tar_box_yyxx_crop = expand_bbox(tar_image, tar_box_yyxx, ratio=[1.3, 3.0])
        tar_box_yyxx_crop = box2squre(tar_image, tar_box_yyxx_crop)  # crop box
        y1, y2, x1, x2 = tar_box_yyxx_crop

        cropped_target_image = tar_image[y1:y2, x1:x2, :]
        cropped_tar_mask = tar_mask[y1:y2, x1:x2]

        tar_box_yyxx = box_in_box(tar_box_yyxx, tar_box_yyxx_crop)
        y1, y2, x1, x2 = tar_box_yyxx

        # Prepare the collage image
        ref_image_collage = cv2.resize(ref_image_collage.astype(np.uint8), (x2 - x1, y2 - y1))
        ref_mask_compose = cv2.resize(ref_mask_compose.astype(np.uint8), (x2 - x1, y2 - y1))
        ref_mask_compose = (ref_mask_compose > 128).astype(np.uint8)

        collage = cropped_target_image.copy()
        collage[y1:y2, x1:x2, :] = ref_image_collage

        collage_mask = cropped_target_image.copy() * 0.0
        collage_mask[y1:y2, x1:x2, :] = 1.0

        if np.random.uniform(0, 1) < 0.7:
            cropped_tar_mask = perturb_mask(cropped_tar_mask)
            collage_mask = np.stack([cropped_tar_mask, cropped_tar_mask, cropped_tar_mask], -1)

        H1, W1 = collage.shape[0], collage.shape[1]

        cropped_target_image = pad_to_square(cropped_target_image, pad_value=0, random=False).astype(np.uint8)
        collage = pad_to_square(collage, pad_value=0, random=False).astype(np.uint8)
        collage_mask = pad_to_square(collage_mask, pad_value=2, random=False).astype(np.uint8)
        H2, W2 = collage.shape[0], collage.shape[1]

        cropped_target_image = cv2.resize(cropped_target_image.astype(np.uint8), (512, 512)).astype(np.float32)
        collage = cv2.resize(collage.astype(np.uint8), (512, 512)).astype(np.float32)
        collage_mask = cv2.resize(collage_mask.astype(np.uint8), (512, 512),
                                  interpolation=cv2.INTER_NEAREST).astype(np.float32)
        collage_mask[collage_mask == 2] = -1

        # Prepare the dataloader items
        masked_ref_image_aug = masked_ref_image_aug / 255
        cropped_target_image = cropped_target_image / 127.5 - 1.0
        collage = collage / 127.5 - 1.0
        collage = np.concatenate([collage, collage_mask[:, :, :1]], -1)

        item = dict(
            ref=masked_ref_image_aug.copy(),
            jpg=cropped_target_image.copy(),
            hint=collage.copy(),
            extra_sizes=np.array([H1, W1, H2, W2]),
            tar_box_yyxx_crop=np.array(tar_box_yyxx_crop),
        )
        return item
# Simulate the case that the mask for the reference object is coarse.
# Seems useless :(
if np.random.uniform(0, 1) < 0.7:
    ref_mask_clean = ref_mask.copy()
    ref_mask_clean = np.stack([ref_mask_clean, ref_mask_clean, ref_mask_clean], -1)
    ref_mask = perturb_mask(ref_mask, 0.6, 0.9)

    # Select a fake background to avoid background leakage.
    fake_target = tar_image.copy()
    h, w = ref_image.shape[0], ref_image.shape[1]
    fake_target = cv2.resize(fake_target, (w, h))
    fake_back = np.fliplr(np.flipud(fake_target))
    fake_back = self.aug_data_back(fake_back)
    ref_image = ref_mask_clean * ref_image + (1 - ref_mask_clean) * fake_back
process_pairs
python
ali-vilab/AnyDoor
datasets/base.py
https://github.com/ali-vilab/AnyDoor/blob/master/datasets/base.py
MIT
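A usage sketch from inside a dataset's __getitem__ (image/mask loading elided; masks are HxW arrays in {0, 1}, images HxWx3 uint8):

item = self.process_pairs(ref_image, ref_mask, tar_image, tar_mask)
# item['ref']  : 224x224x3 in [0, 1]   -- reference crop for the ID extractor
# item['jpg']  : 512x512x3 in [-1, 1]  -- diffusion target
# item['hint'] : 512x512x4 in [-1, 1]  -- high-frequency collage plus a mask channel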
def mask_score(mask):
    '''Scoring the mask according to connectivity.'''
    mask = mask.astype(np.uint8)
    if mask.sum() < 10:
        return 0
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    cnt_area = [cv2.contourArea(cnt) for cnt in contours]
    conc_score = np.max(cnt_area) / sum(cnt_area)
    return conc_score
Scoring the mask according to connectivity.
mask_score
python
ali-vilab/AnyDoor
datasets/data_utils.py
https://github.com/ali-vilab/AnyDoor/blob/master/datasets/data_utils.py
MIT
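A quick check (sketch): a single solid blob scores 1.0, while disconnected fragments score lower.

import numpy as np

mask = np.zeros((64, 64), np.uint8)
mask[8:32, 8:32] = 1
print(mask_score(mask))   # 1.0: one connected component

mask[48:56, 48:56] = 1
print(mask_score(mask))   # < 1.0: the largest blob holds only part of the area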
def sobel(img, mask, thresh=50):
    '''Calculating the high-frequency map.'''
    H, W = img.shape[0], img.shape[1]
    img = cv2.resize(img, (256, 256))
    mask = (cv2.resize(mask, (256, 256)) > 0.5).astype(np.uint8)
    kernel = np.ones((5, 5), np.uint8)
    mask = cv2.erode(mask, kernel, iterations=2)

    Ksize = 3
    sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=Ksize)
    sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=Ksize)
    sobel_X = cv2.convertScaleAbs(sobelx)
    sobel_Y = cv2.convertScaleAbs(sobely)
    scharr = cv2.addWeighted(sobel_X, 0.5, sobel_Y, 0.5, 0)

    scharr = np.max(scharr, -1) * mask
    scharr[scharr < thresh] = 0.0
    scharr = np.stack([scharr, scharr, scharr], -1)
    scharr = (scharr.astype(np.float32) / 255 * img.astype(np.float32)).astype(np.uint8)
    scharr = cv2.resize(scharr, (W, H))
    return scharr
Calculating the high-frequency map.
sobel
python
ali-vilab/AnyDoor
datasets/data_utils.py
https://github.com/ali-vilab/AnyDoor/blob/master/datasets/data_utils.py
MIT
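Usage sketch: `img` is an HxWx3 uint8 image and `mask` a float map in [0, 1] covering the object; the result keeps only edge content inside the (eroded) mask.

import numpy as np

img = np.random.randint(0, 255, (128, 128, 3), np.uint8)
mask = np.zeros((128, 128), np.float32)
mask[32:96, 32:96] = 1.0
hf = sobel(img, mask)   # uint8 HxWx3 high-frequency map, same size as img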
def resize_and_pad(image, box):
    '''Fitting an image to the box region while keeping the aspect ratio.'''
    y1, y2, x1, x2 = box
    H, W = y2 - y1, x2 - x1
    h, w = image.shape[0], image.shape[1]
    r_box = W / H
    r_image = w / h
    if r_box >= r_image:
        h_target = H
        w_target = int(w * H / h)
        image = cv2.resize(image, (w_target, h_target))

        w1 = (W - w_target) // 2
        w2 = W - w_target - w1
        pad_param = ((0, 0), (w1, w2), (0, 0))
        image = np.pad(image, pad_param, 'constant', constant_values=255)
    else:
        w_target = W
        h_target = int(h * W / w)
        image = cv2.resize(image, (w_target, h_target))

        h1 = (H - h_target) // 2
        h2 = H - h_target - h1
        pad_param = ((h1, h2), (0, 0), (0, 0))
        image = np.pad(image, pad_param, 'constant', constant_values=255)
    return image
Fitting an image to the box region while keeping the aspect ratio.
resize_and_pad
python
ali-vilab/AnyDoor
datasets/data_utils.py
https://github.com/ali-vilab/AnyDoor/blob/master/datasets/data_utils.py
MIT
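Sketch: fit a 100x50 image into a 200x200 box; the result is letterboxed with white to exactly the box size.

import numpy as np

obj = np.random.randint(0, 255, (50, 100, 3), np.uint8)
fitted = resize_and_pad(obj, (0, 200, 0, 200))   # box is (y1, y2, x1, x2)
assert fitted.shape[:2] == (200, 200)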
def q_x(x_0, t=65):
    '''Adding noise to a given image (forward diffusion to step t).'''
    x_0 = torch.from_numpy(x_0).float() / 127.5 - 1
    num_steps = 100
    betas = torch.linspace(-6, 6, num_steps)
    betas = torch.sigmoid(betas) * (0.5e-2 - 1e-5) + 1e-5

    alphas = 1 - betas
    alphas_prod = torch.cumprod(alphas, 0)
    alphas_prod_p = torch.cat([torch.tensor([1]).float(), alphas_prod[:-1]], 0)
    alphas_bar_sqrt = torch.sqrt(alphas_prod)
    one_minus_alphas_bar_log = torch.log(1 - alphas_prod)
    one_minus_alphas_bar_sqrt = torch.sqrt(1 - alphas_prod)

    noise = torch.randn_like(x_0)
    alphas_t = alphas_bar_sqrt[t]
    alphas_1_m_t = one_minus_alphas_bar_sqrt[t]
    return (alphas_t * x_0 + alphas_1_m_t * noise).numpy() * 127.5 + 127.5
Adding noise to a given image (forward diffusion to step t).
q_x
python
ali-vilab/AnyDoor
datasets/data_utils.py
https://github.com/ali-vilab/AnyDoor/blob/master/datasets/data_utils.py
MIT
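Sketch: add noise at step 65 of the 100-step sigmoid schedule; input and output are uint8-range HxWx3 arrays.

import numpy as np

img = np.random.randint(0, 255, (64, 64, 3)).astype(np.float32)
noisy = q_x(img, t=65)                       # float array, roughly back in [0, 255]
noisy = np.clip(noisy, 0, 255).astype(np.uint8)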
def __init__(self, fg_dir, bg_dir):
        self.bg_dir = bg_dir
        bg_data = os.listdir(self.bg_dir)
        self.bg_data = [i for i in bg_data if 'mask' in i]
        self.image_dir = fg_dir
        self.data = os.listdir(self.image_dir)
        self.size = (512, 512)
        self.clip_size = (224, 224)
        '''
        Dynamic:
            0: Static View, High Quality
            1: Multi-view, Low Quality
            2: Multi-view, High Quality
        '''
        self.dynamic = 1
Dynamic:
    0: Static View, High Quality
    1: Multi-view, Low Quality
    2: Multi-view, High Quality
__init__
python
ali-vilab/AnyDoor
datasets/dreambooth.py
https://github.com/ali-vilab/AnyDoor/blob/master/datasets/dreambooth.py
MIT
def reset(self):
        """reset analyser, clear any state"""
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False
        self._mTotalChars = 0  # Total characters encountered
        # The number of characters whose frequency order is less than 512
        self._mFreqChars = 0
reset analyser, clear any state
reset
python
seanliang/ConvertToUTF8
chardet/chardistribution.py
https://github.com/seanliang/ConvertToUTF8/blob/master/chardet/chardistribution.py
MIT
def feed(self, aBuf, aCharLen):
        """feed a character with known length"""
        if aCharLen == 2:
            # we only care about 2-bytes character in our distribution analysis
            order = self.get_order(aBuf)
        else:
            order = -1
        if order >= 0:
            self._mTotalChars += 1
            # order is valid
            if order < self._mTableSize:
                if 512 > self._mCharToFreqOrder[order]:
                    self._mFreqChars += 1
feed a character with known length
feed
python
seanliang/ConvertToUTF8
chardet/chardistribution.py
https://github.com/seanliang/ConvertToUTF8/blob/master/chardet/chardistribution.py
MIT
def get_confidence(self):
        """return confidence based on existing data"""
        # if we didn't receive any character in our consideration range,
        # return negative answer
        if self._mTotalChars <= 0:
            return SURE_NO

        if self._mTotalChars != self._mFreqChars:
            r = (self._mFreqChars
                 / ((self._mTotalChars - self._mFreqChars)
                    * self._mTypicalDistributionRatio))
            if r < SURE_YES:
                return r

        # normalize confidence (we don't want to be 100% sure)
        return SURE_YES
return confidence based on existing data
get_confidence
python
seanliang/ConvertToUTF8
chardet/chardistribution.py
https://github.com/seanliang/ConvertToUTF8/blob/master/chardet/chardistribution.py
MIT
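A sketch of the reset/feed/get_confidence protocol these three methods implement (a hypothetical driver; in the library the probers call these internally, and `SJISDistributionAnalysis` is one concrete subclass providing `get_order`):

analyser = SJISDistributionAnalysis()
analyser.reset()
for i in range(0, len(raw_bytes) - 1, 2):   # raw_bytes: the buffer under analysis
    analyser.feed(raw_bytes[i:i + 2], 2)    # feed each presumed 2-byte character
confidence = analyser.get_confidence()      # between SURE_NO and SURE_YES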
async def test_edgeql_expr_op_04(self):
        await self.assert_query_result(
            r'''SELECT -1 + 2 * 3 - 5 - 6.0 / 2;''',
            [-3],
        )

        await self.assert_query_result(
            r'''
                SELECT
                    -1 + 2 * 3 - 5 - 6.0 / 2 > 0
                    OR 25 % 4 = 3 AND 42 IN {12, 42, 14};
            ''',
            [False],
        )

        await self.assert_query_result(
            r'''SELECT (-1 + 2) * 3 - (5 - 6.0) / 2;''',
            [3.5],
        )

        await self.assert_query_result(
            r'''
                SELECT
                    ((-1 + 2) * 3 - (5 - 6.0) / 2 > 0 OR 25 % 4 = 3)
                    AND 42 IN {12, 42, 14};
            ''',
            [True],
        )

        await self.assert_query_result(
            r'''SELECT 1 * 0.2;''',
            [0.2],
        )

        await self.assert_query_result(
            r'''SELECT 0.2 * 1;''',
            [0.2],
        )

        await self.assert_query_result(
            r'''SELECT -0.2 * 1;''',
            [-0.2],
        )

        await self.assert_query_result(
            r'''SELECT 0.2 + 1;''',
            [1.2],
        )

        await self.assert_query_result(
            r'''SELECT 1 + 0.2;''',
            [1.2],
        )

        await self.assert_query_result(
            r'''SELECT -0.2 - 1;''',
            [-1.2],
        )

        await self.assert_query_result(
            r'''SELECT -1 - 0.2;''',
            [-1.2],
        )

        await self.assert_query_result(
            r'''SELECT -1 / 0.2;''',
            [-5],
        )

        await self.assert_query_result(
            r'''SELECT 0.2 / -1;''',
            [-0.2],
        )

        await self.assert_query_result(
            r'''SELECT 5 // 2;''',
            [2],
        )

        await self.assert_query_result(
            r'''SELECT 5.5 // 1.2;''',
            [4.0],
        )

        await self.assert_query_result(
            r'''SELECT (INTROSPECT TYPEOF (5.5 // 1.2)).name;''',
            ['std::float64'],
        )

        await self.assert_query_result(
            r'''SELECT -9.6 // 2;''',
            [-5.0],
        )

        await self.assert_query_result(
            r'''SELECT (INTROSPECT TYPEOF (<float32>-9.6 // 2)).name;''',
            ['std::float64'],
        )
Test arithmetic operator precedence and mixed integer/float arithmetic,
including floor division (//) and the types of its results.
test_edgeql_expr_op_04
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_op_14(self):
        await self.assert_query_result(
            r"""
                SELECT _ := {9, 1, 13}
                FILTER _ IN {11, 12, 13};
            """,
            {13},
        )

        await self.assert_query_result(
            r"""
                SELECT _ := {9, 1, 13, 11}
                FILTER _ IN {11, 12, 13};
            """,
            {11, 13},
        )
Test the IN operator inside a FILTER clause.
test_edgeql_expr_op_14
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_op_15(self):
        await self.assert_query_result(
            r"""
                SELECT _ := {9, 12, 13}
                FILTER _ NOT IN {11, 12, 13};
            """,
            {9},
        )

        await self.assert_query_result(
            r"""
                SELECT _ := {9, 1, 13, 11}
                FILTER _ NOT IN {11, 12, 13};
            """,
            {1, 9},
        )
Test the NOT IN operator inside a FILTER clause.
test_edgeql_expr_op_15
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_op_16(self):
        await self.assert_query_result(
            r"""
                WITH a := {11, 12, 13}
                SELECT _ := {9, 1, 13}
                FILTER _ IN a;
            """,
            {13},
        )

        await self.assert_query_result(
            r"""
                WITH MODULE schema
                SELECT _ := {9, 1, 13}
                FILTER _ IN (
                    # Lengths of names for schema::Map, Type, and Array are
                    # 11, 12, and 13, respectively.
                    len((
                        SELECT ObjectType
                        FILTER ObjectType.name LIKE 'schema::%'
                    ).name)
                );
            """,
            {13},
        )
Test IN against a set bound in a WITH block and against a computed set.
test_edgeql_expr_op_16
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_op_17(self):
        await self.assert_query_result(
            r"""
                WITH a := {11, 12, 13}
                SELECT _ := {9, 1, 13}
                FILTER _ NOT IN a;
            """,
            {9, 1},
        )

        await self.assert_query_result(
            r"""
                WITH MODULE schema
                SELECT _ := {9, 1, 13}
                FILTER _ NOT IN (
                    # Lengths of names for schema::Map, Type, and Array are
                    # 11, 12, and 13, respectively.
                    len((
                        SELECT ObjectType
                        FILTER ObjectType.name LIKE 'schema::%'
                    ).name)
                );
            """,
            {9, 1},
        )
Test NOT IN against a set bound in a WITH block and against a computed set.
test_edgeql_expr_op_17
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_op_21(self):
        # There was a bug that caused `=` to not always be equivalent
        # to `>= AND <=` due to difference in casting decimals to
        # floats or floats into decimal.
        await self.assert_query_result(
            r'''
                SELECT 0.797693134862311111111n =
                    <decimal>0.797693134862311111111;
            ''',
            [False],
        )

        await self.assert_query_result(
            r'''
                SELECT
                    0.797693134862311111111n >=
                        <decimal>0.797693134862311111111
                    AND
                    0.797693134862311111111n <=
                        <decimal>0.797693134862311111111;
            ''',
            [False],
        )
Test that `=` is equivalent to `>= AND <=` when comparing decimal literals
with decimals cast from floats.
test_edgeql_expr_op_21
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_valid_comp_04(self):
        # bytes and uuids are orderable in the same way as a "similar"
        # ascii string. For uuid this works out because ord('9') < ord('a').
        #
        # Motivation: In some sense str and uuid are a special kind of
        # byte-string. A different way of representing them would be
        # as arrays (sequences) of bytes. Conceptually, as long as the
        # individual elements of these arrays are orderable (and a
        # total ordering can be naturally defined on actual bytes),
        # the array of these elements is also orderable.

        # "ordered" uuid-like strings
        uuids = [
            '04b4318e-1a01-41e4-b29c-b57b94db9402',
            '94b4318e-1a01-41e4-b29c-b57b94db9402',
            'a4b4318e-1a01-41e4-b29c-b57b94db9402',
            'a5b4318e-1a01-41e4-b29c-b57b94db9402',
            'f4b4318e-1a01-41e4-b29c-b57b94db9402',
            'f4b4318e-1a01-41e4-b29c-b67b94db9402',
            'f4b4318e-1a01-41e4-b29c-b68b94db9402',
        ]

        for left in uuids[:-1]:
            for right in uuids[1:]:
                for op in ('>=', '<', '<=', '>'):
                    query = f'''
                        SELECT (b'{left}' {op} b'{right}') =
                            ('{left}' {op} '{right}');
                    '''
                    await self.assert_query_result(
                        query, {True}, msg=query)

                    query = f'''
                        SELECT (<uuid>'{left}' {op} <uuid>'{right}') =
                            ('{left}' {op} '{right}');
                    '''
                    await self.assert_query_result(
                        query, {True}, msg=query)
Test that bytes and uuid values order the same way as the corresponding
ASCII strings.
test_edgeql_expr_valid_comp_04
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_valid_order_01(self):
        # JSON ordering is a bit difficult to conceptualize across
        # non-homogeneous JSON types, but it is stable and can be used
        # reliably in ORDER BY clauses. In fact, many tests rely on this.
        await self.assert_query_result(
            r'''SELECT <json>2 < <json>'2';''',
            [False],
        )

        await self.assert_query_result(
            r'''
                WITH X := {<json>1, <json>True, <json>'1'}
                SELECT X ORDER BY X;
            ''',
            # JSON
            ['1', 1, True],
            # Binary
            ['"1"', '1', 'true'],
        )

        await self.assert_query_result(
            r'''
                WITH X := {
                    <json>1,
                    <json>2,
                    <json>'b',
                    to_json('{"a":1,"b":2}'),
                    to_json('{"b":3,"a":1,"b":2}'),
                    to_json('["a", 1, "b", 2]')
                }
                SELECT X ORDER BY X;
            ''',
            # JSON
            ['b', 1, 2, ['a', 1, 'b', 2],
             {'a': 1, 'b': 2}, {'a': 1, 'b': 2}],
            # Binary
            [
                '"b"', '1', '2', '["a", 1, "b", 2]',
                '{"a": 1, "b": 2}', '{"a": 1, "b": 2}'
            ],
        )
Test that JSON values of mixed types have a stable ordering usable in
ORDER BY clauses.
test_edgeql_expr_valid_order_01
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_valid_order_06(self):
        # make sure the various date & time scalars are usable in ORDER BY clauses
        await self.assert_query_result(
            r'''
                WITH A := <datetime>{
                    "2018-05-07T20:01:22.306916+00:00",
                    "2017-05-07T20:01:22.306916+00:00"
                }
                SELECT A ORDER BY A;
            ''',
            [
                "2017-05-07T20:01:22.306916+00:00",
                "2018-05-07T20:01:22.306916+00:00",
            ],
        )

        await self.assert_query_result(
            r'''
                WITH A := <cal::local_datetime>{
                    "2018-05-07T20:01:22.306916",
                    "2017-05-07T20:01:22.306916"
                }
                SELECT A ORDER BY A;
            ''',
            [
                "2017-05-07T20:01:22.306916",
                "2018-05-07T20:01:22.306916",
            ],
        )

        await self.assert_query_result(
            r'''
                WITH A := <cal::local_date>{
                    "2018-05-07",
                    "2017-05-07"
                }
                SELECT A ORDER BY A;
            ''',
            [
                "2017-05-07",
                "2018-05-07",
            ],
        )

        await self.assert_query_result(
            r'''
                WITH A := <cal::local_time>{
                    "20:01:22.306916",
                    "19:01:22.306916"
                }
                SELECT A ORDER BY A;
            ''',
            [
                "19:01:22.306916",
                "20:01:22.306916",
            ],
        )

        await self.assert_query_result(
            r'''
                WITH A := to_str(
                    <duration>{
                        "20:01:22.306916",
                        "19:01:22.306916"
                    }
                )
                SELECT A ORDER BY A;
            ''',
            [
                "PT19H1M22.306916S",
                "PT20H1M22.306916S",
            ]
        )
, [ "2017-05-07T20:01:22.306916+00:00", "2018-05-07T20:01:22.306916+00:00", ], ) await self.assert_query_result( r
test_edgeql_expr_valid_order_06
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_valid_arithmetic_08(self):
        # Test (5) - decimal is incompatible with everything except integers
        expected_error_msg = 'cannot be applied to operands'

        for left in get_test_values(anynumeric=True):
            for right in get_test_values(anyint=False, anynumeric=False):
                for op in ['+', '-', '*', '/', '//', '%', '^']:
                    query = f"""SELECT {left} {op} {right};"""
                    with self.assertRaisesRegex(edgedb.QueryError,
                                                expected_error_msg,
                                                msg=query):
                        async with self.con.transaction():
                            await self.con.execute(query)

        for left, ldesc in get_test_items(anynumeric=True):
            for right in get_test_values(anyint=True):
                for op in ['+', '-', '*', '%']:
                    # bigint/decimal are "contagious"
                    await self.assert_query_result(
                        f"""
                            SELECT ({left} {op} {right}) IS {ldesc.typename};
                        """,
                        [True],
                    )

                    await self.assert_query_result(
                        f"""
                            SELECT ({right} {op} {left}) IS {ldesc.typename};
                        """,
                        [True],
                    )

        for left, ldesc in get_test_items(anynumeric=True):
            for right in get_test_values(anyint=True):
                op = '//'
                await self.assert_query_result(
                    f"""
                        SELECT ({left} {op} {right}) IS {ldesc.typename};
                    """,
                    [True],
                )

                await self.assert_query_result(
                    f"""
                        SELECT ({right} {op} {left}) IS {ldesc.typename};
                    """,
                    [True],
                )

        for left in get_test_values(anynumeric=True):
            for right in get_test_values(anyint=True):
                # regular division and power with anynumeric always
                # results in decimal.
                for op in ['/', '^']:
                    await self.assert_query_result(
                        f"""
                            SELECT ({left} {op} {right}) IS decimal;
                        """,
                        [True],
                    )

                    await self.assert_query_result(
                        f"""
                            SELECT ({right} {op} {left}) IS decimal;
                        """,
                        [True],
                    )
Test that decimal arithmetic is incompatible with non-integer operands,
and that bigint/decimal results are "contagious" when mixed with integers.
test_edgeql_expr_valid_arithmetic_08
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_valid_setop_06(self):
        # decimals are tricky because integers implicitly cast into
        # them and floats don't
        expected_error_msg = "operator 'UNION' cannot be applied"

        # decimal UNION numerics
        for left, left_t in get_test_items(anynumeric=True):
            for right in get_test_values(anyint=True):
                query = f"""SELECT count({left} UNION {right});"""
                # decimals and integers can be UNIONed in any
                # combination
                await self.assert_query_result(query, [2])

                query = f"""
                    SELECT (INTROSPECT TYPEOF ({left} UNION {right})).name;
                """
                # this operation should always be valid
                await self.assert_query_result(
                    query, {f'std::{left_t.typename}'})

        for left in get_test_values(anynumeric=True):
            for right in get_test_values(anyfloat=True):
                query = f"""SELECT count({left} UNION {right});"""

                # decimal UNION float is illegal
                with self.assertRaisesRegex(edgedb.QueryError,
                                            expected_error_msg,
                                            msg=query):
                    async with self.con.transaction():
                        await self.con.execute(query)
Test UNION between decimals and other numerics: integers implicitly cast
into decimal, while decimal UNION float is illegal.
test_edgeql_expr_valid_setop_06
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_polymorphic_01(self):
        await self.con.execute(r"""
            SELECT Text {
                [IS Issue].number,
                [IS Issue].related_to,
                [IS Issue].`priority`,
                [IS Comment].owner: {
                    name
                }
            };
        """)

        await self.con.execute(r"""
            SELECT Owned {
                [IS Named].name
            };
        """)
Test polymorphic shapes using [IS ...] indirection.
test_edgeql_expr_polymorphic_01
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_cast_10(self):
        await self.assert_query_result(
            r'''
                SELECT <array<tuple<EmulatedEnum>>>
                    (SELECT [('v1',)] ++ [('v2',)])
            ''',
            [[["v1"], ["v2"]]],
        )

        await self.assert_query_result(
            r'''
                SELECT <tuple<array<EmulatedEnum>>>
                    (SELECT (['v1'] ++ ['v2'],))
            ''',
            [[["v1", "v2"]]],
        )
, [[["v1"], ["v2"]]], ) await self.assert_query_result( r
test_edgeql_expr_cast_10
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_set_04(self):
        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4} except 2
                order by _;
            """,
            [1, 3, 4],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4} except 2 except 4
                order by _;
            """,
            [1, 3],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4} except {1, 2}
                order by _;
            """,
            [3, 4],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4} except {4, 5}
                order by _;
            """,
            [1, 2, 3],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4} except {5, 6}
                order by _;
            """,
            [1, 2, 3, 4],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 1, 1, 2, 2, 3} except {1, 3, 3, 2}
                order by _;
            """,
            [1, 1, 2],
        )
Test the EXCEPT set operator, including element multiplicities.
test_edgeql_expr_set_04
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_set_06(self):
        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4} intersect 2
                order by _;
            """,
            [2],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4}
                intersect {2, 3, 4}
                intersect {2, 4}
                order by _;
            """,
            [2, 4],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4} intersect {5, 6}
                order by _;
            """,
            [],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 2, 3, 4} intersect 4
                order by _;
            """,
            [4],
        )

        await self.assert_query_result(
            r"""
                select _ := {1, 1, 1, 2, 2, 3}
                intersect {1, 3, 3, 2, 2, 5}
                order by _;
            """,
            [1, 2, 2, 3],
        )
Test the INTERSECT set operator, including element multiplicities.
test_edgeql_expr_set_06
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_tuple_13(self):
        await self.assert_query_result(
            r"""
                SELECT (1, ('a', 'b', (0.1, 0.2)), 2, 3);
            """,
            [[1, ['a', 'b', [0.1, 0.2]], 2, 3]],
        )

        await self.assert_query_result(
            r"""
                # should be the same as above
                WITH _ := (1, ('a', 'b', (0.1, 0.2)), 2, 3)
                SELECT _;
            """,
            [[1, ['a', 'b', [0.1, 0.2]], 2, 3]],
        )
Test nested tuple literals, both directly and through a WITH binding.
test_edgeql_expr_tuple_13
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_tuple_indirection_01(self):
        await self.assert_query_result(
            r"""
                SELECT ('foo', 42).0;
            """,
            ['foo'],
        )

        await self.assert_query_result(
            r"""
                SELECT ('foo', 42).1;
            """,
            [42],
        )
Test tuple element access by numeric index.
test_edgeql_expr_tuple_indirection_01
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_range_empty_01(self):
        # Test handling of empty ranges
        for st in ['int32', 'int64', 'float32', 'float64', 'decimal']:
            await self.assert_query_result(
                f'''
                    select range(<{st}>1, <{st}>1) =
                        range(<{st}>{{}}, empty := true);
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range(
                        <{st}>1, <{st}>1,
                        inc_lower := false,
                        inc_upper := true,
                    ) = range(<{st}>{{}}, empty := true);
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range(<{st}>1, <{st}>1, inc_upper := true) =
                        range(<{st}>{{}}, empty := true);
                ''',
                [False],
            )

            await self.assert_query_result(
                f'''
                    select range_is_empty(
                        range(
                            <{st}>{{}},
                            empty := true,
                        )
                    )
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range_is_empty(
                        range(
                            <{st}>{{}},
                            empty := false,
                        )
                    )
                ''',
                [False],
            )
Test handling of empty ranges for the numeric scalar types.
test_edgeql_expr_range_empty_01
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_range_05(self):
        # Test addition for numeric ranges.
        for st in ['int32', 'int64', 'float32', 'float64', 'decimal']:
            await self.assert_query_result(
                f'''
                    select range(<{st}>1, <{st}>5) + range(<{st}>2, <{st}>7) =
                        range(<{st}>1, <{st}>7);
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range(<{st}>{{}}, <{st}>5) +
                        range(<{st}>2, <{st}>7) =
                        range(<{st}>{{}}, <{st}>7);
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range(<{st}>2) + range(<{st}>1, <{st}>7) =
                        range(<{st}>1);
                ''',
                [True],
            )
Test addition for numeric ranges.
test_edgeql_expr_range_05
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_range_06(self):
        # Test intersection for numeric ranges.
        for st in ['int32', 'int64', 'float32', 'float64', 'decimal']:
            await self.assert_query_result(
                f'''
                    select range(<{st}>1, <{st}>5) * range(<{st}>2, <{st}>7) =
                        range(<{st}>2, <{st}>5);
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range(<{st}>{{}}, <{st}>5) *
                        range(<{st}>2, <{st}>7) =
                        range(<{st}>2, <{st}>5);
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range(<{st}>2) * range(<{st}>1, <{st}>7) =
                        range(<{st}>2, <{st}>7);
                ''',
                [True],
            )
Test intersection for numeric ranges.
test_edgeql_expr_range_06
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_range_07(self):
        # Test subtraction for numeric ranges.
        for st in ['int32', 'int64', 'float32', 'float64', 'decimal']:
            await self.assert_query_result(
                f'''
                    select range(<{st}>1, <{st}>5) - range(<{st}>2, <{st}>7) =
                        range(<{st}>1, <{st}>2);
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range(<{st}>{{}}, <{st}>5) -
                        range(<{st}>2, <{st}>7) =
                        range(<{st}>{{}}, <{st}>2);
                ''',
                [True],
            )

            await self.assert_query_result(
                f'''
                    select range(<{st}>2) - range(<{st}>1, <{st}>7) =
                        range(<{st}>7);
                ''',
                [True],
            )
Test subtraction for numeric ranges.
test_edgeql_expr_range_07
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_range_12(self):
        # Test addition for datetime ranges.
        await self.assert_query_result(
            f'''
                select range(<datetime>'2022-06-06T00:00:00Z',
                             <datetime>'2022-06-15T00:00:00Z') +
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>'2022-06-06T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z');
            ''',
            [True],
        )

        await self.assert_query_result(
            f'''
                select range(<datetime>{{}},
                             <datetime>'2022-06-15T00:00:00Z') +
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>{{}},
                          <datetime>'2022-06-17T00:00:00Z');
            ''',
            [True],
        )

        await self.assert_query_result(
            f'''
                select range(<datetime>'2022-06-10T00:00:00Z') +
                    range(<datetime>'2022-06-06T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>'2022-06-06T00:00:00Z');
            ''',
            [True],
        )
Test addition for datetime ranges.
test_edgeql_expr_range_12
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_range_13(self):
        # Test intersection for datetime ranges.
        await self.assert_query_result(
            f'''
                select range(<datetime>'2022-06-06T00:00:00Z',
                             <datetime>'2022-06-15T00:00:00Z') *
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-15T00:00:00Z');
            ''',
            [True],
        )

        await self.assert_query_result(
            f'''
                select range(<datetime>{{}},
                             <datetime>'2022-06-15T00:00:00Z') *
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-15T00:00:00Z');
            ''',
            [True],
        )

        await self.assert_query_result(
            f'''
                select range(<datetime>'2022-06-10T00:00:00Z') *
                    range(<datetime>'2022-06-06T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z');
            ''',
            [True],
        )
Test intersection for datetime ranges.
test_edgeql_expr_range_13
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_range_14(self):
        # Test subtraction for datetime ranges.
        await self.assert_query_result(
            f'''
                select range(<datetime>'2022-06-06T00:00:00Z',
                             <datetime>'2022-06-15T00:00:00Z') -
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>'2022-06-06T00:00:00Z',
                          <datetime>'2022-06-10T00:00:00Z');
            ''',
            [True],
        )

        await self.assert_query_result(
            f'''
                select range(<datetime>{{}},
                             <datetime>'2022-06-15T00:00:00Z') -
                    range(<datetime>'2022-06-10T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>{{}},
                          <datetime>'2022-06-10T00:00:00Z');
            ''',
            [True],
        )

        await self.assert_query_result(
            f'''
                select range(<datetime>'2022-06-10T00:00:00Z') -
                    range(<datetime>'2022-06-06T00:00:00Z',
                          <datetime>'2022-06-17T00:00:00Z') =
                    range(<datetime>'2022-06-17T00:00:00Z');
            ''',
            [True],
        )
Test subtraction for datetime ranges.
test_edgeql_expr_range_14
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0
async def test_edgeql_expr_range_19(self):
        # Test addition for local_datetime ranges.
        await self.assert_query_result(
            f'''
                select range(<cal::local_datetime>'2022-06-06T00:00:00',
                             <cal::local_datetime>'2022-06-15T00:00:00') +
                    range(<cal::local_datetime>'2022-06-10T00:00:00',
                          <cal::local_datetime>'2022-06-17T00:00:00') =
                    range(<cal::local_datetime>'2022-06-06T00:00:00',
                          <cal::local_datetime>'2022-06-17T00:00:00');
            ''',
            [True],
        )

        await self.assert_query_result(
            f'''
                select range(<cal::local_datetime>{{}},
                             <cal::local_datetime>'2022-06-15T00:00:00') +
                    range(<cal::local_datetime>'2022-06-10T00:00:00',
                          <cal::local_datetime>'2022-06-17T00:00:00') =
                    range(<cal::local_datetime>{{}},
                          <cal::local_datetime>'2022-06-17T00:00:00');
            ''',
            [True],
        )

        await self.assert_query_result(
            f'''
                select range(<cal::local_datetime>'2022-06-10T00:00:00') +
                    range(<cal::local_datetime>'2022-06-06T00:00:00',
                          <cal::local_datetime>'2022-06-17T00:00:00') =
                    range(<cal::local_datetime>'2022-06-06T00:00:00');
            ''',
            [True],
        )
Test addition for cal::local_datetime ranges.
test_edgeql_expr_range_19
python
geldata/gel
tests/test_edgeql_expressions.py
https://github.com/geldata/gel/blob/master/tests/test_edgeql_expressions.py
Apache-2.0