repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---
UString | UString-master/script/extract_res101_dad.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as osp
import numpy as np
import os, cv2
import argparse, sys
from tqdm import tqdm
import torch
import torch.nn as nn
from torchvision import models, transforms
from torch.autograd import Variable
from PIL import Image
CLASSES = ('__background__', 'Car', 'Pedestrian', 'Cyclist')
class ResNet(nn.Module):
def __init__(self, n_layers=101):
super(ResNet, self).__init__()
if n_layers == 50:
self.net = models.resnet50(pretrained=True)
elif n_layers == 101:
self.net = models.resnet101(pretrained=True)
else:
raise NotImplementedError
self.dim_feat = 2048
def forward(self, input):
output = self.net.conv1(input)
output = self.net.bn1(output)
output = self.net.relu(output)
output = self.net.maxpool(output)
output = self.net.layer1(output)
output = self.net.layer2(output)
output = self.net.layer3(output)
output = self.net.layer4(output)
output = self.net.avgpool(output)
return output
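
# Hedged shape sketch (not part of the original script): the extractor above is
# assumed to map a batch of 224x224 RGB crops to 2048-d features through the
# torchvision ResNet-101 trunk, with avgpool leaving two trailing unit dims.
def _check_extractor_shapes():
    model = ResNet(n_layers=101).eval()
    with torch.no_grad():
        feats = model(torch.randn(2, 3, 224, 224))
    assert feats.shape == (2, 2048, 1, 1)  # squeezed to (2, 2048) before storing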
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Extract ResNet-101 features from the Dashcam Accident Dataset (DAD)')
parser.add_argument('--dad_dir', dest='dad_dir', help='The directory to the Dashcam Accident Dataset', type=str)
parser.add_argument('--out_dir', dest='out_dir', help='The directory to the output files.', type=str)
    parser.add_argument('--n_frames', dest='n_frames', help='The number of frames sampled from each video', type=int, default=100)
    parser.add_argument('--n_boxes', dest='n_boxes', help='The number of bounding boxes for each frame', type=int, default=19)
    parser.add_argument('--dim_feat', dest='dim_feat', help='The dimension of extracted ResNet101 features', type=int, default=2048)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def get_video_frames(video_file, n_frames=100):
# get the video data
cap = cv2.VideoCapture(video_file)
ret, frame = cap.read()
video_data = []
counter = 0
while (ret):
video_data.append(frame)
ret, frame = cap.read()
counter += 1
assert counter == n_frames
return video_data
def bbox_to_imroi(bboxes, image):
"""
bboxes: (n, 4), ndarray
image: (H, W, 3), ndarray
"""
imroi_data = []
for bbox in bboxes:
imroi = image[bbox[1]:bbox[3], bbox[0]:bbox[2], :]
imroi = transform(Image.fromarray(imroi)) # (3, 224, 224), torch.Tensor
imroi_data.append(imroi)
imroi_data = torch.stack(imroi_data)
return imroi_data
def get_boxes(dets_all, im_size):
bboxes = []
for bbox in dets_all:
x1, y1, x2, y2 = bbox[:4].astype(np.int32)
x1 = min(max(0, x1), im_size[1]-1) # 0<=x1<=W-1
y1 = min(max(0, y1), im_size[0]-1) # 0<=y1<=H-1
x2 = min(max(x1, x2), im_size[1]-1) # x1<=x2<=W-1
y2 = min(max(y1, y2), im_size[0]-1) # y1<=y2<=H-1
h = y2 - y1 + 1
w = x2 - x1 + 1
if h > 2 and w > 2: # the area is at least 9
bboxes.append([x1, y1, x2, y2])
bboxes = np.array(bboxes, dtype=np.int32)
return bboxes
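
# Illustrative sketch (not in the original file): get_boxes clips detections to
# the image bounds and drops near-degenerate boxes; the rows below are made-up
# detector outputs for a 480x640 (H, W) frame.
def _demo_get_boxes():
    dets = np.array([[-5.0, 10.0, 650.0, 470.0, 0.9, 1.0],     # clipped to bounds
                     [100.0, 100.0, 101.0, 101.0, 0.8, 1.0]])  # 2x2 box, dropped
    assert get_boxes(dets, (480, 640)).tolist() == [[0, 10, 639, 469]]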
def extract_features(data_path, video_path, dest_path, phase):
files_list = []
batch_id = 1
all_batches = os.listdir(os.path.join(data_path, phase))
for filename in sorted(all_batches):
filepath = os.path.join(data_path, phase, filename)
all_data = np.load(filepath)
# parse the original DAD dataset
labels = all_data['labels'] # 10 x 2
videos = all_data['ID'] # 10
# features_old = all_data['data'] # 10 x 100 x 20 x 4096 (will be replaced)
detections = all_data['det'] # 10 x 100 x 19 x 6
# start to process each video
nid = 1
        for i, vid in tqdm(enumerate(videos), desc="The %d-th batch"%(batch_id), total=len(videos)):
vidname = 'b' + str(batch_id).zfill(3) + '_' + vid.decode('UTF-8')
if vidname in files_list:
vidname = vidname + '_' + str(nid).zfill(2)
nid += 1
feat_file = os.path.join(dest_path, vidname + '.npz')
if os.path.exists(feat_file):
continue
# continue on feature extraction
tag = 'positive' if labels[i, 1] > 0 else 'negative'
video_file = os.path.join(video_path, phase, tag, vid.decode('UTF-8') + '.mp4')
video_frames = get_video_frames(video_file, n_frames=args.n_frames)
# start to process each frame
features_res101 = np.zeros((args.n_frames, args.n_boxes + 1, args.dim_feat), dtype=np.float32) # (100 x 20 x 2048)
for j, frame in tqdm(enumerate(video_frames), desc="The %d-th video"%(i+1), total=len(video_frames)):
# find the non-empty boxes
bboxes = get_boxes(detections[i, j], frame.shape) # n x 4
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
with torch.no_grad():
# extract image feature
image = transform(Image.fromarray(frame))
ims_frame = torch.unsqueeze(image, dim=0).float().to(device=device)
feature_frame = torch.squeeze(feat_extractor(ims_frame))
features_res101[j, 0, :] = feature_frame.cpu().numpy() if feature_frame.is_cuda else feature_frame.detach().numpy()
# extract object feature
if len(bboxes) > 0:
# bboxes to roi data
ims_roi = bbox_to_imroi(bboxes, frame) # (n, 3, 224, 224)
ims_roi = ims_roi.float().to(device=device)
                        feature_roi = torch.squeeze(torch.squeeze(feat_extractor(ims_roi), dim=-1), dim=-1)  # (n, 2048)
features_res101[j, 1:len(bboxes)+1,:] = feature_roi.cpu().numpy() if feature_roi.is_cuda else feature_roi.detach().numpy()
# we only update the features
np.savez_compressed(feat_file, data=features_res101, det=detections[i], labels=labels[i], ID=vidname)
files_list.append(vidname)
batch_id += 1
return files_list
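
# Hedged note (inferred from the code above, not stated elsewhere): each saved
# .npz bundles one video as data (n_frames x (n_boxes + 1) x dim_feat), where
# slot 0 holds the full-frame feature and slots 1..n the object ROI features,
# alongside the original det, labels and ID fields.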
def run(data_path, video_path, dest_path):
# prepare the result paths
train_path = os.path.join(dest_path, 'training')
if not os.path.exists(train_path):
os.makedirs(train_path)
test_path = os.path.join(dest_path, 'testing')
if not os.path.exists(test_path):
os.makedirs(test_path)
# process training set
train_list = extract_features(data_path, video_path, train_path, 'training')
print('Training samples: %d'%(len(train_list)))
# process testing set
test_list = extract_features(data_path, video_path, test_path, 'testing')
print('Testing samples: %d' % (len(test_list)))
if __name__ == "__main__":
args = parse_args()
    # prepare the ResNet-101 feature extractor
feat_extractor = ResNet(n_layers=101)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
feat_extractor = feat_extractor.to(device=device)
feat_extractor.eval()
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor()]
)
data_path = osp.join(args.dad_dir, 'features') # /data/DAD/features
video_path = osp.join(args.dad_dir, 'videos') # /data/DAD/videos
run(data_path, video_path, args.out_dir) # out: /data/DAD/res101_features
print("Done!") | 7,692 | 38.654639 | 146 | py |
SelfDeblur | SelfDeblur-master/selfdeblur_levin_reproduce.py |
# coding: utf-8
from __future__ import print_function
import matplotlib.pyplot as plt
import argparse
import os
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.optim
import glob
from skimage.io import imread
from skimage.io import imsave
import warnings
from tqdm import tqdm
from torch.optim.lr_scheduler import MultiStepLR
from utils.common_utils import *
from SSIM import SSIM
parser = argparse.ArgumentParser()
parser.add_argument("--preprocess", type=bool, default=False, help='run prepare_data or not')
parser.add_argument('--num_iter', type=int, default=2, help='number of epochs of training')
parser.add_argument('--img_size', type=int, default=[256, 256], help='size of each image dimension')
parser.add_argument('--kernel_size', type=int, default=[21, 21], help='size of blur kernel [height, width]')
parser.add_argument('--data_path', type=str, default="imgs/levin/", help='path to blurry image')
parser.add_argument('--save_path', type=str, default="results/levin_reproduce/", help='path to save results')
parser.add_argument('--save_frequency', type=int, default=1, help='frequency to save results')
opt = parser.parse_args()
#print(opt)
#os.environ['CUDA_VISIBLE_DEVICES'] = '1'
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
dtype = torch.cuda.FloatTensor
warnings.filterwarnings("ignore")
files_source = glob.glob(os.path.join(opt.data_path, '*.png'))
files_source.sort()
save_path = opt.save_path
os.makedirs(save_path, exist_ok=True)
# start #image
for f in files_source:
INPUT = 'noise'
pad = 'reflection'
LR = 0.0001
num_iter = opt.num_iter
reg_noise_std = 0.001
path_to_image = f
imgname = os.path.basename(f)
imgname = os.path.splitext(imgname)[0]
if imgname.find('kernel1') != -1:
opt.kernel_size = [17, 17]
if imgname.find('kernel2') != -1:
opt.kernel_size = [15, 15]
if imgname.find('kernel3') != -1:
opt.kernel_size = [13, 13]
if imgname.find('kernel4') != -1:
opt.kernel_size = [27, 27]
if imgname.find('kernel5') != -1:
opt.kernel_size = [11, 11]
if imgname.find('kernel6') != -1:
opt.kernel_size = [19, 19]
if imgname.find('kernel7') != -1:
opt.kernel_size = [21, 21]
if imgname.find('kernel8') != -1:
opt.kernel_size = [21, 21]
_, imgs = get_image(path_to_image, -1) # load image and convert to np.
y = np_to_torch(imgs).type(dtype)
img_size = imgs.shape
print(imgname)
# ######################################################################
padh, padw = opt.kernel_size[0]-1, opt.kernel_size[1]-1
opt.img_size[0], opt.img_size[1] = img_size[1]+padh, img_size[2]+padw
'''
x_net:
'''
input_depth = 8
net_input = get_noise(input_depth, INPUT, (opt.img_size[0], opt.img_size[1])).type(dtype)
net = torch.load(os.path.join(opt.save_path, "%s_xnet.pth" % imgname))
net = net.type(dtype)
n_k = 200
net_input_kernel = get_noise(n_k, INPUT, (1, 1)).type(dtype)
net_input_kernel.squeeze_()
net_kernel = torch.load(os.path.join(opt.save_path, "%s_knet.pth" % imgname))
net_kernel = net_kernel.type(dtype)
# Losses
mse = torch.nn.MSELoss().type(dtype)
L1 = torch.nn.L1Loss(reduction='sum').type(dtype)
ssim = SSIM().type(dtype)
# optimizer
    optimizer = torch.optim.Adam([{'params': net.parameters()}, {'params': net_kernel.parameters(), 'lr': 0e-4}], lr=LR)  # kernel-net lr is zero, so the loaded kernel net stays frozen
scheduler = MultiStepLR(optimizer, milestones=[700, 800, 900], gamma=0.5) # learning rates
    # initialization inputs
net_input_saved = net_input.detach().clone()
net_input_kernel_saved = net_input_kernel.detach().clone()
### start SelfDeblur
for step in tqdm(range(num_iter)):
# input regularization
net_input = net_input_saved + reg_noise_std*torch.zeros(net_input_saved.shape).type_as(net_input_saved.data).normal_()
# net_input_kernel = net_input_kernel_saved + reg_noise_std*torch.zeros(net_input_kernel_saved.shape).type_as(net_input_kernel_saved.data).normal_()
# change the learning rate
scheduler.step(step)
optimizer.zero_grad()
# get the network output
out_x = net(net_input)
out_k = net_kernel(net_input_kernel)
out_k_m = out_k.view(-1,1,opt.kernel_size[0],opt.kernel_size[1])
# print(out_k_m)
out_y = nn.functional.conv2d(out_x, out_k_m, padding=0, bias=None)
        if step < 0:  # never true here: reproduction always uses the SSIM loss
total_loss = mse(out_y, y)
else:
total_loss = 1 - ssim(out_y, y) # + tv_loss(out_x) #+ tv_loss2(out_k_m)
total_loss.backward()
optimizer.step()
if (step+1) % opt.save_frequency == 0:
#print('Iteration %05d' %(step+1))
save_path = os.path.join(opt.save_path, '%s_x.png'%imgname)
out_x_np = torch_to_np(out_x)
out_x_np = out_x_np.squeeze()
out_x_np = out_x_np[padh//2:padh//2+img_size[1], padw//2:padw//2+img_size[2]]
#out_x_np = np.uint8(out_x_np*255)
#cv2.imwrite(save_path, out_x_np)
imsave(save_path, out_x_np)
save_path = os.path.join(opt.save_path, '%s_k.png'%imgname)
out_k_np = torch_to_np(out_k_m)
out_k_np = out_k_np.squeeze()
out_k_np /= np.max(out_k_np)
imsave(save_path, out_k_np)
#torch.save(net, os.path.join(opt.save_path, "%s_xnet.pth" % imgname))
#torch.save(net_kernel, os.path.join(opt.save_path, "%s_knet.pth" % imgname))
| 5,559 | 33.75 | 156 | py |
SelfDeblur | SelfDeblur-master/SSIM.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
return gauss / gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
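
# Hedged check (not in the original file): the window built above is assumed to
# be a unit-sum 2-D Gaussian replicated per channel for grouped convolution.
def _demo_create_window():
    w = create_window(window_size=11, channel=3)
    assert w.shape == (3, 1, 11, 11)
    assert torch.allclose(w[0].sum(), torch.tensor(1.0), atol=1e-5)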
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
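
# Hedged note (assumption following Wang et al.'s SSIM): C1 = (K1*L)^2 and
# C2 = (K2*L)^2 above use K1 = 0.01, K2 = 0.03 with dynamic range L = 1, i.e.
# inputs are expected in [0, 1]. A quick self-consistency sketch:
def _demo_ssim_identity():
    x = torch.rand(1, 1, 32, 32)
    assert abs(_ssim(x, x, create_window(11, 1), 11, 1).item() - 1.0) < 1e-4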
class SSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size=11, size_average=True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average) | 2,620 | 33.038961 | 114 | py |
SelfDeblur | SelfDeblur-master/selfdeblur_lai_reproduce.py |
# coding: utf-8
from __future__ import print_function
import matplotlib.pyplot as plt
import argparse
import os
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.optim
import glob
from skimage.io import imread
from skimage.io import imsave
import warnings
from tqdm import tqdm
from torch.optim.lr_scheduler import MultiStepLR
from utils.common_utils import *
from SSIM import SSIM
parser = argparse.ArgumentParser()
parser.add_argument("--preprocess", type=bool, default=False, help='run prepare_data or not')
parser.add_argument('--num_iter', type=int, default=2, help='number of epochs of training')
parser.add_argument('--img_size', type=int, default=[256, 256], help='size of each image dimension')
parser.add_argument('--kernel_size', type=int, default=[21, 21], help='size of blur kernel [height, width]')
parser.add_argument('--data_path', type=str, default="imgs/lai/uniform_ycbcr/", help='path to blurry image')
parser.add_argument('--save_path', type=str, default="results/lai/uniform_reproduce/", help='path to save results')
parser.add_argument('--save_frequency', type=int, default=1, help='frequency to save results')
opt = parser.parse_args()
#print(opt)
#os.environ['CUDA_VISIBLE_DEVICES'] = '1'
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
dtype = torch.cuda.FloatTensor
warnings.filterwarnings("ignore")
files_source = glob.glob(os.path.join(opt.data_path, '*.png'))
files_source.sort()
save_path = opt.save_path
os.makedirs(save_path, exist_ok=True)
# start #image
for f in files_source:
INPUT = 'noise'
pad = 'reflection'
LR = 0.0001
num_iter = opt.num_iter
reg_noise_std = 0.001
path_to_image = f
imgname = os.path.basename(f)
imgname = os.path.splitext(imgname)[0]
if imgname.find('kernel_01') != -1:
opt.kernel_size = [31, 31]
if imgname.find('kernel_02') != -1:
opt.kernel_size = [51, 51]
if imgname.find('kernel_03') != -1:
opt.kernel_size = [55, 55]
if imgname.find('kernel_04') != -1:
opt.kernel_size = [75, 75]
_, imgs = get_image(path_to_image, -1) # load image and convert to np.
y = np_to_torch(imgs).type(dtype)
img_size = imgs.shape
print(imgname)
# ######################################################################
padh, padw = opt.kernel_size[0]-1, opt.kernel_size[1]-1
opt.img_size[0], opt.img_size[1] = img_size[1]+padh, img_size[2]+padw
'''
x_net:
'''
input_depth = 8
net_input = get_noise(input_depth, INPUT, (opt.img_size[0], opt.img_size[1])).type(dtype)
net = torch.load(os.path.join(opt.save_path, "%s_xnet.pth" % imgname))
net = net.type(dtype)
n_k = 200
net_input_kernel = get_noise(n_k, INPUT, (1, 1)).type(dtype)
net_input_kernel.squeeze_()
net_kernel = torch.load(os.path.join(opt.save_path, "%s_knet.pth" % imgname))
net_kernel = net_kernel.type(dtype)
# Losses
mse = torch.nn.MSELoss().type(dtype)
L1 = torch.nn.L1Loss(reduction='sum').type(dtype)
ssim = SSIM().type(dtype)
# optimizer
    optimizer = torch.optim.Adam([{'params': net.parameters()}, {'params': net_kernel.parameters(), 'lr': 0e-4}], lr=LR)  # kernel-net lr is zero, so the loaded kernel net stays frozen
scheduler = MultiStepLR(optimizer, milestones=[700, 800, 900], gamma=0.5) # learning rates
    # initialization inputs
net_input_saved = net_input.detach().clone()
net_input_kernel_saved = net_input_kernel.detach().clone()
### start SelfDeblur
for step in tqdm(range(num_iter)):
# input regularization
net_input = net_input_saved + reg_noise_std*torch.zeros(net_input_saved.shape).type_as(net_input_saved.data).normal_()
# net_input_kernel = net_input_kernel_saved + reg_noise_std*torch.zeros(net_input_kernel_saved.shape).type_as(net_input_kernel_saved.data).normal_()
# change the learning rate
scheduler.step(step)
optimizer.zero_grad()
# get the network output
out_x = net(net_input)
out_k = net_kernel(net_input_kernel)
out_k_m = out_k.view(-1,1,opt.kernel_size[0],opt.kernel_size[1])
# print(out_k_m)
out_y = nn.functional.conv2d(out_x, out_k_m, padding=0, bias=None)
        if step < 0:  # never true here: reproduction always uses the SSIM loss
total_loss = mse(out_y, y)
else:
total_loss = 1 - ssim(out_y, y) # + tv_loss(out_x) #+ tv_loss2(out_k_m)
total_loss.backward()
optimizer.step()
if (step+1) % opt.save_frequency == 0:
#print('Iteration %05d' %(step+1))
save_path = os.path.join(opt.save_path, '%s_x.png'%imgname)
out_x_np = torch_to_np(out_x)
out_x_np = out_x_np.squeeze()
out_x_np = out_x_np[padh//2:padh//2+img_size[1], padw//2:padw//2+img_size[2]]
#out_x_np = np.uint8(out_x_np*255)
#cv2.imwrite(save_path, out_x_np)
imsave(save_path, out_x_np)
save_path = os.path.join(opt.save_path, '%s_k.png'%imgname)
out_k_np = torch_to_np(out_k_m)
out_k_np = out_k_np.squeeze()
out_k_np /= np.max(out_k_np)
imsave(save_path, out_k_np)
#torch.save(net, os.path.join(opt.save_path, "%s_xnet.pth" % imgname))
#torch.save(net_kernel, os.path.join(opt.save_path, "%s_knet.pth" % imgname))
| 5,294 | 33.835526 | 156 | py |
SelfDeblur | SelfDeblur-master/selfdeblur_lai.py |
from __future__ import print_function
import matplotlib.pyplot as plt
import argparse
import os
import numpy as np
from networks.skip import skip
from networks.fcn import *
import cv2
import torch
import torch.optim
import glob
from skimage.io import imread
from skimage.io import imsave
import warnings
from tqdm import tqdm
from torch.optim.lr_scheduler import MultiStepLR
from utils.common_utils import *
from SSIM import SSIM
parser = argparse.ArgumentParser()
parser.add_argument('--num_iter', type=int, default=5000, help='number of epochs of training')
parser.add_argument('--img_size', type=int, default=[256, 256], help='size of each image dimension')
parser.add_argument('--kernel_size', type=int, default=[21, 21], help='size of blur kernel [height, width]')
parser.add_argument('--data_path', type=str, default="datasets/lai/uniform_ycbcr/", help='path to blurry image')
parser.add_argument('--save_path', type=str, default="results/lai/uniform", help='path to save results')
parser.add_argument('--save_frequency', type=int, default=100, help='frequency to save results')
opt = parser.parse_args()
#print(opt)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
dtype = torch.cuda.FloatTensor
warnings.filterwarnings("ignore")
files_source = glob.glob(os.path.join(opt.data_path, '*.png'))
files_source.sort()
save_path = opt.save_path
os.makedirs(save_path, exist_ok=True)
# start #image
for f in files_source:
INPUT = 'noise'
pad = 'reflection'
LR = 0.01
num_iter = opt.num_iter
reg_noise_std = 0.001
path_to_image = f
imgname = os.path.basename(f)
imgname = os.path.splitext(imgname)[0]
if imgname.find('kernel_01') != -1:
opt.kernel_size = [31, 31]
if imgname.find('kernel_02') != -1:
opt.kernel_size = [51, 51]
if imgname.find('kernel_03') != -1:
opt.kernel_size = [55, 55]
if imgname.find('kernel_04') != -1:
opt.kernel_size = [75, 75]
_, imgs = get_image(path_to_image, -1) # load image and convert to np.
y = np_to_torch(imgs).type(dtype)
img_size = imgs.shape
print(imgname)
# ######################################################################
padh, padw = opt.kernel_size[0]-1, opt.kernel_size[1]-1
opt.img_size[0], opt.img_size[1] = img_size[1]+padh, img_size[2]+padw
'''
x_net:
'''
input_depth = 8
net_input = get_noise(input_depth, INPUT, (opt.img_size[0], opt.img_size[1])).type(dtype)
net = skip( input_depth, 1,
num_channels_down = [128, 128, 128, 128, 128],
num_channels_up = [128, 128, 128, 128, 128],
num_channels_skip = [16, 16, 16, 16, 16],
upsample_mode='bilinear',
need_sigmoid=True, need_bias=True, pad=pad, act_fun='LeakyReLU')
net = net.type(dtype)
n_k = 200
net_input_kernel = get_noise(n_k, INPUT, (1, 1)).type(dtype)
net_input_kernel.squeeze_()
net_kernel = fcn(n_k, opt.kernel_size[0]*opt.kernel_size[1])
net_kernel = net_kernel.type(dtype)
# Losses
mse = torch.nn.MSELoss().type(dtype)
ssim = SSIM().type(dtype)
# optimizer
optimizer = torch.optim.Adam([{'params':net.parameters()},{'params':net_kernel.parameters(),'lr':1e-4}], lr=LR)
scheduler = MultiStepLR(optimizer, milestones=[2000, 3000, 4000], gamma=0.5) # learning rates
#
net_input_saved = net_input.detach().clone()
net_input_kernel_saved = net_input_kernel.detach().clone()
### start SelfDeblur
for step in tqdm(range(num_iter)):
# input regularization
net_input = net_input_saved + reg_noise_std*torch.zeros(net_input_saved.shape).type_as(net_input_saved.data).normal_()
# net_input_kernel = net_input_kernel_saved + reg_noise_std*torch.zeros(net_input_kernel_saved.shape).type_as(net_input_kernel_saved.data).normal_()
# change the learning rate
scheduler.step(step)
optimizer.zero_grad()
# get the network output
out_x = net(net_input)
out_k = net_kernel(net_input_kernel)
out_k_m = out_k.view(-1,1,opt.kernel_size[0],opt.kernel_size[1])
# print(out_k_m)
out_y = nn.functional.conv2d(out_x, out_k_m, padding=0, bias=None)
if step < 500:
total_loss = mse(out_y, y)
else:
total_loss = 1 - ssim(out_y, y)
total_loss.backward()
optimizer.step()
if (step+1) % opt.save_frequency == 0:
#print('Iteration %05d' %(step+1))
save_path = os.path.join(opt.save_path, '%s_x.png'%imgname)
out_x_np = torch_to_np(out_x)
out_x_np = out_x_np.squeeze()
out_x_np = out_x_np[padh//2:padh//2+img_size[1], padw//2:padw//2+img_size[2]]
imsave(save_path, out_x_np)
save_path = os.path.join(opt.save_path, '%s_k.png'%imgname)
out_k_np = torch_to_np(out_k_m)
out_k_np = out_k_np.squeeze()
out_k_np /= np.max(out_k_np)
imsave(save_path, out_k_np)
torch.save(net, os.path.join(opt.save_path, "%s_xnet.pth" % imgname))
torch.save(net_kernel, os.path.join(opt.save_path, "%s_knet.pth" % imgname))
| 5,242 | 33.045455 | 156 | py |
SelfDeblur | SelfDeblur-master/selfdeblur_nonblind.py |
from __future__ import print_function
import matplotlib.pyplot as plt
import argparse
import os
import numpy as np
from networks.skip import skip
from networks.fcn import *
import cv2
import torch
import torch.optim
import glob
from skimage.io import imread
from skimage.io import imsave
import warnings
from tqdm import tqdm
from torch.optim.lr_scheduler import MultiStepLR
from utils.common_utils import *
from SSIM import SSIM
parser = argparse.ArgumentParser()
parser.add_argument("--preprocess", type=bool, default=False, help='run prepare_data or not')
parser.add_argument('--num_iter', type=int, default=1000, help='number of epochs of training')
parser.add_argument('--img_size', type=int, default=[256, 256], help='size of each image dimension')
parser.add_argument('--kernel_size', type=int, default=[21, 21], help='size of blur kernel [height, width]')
parser.add_argument('--data_path', type=str, default="results/lai/uniform/nonblind/blurry/", help='path to blurry image')
parser.add_argument('--save_path', type=str, default="results/lai/uniform/nonblind/blurry/results", help='path to save results')
parser.add_argument('--save_frequency', type=int, default=100, help='frequency to save results')
opt = parser.parse_args()
#print(opt)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
dtype = torch.cuda.FloatTensor
warnings.filterwarnings("ignore")
files_source = glob.glob(os.path.join(opt.data_path, '*.png'))
files_source.sort()
save_path = opt.save_path
os.makedirs(save_path, exist_ok=True)
# start #image
for f in files_source:
INPUT = 'noise'
pad = 'reflection'
LR = 0.01
num_iter = opt.num_iter
reg_noise_std = 0.001
path_to_image = f
imgname = os.path.basename(f)
imgname = os.path.splitext(imgname)[0]
if imgname.find('kernel_01') != -1:
opt.kernel_size = [31, 31]
if imgname.find('kernel_02') != -1:
opt.kernel_size = [51, 51]
if imgname.find('kernel_03') != -1:
opt.kernel_size = [55, 55]
if imgname.find('kernel_04') != -1:
opt.kernel_size = [75, 75]
_, imgs = get_image(path_to_image, -1) # load image and convert to np.
y = np_to_torch(imgs).type(dtype)
img_size = imgs.shape
path_to_kernel = os.path.join(opt.save_path, "%s_k.png" % imgname)
out_k = cv2.imread(path_to_kernel,cv2.IMREAD_GRAYSCALE)
out_k = np.expand_dims(np.float32(out_k/255.),0)
out_k = np_to_torch(out_k).type(dtype)
out_k = torch.clamp(out_k, 0., 1.)
out_k /= torch.sum(out_k)
opt.kernel_size = [out_k.shape[2], out_k.shape[3]]
print(imgname)
# ######################################################################
padh, padw = opt.kernel_size[0]-1, opt.kernel_size[1]-1
opt.img_size[0], opt.img_size[1] = img_size[1]+padh, img_size[2]+padw
'''
x_net:
'''
input_depth = 8
net_input = get_noise(input_depth, INPUT, (opt.img_size[0], opt.img_size[1])).type(dtype)
net = skip( input_depth, 1,
num_channels_down = [128, 128, 128, 128, 128],
num_channels_up = [128, 128, 128, 128, 128],
num_channels_skip = [16, 16, 16, 16, 16],
upsample_mode='bilinear',
need_sigmoid=True, need_bias=True, pad=pad, act_fun='LeakyReLU')
net = net.type(dtype)
# Losses
mse = torch.nn.MSELoss().type(dtype)
ssim = SSIM().type(dtype)
# optimizer
optimizer = torch.optim.Adam([{'params':net.parameters()}], lr=LR)
scheduler = MultiStepLR(optimizer, milestones=[700, 800, 900], gamma=0.5) # learning rates
    # initialization inputs
net_input_saved = net_input.detach().clone()
### start SelfDeblur
for step in tqdm(range(num_iter)):
# input regularization
net_input = net_input_saved + reg_noise_std*torch.zeros(net_input_saved.shape).type_as(net_input_saved.data).normal_()
# change the learning rate
scheduler.step(step)
optimizer.zero_grad()
# get the network output
out_x = net(net_input)
# print(out_k_m)
out_y = nn.functional.conv2d(out_x, out_k, padding=0, bias=None)
total_loss = 1 - ssim(out_y, y)
total_loss.backward()
optimizer.step()
if (step+1) % opt.save_frequency == 0:
#print('Iteration %05d' %(step+1))
save_path = os.path.join(opt.save_path, '%s_x.png'%imgname)
out_x_np = torch_to_np(out_x)
out_x_np = out_x_np.squeeze()
out_x_np = out_x_np[padh//2:padh//2+img_size[1], padw//2:padw//2+img_size[2]]
imsave(save_path, out_x_np)
torch.save(net, os.path.join(opt.save_path, "%s_xnet.pth" % imgname))
| 4,721 | 32.489362 | 128 | py |
SelfDeblur | SelfDeblur-master/selfdeblur_ycbcr.py |
from __future__ import print_function
import matplotlib.pyplot as plt
import argparse
import os
import numpy as np
from networks.skip import skip
from networks.fcn import fcn
import cv2
import torch
import torch.optim
from torch.autograd import Variable
import glob
from skimage.io import imread
from skimage.io import imsave
from PIL import Image
import warnings
from tqdm import tqdm
from torch.optim.lr_scheduler import MultiStepLR
from utils.common_utils import *
from SSIM import SSIM
parser = argparse.ArgumentParser()
parser.add_argument('--num_iter', type=int, default=2500, help='number of epochs of training')
parser.add_argument('--img_size', type=int, default=[256, 256], help='size of each image dimension')
parser.add_argument('--kernel_size', type=int, default=[31, 31], help='size of blur kernel [height, width]')
parser.add_argument('--data_path', type=str, default="datasets/real", help='path to blurry image')
parser.add_argument('--save_path', type=str, default="results/real/", help='path to deblurring results')
parser.add_argument('--save_frequency', type=int, default=100, help='frequency to save results')
opt = parser.parse_args()
# print(opt)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
dtype = torch.cuda.FloatTensor
warnings.filterwarnings("ignore")
files_source = glob.glob(os.path.join(opt.data_path, '*.jpg'))
files_source.sort()
save_path = opt.save_path
os.makedirs(save_path, exist_ok=True)
# start #image
for f in files_source:
INPUT = 'noise'
pad = 'reflection'
LR = 0.01
num_iter = opt.num_iter
reg_noise_std = 0.001
path_to_image = f
imgname = os.path.basename(f)
imgname = os.path.splitext(imgname)[0]
if imgname.find('fish') != -1:
opt.kernel_size = [41, 41]
if imgname.find('flower') != -1:
opt.kernel_size = [25, 25]
if imgname.find('house') != -1:
opt.kernel_size = [51, 51]
img, y, cb, cr = readimg(path_to_image)
y = np.float32(y / 255.0)
y = np.expand_dims(y, 0)
img_size = y.shape
print(imgname)
# ######################################################################
padw, padh = opt.kernel_size[0]-1, opt.kernel_size[1]-1
opt.img_size[0], opt.img_size[1] = img_size[1]+padw, img_size[2]+padh
#y = y[:, padh//2:img_size[1]-padh//2, padw//2:img_size[2]-padw//2]
y = np_to_torch(y).type(dtype)
input_depth = 8
net_input = get_noise(input_depth, INPUT, (opt.img_size[0], opt.img_size[1])).type(dtype)
net = skip(input_depth, 1,
num_channels_down=[128, 128, 128, 128, 128],
num_channels_up=[128, 128, 128, 128, 128],
num_channels_skip=[16, 16, 16, 16, 16],
upsample_mode='bilinear',
need_sigmoid=True, need_bias=True, pad=pad, act_fun='LeakyReLU')
net = net.type(dtype)
n_k = 200
net_input_kernel = get_noise(n_k, INPUT, (1, 1)).type(dtype)
net_input_kernel.squeeze_()
net_kernel = fcn(n_k, opt.kernel_size[0] * opt.kernel_size[1])
net_kernel = net_kernel.type(dtype)
# Losses
mse = torch.nn.MSELoss().type(dtype)
ssim = SSIM().type(dtype)
# optimizer
optimizer = torch.optim.Adam([{'params': net.parameters()}, {'params': net_kernel.parameters(), 'lr': 1e-4}], lr=LR)
scheduler = MultiStepLR(optimizer, milestones=[1600, 1900, 2200], gamma=0.5) # learning rates
    # initialization inputs
net_input_saved = net_input.detach().clone()
net_input_kernel_saved = net_input_kernel.detach().clone()
### start SelfDeblur
for step in tqdm(range(num_iter)):
# input regularization
net_input = net_input_saved + reg_noise_std * torch.zeros(net_input_saved.shape).type_as(
net_input_saved.data).normal_()
# net_input_kernel = net_input_kernel_saved + reg_noise_std*torch.zeros(net_input_kernel_saved.shape).type_as(net_input_kernel_saved.data).normal_()
# change the learning rate
scheduler.step(step)
optimizer.zero_grad()
# get the network output
out_x = net(net_input)
out_k = net_kernel(net_input_kernel)
out_k_m = out_k.view(-1, 1, opt.kernel_size[0], opt.kernel_size[1])
# print(out_k_m)
out_y = nn.functional.conv2d(out_x, out_k_m, padding=0, bias=None)
y_size = out_y.shape
cropw = y_size[2]-img_size[1]
croph = y_size[3]-img_size[2]
out_y = out_y[:,:,cropw//2:cropw//2+img_size[1],croph//2:croph//2+img_size[2]]
if step < 500:
total_loss = mse(out_y, y)
else:
total_loss = 1 - ssim(out_y, y)
total_loss.backward()
optimizer.step()
if (step + 1) % opt.save_frequency == 0:
# print('Iteration %05d' %(step+1))
save_path = os.path.join(opt.save_path, '%s_x.png' % imgname)
out_x_np = torch_to_np(out_x)
out_x_np = out_x_np.squeeze()
cropw, croph = padw, padh
out_x_np = out_x_np[cropw//2:cropw//2+img_size[1], croph//2:croph//2+img_size[2]]
out_x_np = np.uint8(255 * out_x_np)
out_x_np = cv2.merge([out_x_np, cr, cb])
out_x_np = cv2.cvtColor(out_x_np, cv2.COLOR_YCrCb2BGR)
cv2.imwrite(save_path, out_x_np)
save_path = os.path.join(opt.save_path, '%s_k.png' % imgname)
out_k_np = torch_to_np(out_k_m)
out_k_np = out_k_np.squeeze()
out_k_np /= np.max(out_k_np)
imsave(save_path, out_k_np)
torch.save(net, os.path.join(opt.save_path, "%s_xnet.pth" % imgname))
torch.save(net_kernel, os.path.join(opt.save_path, "%s_knet.pth" % imgname))
| 5,714 | 34.06135 | 156 | py |
SelfDeblur | SelfDeblur-master/selfdeblur_levin.py |
from __future__ import print_function
import matplotlib.pyplot as plt
import argparse
import os
import numpy as np
from networks.skip import skip
from networks.fcn import fcn
import cv2
import torch
import torch.optim
import glob
from skimage.io import imread
from skimage.io import imsave
import warnings
from tqdm import tqdm
from torch.optim.lr_scheduler import MultiStepLR
from utils.common_utils import *
from SSIM import SSIM
parser = argparse.ArgumentParser()
parser.add_argument('--num_iter', type=int, default=5000, help='number of epochs of training')
parser.add_argument('--img_size', type=int, default=[256, 256], help='size of each image dimension')
parser.add_argument('--kernel_size', type=int, default=[21, 21], help='size of blur kernel [height, width]')
parser.add_argument('--data_path', type=str, default="datasets/levin/", help='path to blurry image')
parser.add_argument('--save_path', type=str, default="results/levin/", help='path to save results')
parser.add_argument('--save_frequency', type=int, default=100, help='frequency to save results')
opt = parser.parse_args()
#print(opt)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
dtype = torch.cuda.FloatTensor
warnings.filterwarnings("ignore")
files_source = glob.glob(os.path.join(opt.data_path, '*.png'))
files_source.sort()
save_path = opt.save_path
os.makedirs(save_path, exist_ok=True)
# start #image
for f in files_source:
INPUT = 'noise'
pad = 'reflection'
LR = 0.01
num_iter = opt.num_iter
reg_noise_std = 0.001
path_to_image = f
imgname = os.path.basename(f)
imgname = os.path.splitext(imgname)[0]
if imgname.find('kernel1') != -1:
opt.kernel_size = [17, 17]
if imgname.find('kernel2') != -1:
opt.kernel_size = [15, 15]
if imgname.find('kernel3') != -1:
opt.kernel_size = [13, 13]
if imgname.find('kernel4') != -1:
opt.kernel_size = [27, 27]
if imgname.find('kernel5') != -1:
opt.kernel_size = [11, 11]
if imgname.find('kernel6') != -1:
opt.kernel_size = [19, 19]
if imgname.find('kernel7') != -1:
opt.kernel_size = [21, 21]
if imgname.find('kernel8') != -1:
opt.kernel_size = [21, 21]
_, imgs = get_image(path_to_image, -1) # load image and convert to np.
y = np_to_torch(imgs).type(dtype)
img_size = imgs.shape
print(imgname)
# ######################################################################
padh, padw = opt.kernel_size[0]-1, opt.kernel_size[1]-1
opt.img_size[0], opt.img_size[1] = img_size[1]+padh, img_size[2]+padw
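    # Enlarge the generator canvas by (kernel size - 1) per dimension so that
    # the valid (padding=0) convolution with the estimated kernel below shrinks
    # the output back to the blurry image size.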
'''
x_net:
'''
input_depth = 8
net_input = get_noise(input_depth, INPUT, (opt.img_size[0], opt.img_size[1])).type(dtype)
net = skip( input_depth, 1,
num_channels_down = [128, 128, 128, 128, 128],
num_channels_up = [128, 128, 128, 128, 128],
num_channels_skip = [16, 16, 16, 16, 16],
upsample_mode='bilinear',
need_sigmoid=True, need_bias=True, pad=pad, act_fun='LeakyReLU')
net = net.type(dtype)
'''
k_net:
'''
n_k = 200
net_input_kernel = get_noise(n_k, INPUT, (1, 1)).type(dtype)
net_input_kernel.squeeze_()
net_kernel = fcn(n_k, opt.kernel_size[0]*opt.kernel_size[1])
net_kernel = net_kernel.type(dtype)
# Losses
mse = torch.nn.MSELoss().type(dtype)
ssim = SSIM().type(dtype)
# optimizer
optimizer = torch.optim.Adam([{'params':net.parameters()},{'params':net_kernel.parameters(),'lr':1e-4}], lr=LR)
scheduler = MultiStepLR(optimizer, milestones=[2000, 3000, 4000], gamma=0.5) # learning rates
    # initialization inputs
net_input_saved = net_input.detach().clone()
net_input_kernel_saved = net_input_kernel.detach().clone()
### start SelfDeblur
for step in tqdm(range(num_iter)):
# input regularization
net_input = net_input_saved + reg_noise_std*torch.zeros(net_input_saved.shape).type_as(net_input_saved.data).normal_()
# change the learning rate
scheduler.step(step)
optimizer.zero_grad()
# get the network output
out_x = net(net_input)
out_k = net_kernel(net_input_kernel)
out_k_m = out_k.view(-1,1,opt.kernel_size[0],opt.kernel_size[1])
# print(out_k_m)
out_y = nn.functional.conv2d(out_x, out_k_m, padding=0, bias=None)
if step < 1000:
total_loss = mse(out_y,y)
else:
total_loss = 1-ssim(out_y, y)
total_loss.backward()
optimizer.step()
if (step+1) % opt.save_frequency == 0:
#print('Iteration %05d' %(step+1))
save_path = os.path.join(opt.save_path, '%s_x.png'%imgname)
out_x_np = torch_to_np(out_x)
out_x_np = out_x_np.squeeze()
out_x_np = out_x_np[padh//2:padh//2+img_size[1], padw//2:padw//2+img_size[2]]
imsave(save_path, out_x_np)
save_path = os.path.join(opt.save_path, '%s_k.png'%imgname)
out_k_np = torch_to_np(out_k_m)
out_k_np = out_k_np.squeeze()
out_k_np /= np.max(out_k_np)
imsave(save_path, out_k_np)
torch.save(net, os.path.join(opt.save_path, "%s_xnet.pth" % imgname))
torch.save(net_kernel, os.path.join(opt.save_path, "%s_knet.pth" % imgname))
| 5,395 | 32.515528 | 126 | py |
SelfDeblur | SelfDeblur-master/networks/fcn.py | import torch
import torch.nn as nn
from .common import *
def fcn(num_input_channels=200, num_output_channels=1, num_hidden=1000):
    # nn.Sequential.add is assumed to be the add_module shorthand patched in
    # by networks.common (imported above), following the deep-image-prior code
    model = nn.Sequential()
    model.add(nn.Linear(num_input_channels, num_hidden, bias=True))
    model.add(nn.ReLU6())
    #
    model.add(nn.Linear(num_hidden, num_output_channels))
    # model.add(nn.ReLU())
    model.add(nn.Softmax())  # normalizes the output to sum to 1
#
return model
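
# Hedged usage sketch (not in the original file): SelfDeblur reshapes the
# softmax output into a blur kernel that sums to 1; 200 and 21x21 mirror the
# defaults used by the training scripts.
def _demo_fcn_kernel():
    net_kernel = fcn(200, 21 * 21)
    k = net_kernel(torch.randn(200)).view(1, 1, 21, 21)
    assert abs(k.sum().item() - 1.0) < 1e-5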
| 398 | 13.25 | 72 | py |
SelfDeblur | SelfDeblur-master/networks/non_local_embedded_gaussian.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x):
'''
:param x: (b, c, t, h, w)
:return:
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
f_div_C = F.softmax(f, dim=-1)
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
if __name__ == '__main__':
import torch
for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
| 4,916 | 36.25 | 102 | py |
SelfDeblur | SelfDeblur-master/networks/skip.py | import torch
import torch.nn as nn
from .common import *
#from .non_local_embedded_gaussian import NONLocalBlock2D
#from .non_local_concatenation import NONLocalBlock2D
#from .non_local_gaussian import NONLocalBlock2D
from .non_local_dot_product import NONLocalBlock2D
def skip(
num_input_channels=2, num_output_channels=3,
num_channels_down=[16, 32, 64, 128, 128], num_channels_up=[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4],
filter_size_down=3, filter_size_up=3, filter_skip_size=1,
need_sigmoid=True, need_bias=True,
pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU',
need1x1_up=True):
"""Assembles encoder-decoder with skip connections.
Arguments:
act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)
pad (string): zero|reflection (default: 'zero')
upsample_mode (string): 'nearest|bilinear' (default: 'nearest')
downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')
"""
assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)
n_scales = len(num_channels_down)
if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)):
upsample_mode = [upsample_mode]*n_scales
if not (isinstance(downsample_mode, list) or isinstance(downsample_mode, tuple)):
downsample_mode = [downsample_mode]*n_scales
if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)):
filter_size_down = [filter_size_down]*n_scales
if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)):
filter_size_up = [filter_size_up]*n_scales
last_scale = n_scales - 1
cur_depth = None
model = nn.Sequential()
model_tmp = model
input_depth = num_input_channels
for i in range(len(num_channels_down)):
deeper = nn.Sequential()
skip = nn.Sequential()
if num_channels_skip[i] != 0:
model_tmp.add(Concat(1, skip, deeper))
else:
model_tmp.add(deeper)
model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))
if num_channels_skip[i] != 0:
skip.add(conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))
skip.add(bn(num_channels_skip[i]))
skip.add(act(act_fun))
# skip.add(Concat(2, GenNoise(nums_noise[i]), skip_part))
deeper.add(conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad, downsample_mode=downsample_mode[i]))
deeper.add(bn(num_channels_down[i]))
deeper.add(act(act_fun))
if i>1:
deeper.add(NONLocalBlock2D(in_channels=num_channels_down[i]))
deeper.add(conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))
deeper.add(bn(num_channels_down[i]))
deeper.add(act(act_fun))
deeper_main = nn.Sequential()
if i == len(num_channels_down) - 1:
# The deepest
k = num_channels_down[i]
else:
deeper.add(deeper_main)
k = num_channels_up[i + 1]
deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i]))
model_tmp.add(conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))
model_tmp.add(bn(num_channels_up[i]))
model_tmp.add(act(act_fun))
if need1x1_up:
model_tmp.add(conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))
model_tmp.add(bn(num_channels_up[i]))
model_tmp.add(act(act_fun))
input_depth = num_channels_down[i]
model_tmp = deeper_main
model.add(conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))
if need_sigmoid:
model.add(nn.Sigmoid())
return model
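
# Hedged usage sketch (not in the original file): the SelfDeblur scripts drive
# this generator with an 8-channel noise input and one output channel; spatial
# size must be divisible by 2**5 for the five stride-2 scales to round-trip.
def _demo_skip():
    net = skip(8, 1,
               num_channels_down=[128, 128, 128, 128, 128],
               num_channels_up=[128, 128, 128, 128, 128],
               num_channels_skip=[16, 16, 16, 16, 16],
               upsample_mode='bilinear',
               need_sigmoid=True, need_bias=True, pad='reflection', act_fun='LeakyReLU')
    assert net(torch.randn(1, 8, 256, 256)).shape == (1, 1, 256, 256)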
| 4,045 | 36.119266 | 144 | py |
SelfDeblur | SelfDeblur-master/networks/resnet.py | import torch
import torch.nn as nn
from numpy.random import normal
from numpy.linalg import svd
from math import sqrt
import torch.nn.init
from .common import *
class ResidualSequential(nn.Sequential):
def __init__(self, *args):
super(ResidualSequential, self).__init__(*args)
def forward(self, x):
out = super(ResidualSequential, self).forward(x)
# print(x.size(), out.size())
x_ = None
if out.size(2) != x.size(2) or out.size(3) != x.size(3):
diff2 = x.size(2) - out.size(2)
diff3 = x.size(3) - out.size(3)
# print(1)
            x_ = x[:, :, diff2 // 2:out.size(2) + diff2 // 2, diff3 // 2:out.size(3) + diff3 // 2]
else:
x_ = x
return out + x_
    def eval(self):
        # NOTE: debug leftover; calling eval() on this module prints and exits
        print(2)
for m in self.modules():
m.eval()
exit()
def get_block(num_channels, norm_layer, act_fun):
layers = [
nn.Conv2d(num_channels, num_channels, 3, 1, 1, bias=False),
norm_layer(num_channels, affine=True),
act(act_fun),
nn.Conv2d(num_channels, num_channels, 3, 1, 1, bias=False),
norm_layer(num_channels, affine=True),
]
return layers
class ResNet(nn.Module):
def __init__(self, num_input_channels, num_output_channels, num_blocks, num_channels, need_residual=True, act_fun='LeakyReLU', need_sigmoid=True, norm_layer=nn.BatchNorm2d, pad='reflection'):
'''
pad = 'start|zero|replication'
'''
super(ResNet, self).__init__()
if need_residual:
s = ResidualSequential
else:
s = nn.Sequential
# stride = 1
# First layers
layers = [
# nn.ReplicationPad2d(num_blocks * 2 * stride + 3),
conv(num_input_channels, num_channels, 3, stride=1, bias=True, pad=pad),
act(act_fun)
]
# Residual blocks
# layers_residual = []
for i in range(num_blocks):
layers += [s(*get_block(num_channels, norm_layer, act_fun))]
layers += [
nn.Conv2d(num_channels, num_channels, 3, 1, 1),
norm_layer(num_channels, affine=True)
]
# if need_residual:
# layers += [ResidualSequential(*layers_residual)]
# else:
# layers += [Sequential(*layers_residual)]
# if factor >= 2:
# # Do upsampling if needed
# layers += [
# nn.Conv2d(num_channels, num_channels *
# factor ** 2, 3, 1),
# nn.PixelShuffle(factor),
# act(act_fun)
# ]
layers += [
conv(num_channels, num_output_channels, 3, 1, bias=True, pad=pad),
nn.Sigmoid()
]
self.model = nn.Sequential(*layers)
def forward(self, input):
return self.model(input)
def eval(self):
self.model.eval()
| 2,945 | 29.371134 | 195 | py |
SelfDeblur | SelfDeblur-master/networks/downsampler.py | import numpy as np
import torch
import torch.nn as nn
class Downsampler(nn.Module):
'''
http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
'''
def __init__(self, n_planes, factor, kernel_type, phase=0, kernel_width=None, support=None, sigma=None, preserve_size=False):
super(Downsampler, self).__init__()
assert phase in [0, 0.5], 'phase should be 0 or 0.5'
if kernel_type == 'lanczos2':
support = 2
kernel_width = 4 * factor + 1
kernel_type_ = 'lanczos'
elif kernel_type == 'lanczos3':
support = 3
kernel_width = 6 * factor + 1
kernel_type_ = 'lanczos'
elif kernel_type == 'gauss12':
kernel_width = 7
sigma = 1/2
kernel_type_ = 'gauss'
elif kernel_type == 'gauss1sq2':
kernel_width = 9
sigma = 1./np.sqrt(2)
kernel_type_ = 'gauss'
elif kernel_type in ['lanczos', 'gauss', 'box']:
kernel_type_ = kernel_type
else:
assert False, 'wrong name kernel'
# note that `kernel width` will be different to actual size for phase = 1/2
self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width, support=support, sigma=sigma)
downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=factor, padding=0)
downsampler.weight.data[:] = 0
downsampler.bias.data[:] = 0
kernel_torch = torch.from_numpy(self.kernel)
for i in range(n_planes):
downsampler.weight.data[i, i] = kernel_torch
self.downsampler_ = downsampler
if preserve_size:
if self.kernel.shape[0] % 2 == 1:
pad = int((self.kernel.shape[0] - 1) / 2.)
else:
pad = int((self.kernel.shape[0] - factor) / 2.)
self.padding = nn.ReplicationPad2d(pad)
self.preserve_size = preserve_size
def forward(self, input):
if self.preserve_size:
x = self.padding(input)
else:
            x = input
self.x = x
return self.downsampler_(x)
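
# Hedged usage sketch (not in the original file): a factor-2 lanczos2
# downsampler with preserve_size keeps the output at exactly half resolution.
def _demo_downsampler():
    d = Downsampler(n_planes=3, factor=2, kernel_type='lanczos2', phase=0.5,
                    preserve_size=True)
    assert d(torch.randn(1, 3, 64, 64)).shape == (1, 3, 32, 32)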
class Blurconv(nn.Module):
'''
http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
'''
def __init__(self, n_planes=1, preserve_size=False):
super(Blurconv, self).__init__()
# self.kernel = kernel
# blurconv = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=1, padding=0)
# blurconvr.weight.data = self.kernel
# blurconv.bias.data[:] = 0
self.n_planes = n_planes
self.preserve_size = preserve_size
# kernel_torch = torch.from_numpy(self.kernel)
# for i in range(n_planes):
# blurconv.weight.data[i, i] = kernel_torch
# self.blurconv_ = blurconv
#
# if preserve_size:
#
# if self.kernel.shape[0] % 2 == 1:
# pad = int((self.kernel.shape[0] - 1) / 2.)
# else:
# pad = int((self.kernel.shape[0] - factor) / 2.)
#
# self.padding = nn.ReplicationPad2d(pad)
#
# self.preserve_size = preserve_size
def forward(self, input, kernel):
if self.preserve_size:
            if kernel.shape[3] % 2 == 1:
pad = int((kernel.shape[3] - 1) / 2.)
else:
pad = int((kernel.shape[3] - 1.) / 2.)
padding = nn.ReplicationPad2d(pad)
x = padding(input)
else:
            x = input
blurconv = nn.Conv2d(self.n_planes, self.n_planes, kernel_size=kernel.size(3), stride=1, padding=0, bias=False).cuda()
blurconv.weight.data[:] = kernel
return blurconv(x)
class Blurconv2(nn.Module):
'''
http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
'''
def __init__(self, n_planes=1, preserve_size=False, k_size=21):
super(Blurconv2, self).__init__()
self.n_planes = n_planes
self.k_size = k_size
self.preserve_size = preserve_size
self.blurconv = nn.Conv2d(self.n_planes, self.n_planes, kernel_size=k_size, stride=1, padding=0, bias=False)
# self.blurconv.weight.data[:] /= self.blurconv.weight.data.sum()
def forward(self, input):
if self.preserve_size:
pad = int((self.k_size - 1.) / 2.)
padding = nn.ReplicationPad2d(pad)
x = padding(input)
else:
            x = input
#self.blurconv.weight.data[:] /= self.blurconv.weight.data.sum()
return self.blurconv(x)
def get_kernel(factor, kernel_type, phase, kernel_width, support=None, sigma=None):
assert kernel_type in ['lanczos', 'gauss', 'box']
# factor = float(factor)
if phase == 0.5 and kernel_type != 'box':
kernel = np.zeros([kernel_width - 1, kernel_width - 1])
else:
kernel = np.zeros([kernel_width, kernel_width])
if kernel_type == 'box':
assert phase == 0.5, 'Box filter is always half-phased'
kernel[:] = 1./(kernel_width * kernel_width)
elif kernel_type == 'gauss':
assert sigma, 'sigma is not specified'
assert phase != 0.5, 'phase 1/2 for gauss not implemented'
center = (kernel_width + 1.)/2.
print(center, kernel_width)
sigma_sq = sigma * sigma
for i in range(1, kernel.shape[0] + 1):
for j in range(1, kernel.shape[1] + 1):
di = (i - center)/2.
dj = (j - center)/2.
kernel[i - 1][j - 1] = np.exp(-(di * di + dj * dj)/(2 * sigma_sq))
kernel[i - 1][j - 1] = kernel[i - 1][j - 1]/(2. * np.pi * sigma_sq)
elif kernel_type == 'lanczos':
assert support, 'support is not specified'
center = (kernel_width + 1) / 2.
for i in range(1, kernel.shape[0] + 1):
for j in range(1, kernel.shape[1] + 1):
if phase == 0.5:
di = abs(i + 0.5 - center) / factor
dj = abs(j + 0.5 - center) / factor
else:
di = abs(i - center) / factor
dj = abs(j - center) / factor
pi_sq = np.pi * np.pi
val = 1
if di != 0:
val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)
val = val / (np.pi * np.pi * di * di)
if dj != 0:
val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)
val = val / (np.pi * np.pi * dj * dj)
kernel[i - 1][j - 1] = val
else:
assert False, 'wrong method name'
kernel /= kernel.sum()
return kernel
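
# Hedged check (not in the original file): kernels returned above are
# normalized to unit sum; e.g. a half-phase box kernel is uniform.
def _demo_get_kernel():
    k = get_kernel(factor=2, kernel_type='box', phase=0.5, kernel_width=4)
    assert np.allclose(k, 1.0 / 16.0) and np.isclose(k.sum(), 1.0)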
#a = Downsampler(n_planes=3, factor=2, kernel_type='lanczos2', phase='1', preserve_size=True)
#################
# Learnable downsampler
# KS = 32
# dow = nn.Sequential(nn.ReplicationPad2d(int((KS - factor) / 2.)), nn.Conv2d(1,1,KS,factor))
# class Apply(nn.Module):
# def __init__(self, what, dim, *args):
# super(Apply, self).__init__()
# self.dim = dim
# self.what = what
# def forward(self, input):
# inputs = []
# for i in range(input.size(self.dim)):
# inputs.append(self.what(input.narrow(self.dim, i, 1)))
# return torch.cat(inputs, dim=self.dim)
# def __len__(self):
# return len(self._modules)
# downs = Apply(dow, 1)
# downs.type(dtype)(net_input.type(dtype)).size()
| 7,872 | 31.66805 | 129 | py |
SelfDeblur | SelfDeblur-master/networks/non_local_dot_product.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x):
'''
:param x: (b, c, t, h, w)
:return:
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
N = f.size(-1)
f_div_C = f / N
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
if __name__ == '__main__':
import torch
for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
| 4,926 | 35.496296 | 102 | py |
SelfDeblur | SelfDeblur-master/networks/non_local_concatenation.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.concat_project = nn.Sequential(
nn.Conv2d(self.inter_channels * 2, 1, 1, 1, 0, bias=False),
nn.ReLU()
)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x):
'''
:param x: (b, c, t, h, w)
        :return: z, tensor with the same shape as x
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
# (b, c, N, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1, 1)
# (b, c, 1, N)
phi_x = self.phi(x).view(batch_size, self.inter_channels, 1, -1)
h = theta_x.size(2)
w = phi_x.size(3)
theta_x = theta_x.repeat(1, 1, 1, w)
phi_x = phi_x.repeat(1, 1, h, 1)
concat_feature = torch.cat([theta_x, phi_x], dim=1)
f = self.concat_project(concat_feature)
b, _, h, w = f.size()
f = f.view(b, h, w)
N = f.size(-1)
f_div_C = f / N
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
if __name__ == '__main__':
import torch
for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
| 5,350 | 35.155405 | 102 | py |
SelfDeblur | SelfDeblur-master/networks/common.py | import torch
import torch.nn as nn
import numpy as np
from .downsampler import Downsampler
def add_module(self, module):
self.add_module(str(len(self) + 1), module)
torch.nn.Module.add = add_module
class Concat(nn.Module):
def __init__(self, dim, *args):
super(Concat, self).__init__()
self.dim = dim
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def forward(self, input):
inputs = []
for module in self._modules.values():
inputs.append(module(input))
inputs_shapes2 = [x.shape[2] for x in inputs]
inputs_shapes3 = [x.shape[3] for x in inputs]
if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all(np.array(inputs_shapes3) == min(inputs_shapes3)):
inputs_ = inputs
else:
target_shape2 = min(inputs_shapes2)
target_shape3 = min(inputs_shapes3)
inputs_ = []
for inp in inputs:
diff2 = (inp.size(2) - target_shape2) // 2
diff3 = (inp.size(3) - target_shape3) // 2
inputs_.append(inp[:, :, diff2: diff2 + target_shape2, diff3:diff3 + target_shape3])
return torch.cat(inputs_, dim=self.dim)
def __len__(self):
return len(self._modules)
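# Added sketch (not part of the original file): Concat centre-crops branch
# outputs to the smallest spatial size before concatenating, so branches with
# slightly different padding still line up; this mirrors Concat.forward above.
def _concat_crop_sketch():
    a = torch.zeros(1, 4, 17, 17)
    b = torch.zeros(1, 2, 16, 16)
    target = min(a.size(2), b.size(2))
    d = (a.size(2) - target) // 2
    a_ = a[:, :, d:d + target, d:d + target]
    return torch.cat([a_, b], dim=1)       # -> (1, 6, 16, 16)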
class GenNoise(nn.Module):
def __init__(self, dim2):
super(GenNoise, self).__init__()
self.dim2 = dim2
def forward(self, input):
a = list(input.size())
a[1] = self.dim2
# print (input.data.type())
b = torch.zeros(a).type_as(input.data)
b.normal_()
x = torch.autograd.Variable(b)
return x
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
def act(act_fun = 'LeakyReLU'):
'''
Either string defining an activation function or module (e.g. nn.ReLU)
'''
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun == 'ELU':
return nn.ELU()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
def bn(num_features):
return nn.BatchNorm2d(num_features)
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
downsampler = None
if stride != 1 and downsample_mode != 'stride':
if downsample_mode == 'avg':
downsampler = nn.AvgPool2d(stride, stride)
elif downsample_mode == 'max':
downsampler = nn.MaxPool2d(stride, stride)
elif downsample_mode in ['lanczos2', 'lanczos3']:
downsampler = Downsampler(n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True)
else:
assert False
stride = 1
padder = None
to_pad = int((kernel_size - 1) / 2)
if pad == 'reflection':
padder = nn.ReflectionPad2d(to_pad)
to_pad = 0
convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)
layers = filter(lambda x: x is not None, [padder, convolver, downsampler])
return nn.Sequential(*layers) | 3,531 | 27.483871 | 128 | py |
SelfDeblur | SelfDeblur-master/networks/unet.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from .common import *
class ListModule(nn.Module):
def __init__(self, *args):
super(ListModule, self).__init__()
idx = 0
for module in args:
self.add_module(str(idx), module)
idx += 1
def __getitem__(self, idx):
if idx >= len(self._modules):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx = len(self) + idx
it = iter(self._modules.values())
for i in range(idx):
next(it)
return next(it)
def __iter__(self):
return iter(self._modules.values())
def __len__(self):
return len(self._modules)
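# Added sketch (not part of the original file): ListModule registers a
# variable number of submodules so their parameters are tracked, much like
# the modern nn.ModuleList; it is indexable and iterable.
def _list_module_sketch():
    layers = ListModule(nn.Linear(8, 8), nn.Linear(8, 4))
    x = torch.zeros(2, 8)
    for layer in layers:
        x = layer(x)
    return x                               # shape (2, 4)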
class UNet(nn.Module):
'''
upsample_mode in ['deconv', 'nearest', 'bilinear']
pad in ['zero', 'replication', 'none']
'''
def __init__(self, num_input_channels=3, num_output_channels=3,
feature_scale=4, more_layers=0, concat_x=False,
upsample_mode='deconv', pad='zero', norm_layer=nn.InstanceNorm2d, need_sigmoid=True, need_bias=True):
super(UNet, self).__init__()
self.feature_scale = feature_scale
self.more_layers = more_layers
self.concat_x = concat_x
filters = [64, 128, 256, 512, 1024]
filters = [x // self.feature_scale for x in filters]
self.start = unetConv2(num_input_channels, filters[0] if not concat_x else filters[0] - num_input_channels, norm_layer, need_bias, pad)
self.down1 = unetDown(filters[0], filters[1] if not concat_x else filters[1] - num_input_channels, norm_layer, need_bias, pad)
self.down2 = unetDown(filters[1], filters[2] if not concat_x else filters[2] - num_input_channels, norm_layer, need_bias, pad)
self.down3 = unetDown(filters[2], filters[3] if not concat_x else filters[3] - num_input_channels, norm_layer, need_bias, pad)
self.down4 = unetDown(filters[3], filters[4] if not concat_x else filters[4] - num_input_channels, norm_layer, need_bias, pad)
# more downsampling layers
if self.more_layers > 0:
self.more_downs = [
unetDown(filters[4], filters[4] if not concat_x else filters[4] - num_input_channels , norm_layer, need_bias, pad) for i in range(self.more_layers)]
self.more_ups = [unetUp(filters[4], upsample_mode, need_bias, pad, same_num_filt =True) for i in range(self.more_layers)]
self.more_downs = ListModule(*self.more_downs)
self.more_ups = ListModule(*self.more_ups)
self.up4 = unetUp(filters[3], upsample_mode, need_bias, pad)
self.up3 = unetUp(filters[2], upsample_mode, need_bias, pad)
self.up2 = unetUp(filters[1], upsample_mode, need_bias, pad)
self.up1 = unetUp(filters[0], upsample_mode, need_bias, pad)
self.final = conv(filters[0], num_output_channels, 1, bias=need_bias, pad=pad)
if need_sigmoid:
self.final = nn.Sequential(self.final, nn.Sigmoid())
def forward(self, inputs):
# Downsample
downs = [inputs]
down = nn.AvgPool2d(2, 2)
for i in range(4 + self.more_layers):
downs.append(down(downs[-1]))
in64 = self.start(inputs)
if self.concat_x:
in64 = torch.cat([in64, downs[0]], 1)
down1 = self.down1(in64)
if self.concat_x:
down1 = torch.cat([down1, downs[1]], 1)
down2 = self.down2(down1)
if self.concat_x:
down2 = torch.cat([down2, downs[2]], 1)
down3 = self.down3(down2)
if self.concat_x:
down3 = torch.cat([down3, downs[3]], 1)
down4 = self.down4(down3)
if self.concat_x:
down4 = torch.cat([down4, downs[4]], 1)
if self.more_layers > 0:
prevs = [down4]
for kk, d in enumerate(self.more_downs):
# print(prevs[-1].size())
out = d(prevs[-1])
if self.concat_x:
out = torch.cat([out, downs[kk + 5]], 1)
prevs.append(out)
up_ = self.more_ups[-1](prevs[-1], prevs[-2])
for idx in range(self.more_layers - 1):
                l = self.more_ups[self.more_layers - idx - 2]
                up_ = l(up_, prevs[self.more_layers - idx - 2])
else:
up_= down4
up4= self.up4(up_, down3)
up3= self.up3(up4, down2)
up2= self.up2(up3, down1)
up1= self.up1(up2, in64)
return self.final(up1)
class unetConv2(nn.Module):
def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
super(unetConv2, self).__init__()
print(pad)
if norm_layer is not None:
self.conv1= nn.Sequential(conv(in_size, out_size, 3, bias=need_bias, pad=pad),
norm_layer(out_size),
nn.ReLU(),)
self.conv2= nn.Sequential(conv(out_size, out_size, 3, bias=need_bias, pad=pad),
norm_layer(out_size),
nn.ReLU(),)
else:
self.conv1= nn.Sequential(conv(in_size, out_size, 3, bias=need_bias, pad=pad),
nn.ReLU(),)
self.conv2= nn.Sequential(conv(out_size, out_size, 3, bias=need_bias, pad=pad),
nn.ReLU(),)
def forward(self, inputs):
outputs= self.conv1(inputs)
outputs= self.conv2(outputs)
return outputs
class unetDown(nn.Module):
def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
super(unetDown, self).__init__()
self.conv= unetConv2(in_size, out_size, norm_layer, need_bias, pad)
self.down= nn.MaxPool2d(2, 2)
def forward(self, inputs):
outputs= self.down(inputs)
outputs= self.conv(outputs)
return outputs
class unetUp(nn.Module):
def __init__(self, out_size, upsample_mode, need_bias, pad, same_num_filt=False):
super(unetUp, self).__init__()
num_filt = out_size if same_num_filt else out_size * 2
if upsample_mode == 'deconv':
self.up= nn.ConvTranspose2d(num_filt, out_size, 4, stride=2, padding=1)
self.conv= unetConv2(out_size * 2, out_size, None, need_bias, pad)
elif upsample_mode=='bilinear' or upsample_mode=='nearest':
self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode=upsample_mode),
conv(num_filt, out_size, 3, bias=need_bias, pad=pad))
self.conv= unetConv2(out_size * 2, out_size, None, need_bias, pad)
else:
assert False
def forward(self, inputs1, inputs2):
in1_up= self.up(inputs1)
if (inputs2.size(2) != in1_up.size(2)) or (inputs2.size(3) != in1_up.size(3)):
diff2 = (inputs2.size(2) - in1_up.size(2)) // 2
diff3 = (inputs2.size(3) - in1_up.size(3)) // 2
inputs2_ = inputs2[:, :, diff2 : diff2 + in1_up.size(2), diff3 : diff3 + in1_up.size(3)]
else:
inputs2_ = inputs2
output= self.conv(torch.cat([in1_up, inputs2_], 1))
return output
if __name__ == '__main__':
print(1)
# net = UNet()
# print(net.forward) | 7,408 | 36.045 | 164 | py |
SelfDeblur | SelfDeblur-master/networks/non_local_gaussian.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = max_pool_layer
def forward(self, x):
'''
:param x: (b, c, t, h, w)
        :return: z, tensor with the same shape as x
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = x.view(batch_size, self.in_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
if self.sub_sample:
phi_x = self.phi(x).view(batch_size, self.in_channels, -1)
else:
phi_x = x.view(batch_size, self.in_channels, -1)
f = torch.matmul(theta_x, phi_x)
f_div_C = F.softmax(f, dim=-1)
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
if __name__ == '__main__':
import torch
for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
| 4,674 | 33.124088 | 102 | py |
SelfDeblur | SelfDeblur-master/models/skipfc.py | import torch
import torch.nn as nn
from .common import *
def skipfc(num_input_channels=2, num_output_channels=3,
num_channels_down=[16, 32, 64, 128, 128], num_channels_up=[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4],
filter_size_down=3, filter_size_up=1, filter_skip_size=1,
need_sigmoid=True, need_bias=True,
pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU',
need1x1_up=True):
"""Assembles encoder-decoder with skip connections.
Arguments:
act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)
pad (string): zero|reflection (default: 'zero')
upsample_mode (string): 'nearest|bilinear' (default: 'nearest')
downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')
"""
# assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)
#
# n_scales = len(num_channels_down)
#
# if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)):
# upsample_mode = [upsample_mode]*n_scales
#
# if not (isinstance(downsample_mode, list) or isinstance(downsample_mode, tuple)):
# downsample_mode = [downsample_mode]*n_scales
#
# if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)):
# filter_size_down = [filter_size_down]*n_scales
#
# if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)):
# filter_size_up = [filter_size_up]*n_scales
#
# last_scale = n_scales - 1
#
# cur_depth = None
#
# model = nn.Sequential()
# model_tmp = model
#
# input_depth = num_input_channels
# for i in range(len(num_channels_down)):
#
# deeper = nn.Sequential()
# skip = nn.Sequential()
#
# if num_channels_skip[i] != 0:
# model_tmp.add(Concat(1, skip, deeper))
# else:
# model_tmp.add(deeper)
#
# model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))
#
# if num_channels_skip[i] != 0:
# skip.add(nn.Linear(input_depth, num_channels_skip[i]))
# skip.add(bn(num_channels_skip[i]))
# skip.add(act(act_fun))
#
# # skip.add(Concat(2, GenNoise(nums_noise[i]), skip_part))
#
# deeper.add(nn.Linear(input_depth, num_channels_down[i]))
# deeper.add(bn(num_channels_down[i]))
# deeper.add(act(act_fun))
#
# deeper.add(nn.Linear(num_channels_down[i], num_channels_down[i]))
# deeper.add(bn(num_channels_down[i]))
# deeper.add(act(act_fun))
#
# deeper_main = nn.Sequential()
#
# if i == len(num_channels_down) - 1:
# # The deepest
# k = num_channels_down[i]
# else:
# deeper.add(deeper_main)
# k = num_channels_up[i + 1]
#
# deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i]))
#
# model_tmp.add(nn.Linear(num_channels_skip[i] + k, num_channels_up[i]))
# model_tmp.add(bn(num_channels_up[i]))
# model_tmp.add(act(act_fun))
#
#
# if need1x1_up:
# model_tmp.add(nn.Linear(num_channels_up[i], num_channels_up[i]))
# model_tmp.add(bn(num_channels_up[i]))
# model_tmp.add(act(act_fun))
#
# input_depth = num_channels_down[i]
# model_tmp = deeper_main
#
# model.add(nn.Linear(num_channels_up[0], num_output_channels))
# if need_sigmoid:
# model.add(nn.Softmax())
#
# return model
model = nn.Sequential()
model.add(nn.Linear(num_input_channels, num_channels_down[0],bias=True))
model.add(nn.ReLU6())
# model.add(nn.Tanh())
# model.add(nn.Linear(num_channels_down[0], num_channels_down[0],bias=True))
# model.add(nn.ReLU6())
# model.add(nn.Linear(num_channels_down[0], num_channels_down[0],bias=True))
# model.add(nn.ReLU6())
# model.add(nn.Linear(num_channels_down[0], num_channels_down[0],bias=True))
# model.add(nn.ReLU6())
# model.add(nn.Linear(num_channels_down[0], num_channels_down[0],bias=True))
# model.add(nn.ReLU6())
# model.add(nn.Linear(num_channels_down[0], num_channels_down[0],bias=True))
# model.add(nn.ReLU6())
# model.add(nn.Linear(num_channels_down[0], num_channels_down[1]))
# model.add(act(act_fun))
# model.add(nn.Linear(num_channels_down[1], num_channels_down[2]))
# model.add(act(act_fun))
# model.add(nn.Linear(num_channels_down[2], num_channels_down[3]))
# model.add(act(act_fun))
# model.add(nn.Linear(num_channels_down[3], num_channels_up[3]))
# model.add(act(act_fun))
# model.add(nn.Linear(num_channels_up[3], num_channels_up[2]))
# model.add(act(act_fun))
# model.add(nn.Linear(num_channels_up[2], num_channels_up[1]))
# model.add(act(act_fun))
# model.add(nn.Linear(num_channels_up[1], num_channels_up[0]))
# model.add(act(act_fun))
model.add(nn.Linear(num_channels_down[0], num_output_channels))
# model.add(nn.ReLU())
    model.add(nn.Softmax(dim=1))
# model.add(nn.Threshold(0.00001, 0))
return model
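# Added usage sketch (assumption, not part of the original file): map a noise
# code to a flattened blur kernel; the softmax makes the kernel entries
# non-negative and sum to one. The sizes here are illustrative only.
def _skipfc_kernel_sketch():
    net_kernel = skipfc(num_input_channels=200, num_output_channels=21 * 21)
    k = net_kernel(torch.zeros(1, 200)).view(1, 1, 21, 21)
    return k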
| 5,145 | 34.489655 | 128 | py |
SelfDeblur | SelfDeblur-master/models/non_local_embedded_gaussian.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x):
'''
:param x: (b, c, t, h, w)
        :return: z, tensor with the same shape as x
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
f_div_C = F.softmax(f, dim=-1)
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
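# Added sketch (not part of the original file): with the softmax over
# f = theta(x) @ phi(x)^T, this embedded-Gaussian block is the self-attention
# form of the non-local operation; each row of the attention map sums to one.
def _attention_row_sum_sketch():
    theta = torch.randn(1, 5, 8)           # (batch, positions, channels)
    phi = torch.randn(1, 5, 8)
    f = torch.matmul(theta, phi.transpose(1, 2))
    w = F.softmax(f, dim=-1)
    return w.sum(-1)                       # ~= ones(1, 5)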
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
if __name__ == '__main__':
import torch
for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
| 4,916 | 36.25 | 102 | py |
SelfDeblur | SelfDeblur-master/models/skip.py | import torch
import torch.nn as nn
from .common import *
from .non_local_dot_product import NONLocalBlock2D
def skip(
num_input_channels=2, num_output_channels=3,
num_channels_down=[16, 32, 64, 128, 128], num_channels_up=[16, 32, 64, 128, 128], num_channels_skip=[4, 4, 4, 4, 4],
filter_size_down=3, filter_size_up=3, filter_skip_size=1,
need_sigmoid=True, need_bias=True,
pad='zero', upsample_mode='nearest', downsample_mode='stride', act_fun='LeakyReLU',
need1x1_up=True):
"""Assembles encoder-decoder with skip connections.
Arguments:
act_fun: Either string 'LeakyReLU|Swish|ELU|none' or module (e.g. nn.ReLU)
pad (string): zero|reflection (default: 'zero')
upsample_mode (string): 'nearest|bilinear' (default: 'nearest')
downsample_mode (string): 'stride|avg|max|lanczos2' (default: 'stride')
"""
assert len(num_channels_down) == len(num_channels_up) == len(num_channels_skip)
n_scales = len(num_channels_down)
if not (isinstance(upsample_mode, list) or isinstance(upsample_mode, tuple)):
upsample_mode = [upsample_mode]*n_scales
if not (isinstance(downsample_mode, list) or isinstance(downsample_mode, tuple)):
downsample_mode = [downsample_mode]*n_scales
if not (isinstance(filter_size_down, list) or isinstance(filter_size_down, tuple)):
filter_size_down = [filter_size_down]*n_scales
if not (isinstance(filter_size_up, list) or isinstance(filter_size_up, tuple)):
filter_size_up = [filter_size_up]*n_scales
last_scale = n_scales - 1
cur_depth = None
model = nn.Sequential()
model_tmp = model
input_depth = num_input_channels
for i in range(len(num_channels_down)):
deeper = nn.Sequential()
skip = nn.Sequential()
if num_channels_skip[i] != 0:
model_tmp.add(Concat(1, skip, deeper))
else:
model_tmp.add(deeper)
model_tmp.add(bn(num_channels_skip[i] + (num_channels_up[i + 1] if i < last_scale else num_channels_down[i])))
if num_channels_skip[i] != 0:
skip.add(conv(input_depth, num_channels_skip[i], filter_skip_size, bias=need_bias, pad=pad))
skip.add(bn(num_channels_skip[i]))
skip.add(act(act_fun))
# skip.add(Concat(2, GenNoise(nums_noise[i]), skip_part))
deeper.add(conv(input_depth, num_channels_down[i], filter_size_down[i], 2, bias=need_bias, pad=pad, downsample_mode=downsample_mode[i]))
deeper.add(bn(num_channels_down[i]))
deeper.add(act(act_fun))
if i>1:
deeper.add(NONLocalBlock2D(in_channels=num_channels_down[i]))
deeper.add(conv(num_channels_down[i], num_channels_down[i], filter_size_down[i], bias=need_bias, pad=pad))
deeper.add(bn(num_channels_down[i]))
deeper.add(act(act_fun))
deeper_main = nn.Sequential()
if i == len(num_channels_down) - 1:
# The deepest
k = num_channels_down[i]
else:
deeper.add(deeper_main)
k = num_channels_up[i + 1]
deeper.add(nn.Upsample(scale_factor=2, mode=upsample_mode[i]))
model_tmp.add(conv(num_channels_skip[i] + k, num_channels_up[i], filter_size_up[i], 1, bias=need_bias, pad=pad))
model_tmp.add(bn(num_channels_up[i]))
model_tmp.add(act(act_fun))
if need1x1_up:
model_tmp.add(conv(num_channels_up[i], num_channels_up[i], 1, bias=need_bias, pad=pad))
model_tmp.add(bn(num_channels_up[i]))
model_tmp.add(act(act_fun))
input_depth = num_channels_down[i]
model_tmp = deeper_main
model.add(conv(num_channels_up[0], num_output_channels, 1, bias=need_bias, pad=pad))
if need_sigmoid:
model.add(nn.Sigmoid())
return model
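# Added usage sketch (assumption, not part of the original file): a
# SelfDeblur-style image generator decoding an 8-channel noise tensor into a
# one-channel image; channel counts and sizes are illustrative only.
def _skip_usage_sketch():
    net = skip(8, 1,
               num_channels_down=[128, 128, 128, 128, 128],
               num_channels_up=[128, 128, 128, 128, 128],
               num_channels_skip=[16, 16, 16, 16, 16],
               upsample_mode='bilinear', need_sigmoid=True,
               need_bias=True, pad='reflection', act_fun='LeakyReLU')
    return net(torch.zeros(1, 8, 256, 256))    # -> (1, 1, 256, 256)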
| 3,885 | 35.317757 | 144 | py |
SelfDeblur | SelfDeblur-master/models/resnet.py | import torch
import torch.nn as nn
from numpy.random import normal
from numpy.linalg import svd
from math import sqrt
import torch.nn.init
from .common import *
class ResidualSequential(nn.Sequential):
def __init__(self, *args):
super(ResidualSequential, self).__init__(*args)
def forward(self, x):
out = super(ResidualSequential, self).forward(x)
# print(x.size(), out.size())
x_ = None
if out.size(2) != x.size(2) or out.size(3) != x.size(3):
diff2 = x.size(2) - out.size(2)
diff3 = x.size(3) - out.size(3)
# print(1)
            x_ = x[:, :, diff2 // 2:out.size(2) + diff2 // 2, diff3 // 2:out.size(3) + diff3 // 2]
else:
x_ = x
return out + x_
    def eval(self):
        # the original debug version recursed over self.modules() (which
        # includes self) and then called exit(); delegate to nn.Module instead
        return super(ResidualSequential, self).eval()
def get_block(num_channels, norm_layer, act_fun):
layers = [
nn.Conv2d(num_channels, num_channels, 3, 1, 1, bias=False),
norm_layer(num_channels, affine=True),
act(act_fun),
nn.Conv2d(num_channels, num_channels, 3, 1, 1, bias=False),
norm_layer(num_channels, affine=True),
]
return layers
class ResNet(nn.Module):
def __init__(self, num_input_channels, num_output_channels, num_blocks, num_channels, need_residual=True, act_fun='LeakyReLU', need_sigmoid=True, norm_layer=nn.BatchNorm2d, pad='reflection'):
'''
pad = 'start|zero|replication'
'''
super(ResNet, self).__init__()
if need_residual:
s = ResidualSequential
else:
s = nn.Sequential
# stride = 1
# First layers
layers = [
# nn.ReplicationPad2d(num_blocks * 2 * stride + 3),
conv(num_input_channels, num_channels, 3, stride=1, bias=True, pad=pad),
act(act_fun)
]
# Residual blocks
# layers_residual = []
for i in range(num_blocks):
layers += [s(*get_block(num_channels, norm_layer, act_fun))]
layers += [
nn.Conv2d(num_channels, num_channels, 3, 1, 1),
norm_layer(num_channels, affine=True)
]
# if need_residual:
# layers += [ResidualSequential(*layers_residual)]
# else:
# layers += [Sequential(*layers_residual)]
# if factor >= 2:
# # Do upsampling if needed
# layers += [
# nn.Conv2d(num_channels, num_channels *
# factor ** 2, 3, 1),
# nn.PixelShuffle(factor),
# act(act_fun)
# ]
layers += [
conv(num_channels, num_output_channels, 3, 1, bias=True, pad=pad),
nn.Sigmoid()
]
self.model = nn.Sequential(*layers)
def forward(self, input):
return self.model(input)
def eval(self):
self.model.eval()
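# Added usage sketch (assumption, not part of the original file): a small
# residual generator; spatial size is preserved by the reflection padding.
def _resnet_usage_sketch():
    net = ResNet(num_input_channels=32, num_output_channels=3, num_blocks=5,
                 num_channels=16, norm_layer=nn.BatchNorm2d, pad='reflection')
    return net(torch.zeros(1, 32, 64, 64))     # -> (1, 3, 64, 64), in [0, 1]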
| 2,945 | 29.371134 | 195 | py |
SelfDeblur | SelfDeblur-master/models/downsampler.py | import numpy as np
import torch
import torch.nn as nn
class Downsampler(nn.Module):
'''
http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
'''
def __init__(self, n_planes, factor, kernel_type, phase=0, kernel_width=None, support=None, sigma=None, preserve_size=False):
super(Downsampler, self).__init__()
assert phase in [0, 0.5], 'phase should be 0 or 0.5'
if kernel_type == 'lanczos2':
support = 2
kernel_width = 4 * factor + 1
kernel_type_ = 'lanczos'
elif kernel_type == 'lanczos3':
support = 3
kernel_width = 6 * factor + 1
kernel_type_ = 'lanczos'
elif kernel_type == 'gauss12':
kernel_width = 7
sigma = 1/2
kernel_type_ = 'gauss'
elif kernel_type == 'gauss1sq2':
kernel_width = 9
sigma = 1./np.sqrt(2)
kernel_type_ = 'gauss'
elif kernel_type in ['lanczos', 'gauss', 'box']:
kernel_type_ = kernel_type
else:
assert False, 'wrong name kernel'
# note that `kernel width` will be different to actual size for phase = 1/2
self.kernel = get_kernel(factor, kernel_type_, phase, kernel_width, support=support, sigma=sigma)
downsampler = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=factor, padding=0)
downsampler.weight.data[:] = 0
downsampler.bias.data[:] = 0
kernel_torch = torch.from_numpy(self.kernel)
for i in range(n_planes):
downsampler.weight.data[i, i] = kernel_torch
self.downsampler_ = downsampler
if preserve_size:
if self.kernel.shape[0] % 2 == 1:
pad = int((self.kernel.shape[0] - 1) / 2.)
else:
pad = int((self.kernel.shape[0] - factor) / 2.)
self.padding = nn.ReplicationPad2d(pad)
self.preserve_size = preserve_size
def forward(self, input):
if self.preserve_size:
x = self.padding(input)
else:
x= input
self.x = x
return self.downsampler_(x)
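# Added usage sketch (assumption, not part of the original file): antialiased
# 2x downsampling with a half-phase lanczos2 kernel; preserve_size pads the
# input so the output is exactly half the input resolution.
def _downsampler_usage_sketch():
    down = Downsampler(n_planes=3, factor=2, kernel_type='lanczos2',
                       phase=0.5, preserve_size=True)
    return down(torch.zeros(1, 3, 64, 64))     # -> (1, 3, 32, 32)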
class Blurconv(nn.Module):
'''
http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
'''
def __init__(self, n_planes=1, preserve_size=False):
super(Blurconv, self).__init__()
# self.kernel = kernel
# blurconv = nn.Conv2d(n_planes, n_planes, kernel_size=self.kernel.shape, stride=1, padding=0)
# blurconvr.weight.data = self.kernel
# blurconv.bias.data[:] = 0
self.n_planes = n_planes
self.preserve_size = preserve_size
# kernel_torch = torch.from_numpy(self.kernel)
# for i in range(n_planes):
# blurconv.weight.data[i, i] = kernel_torch
# self.blurconv_ = blurconv
#
# if preserve_size:
#
# if self.kernel.shape[0] % 2 == 1:
# pad = int((self.kernel.shape[0] - 1) / 2.)
# else:
# pad = int((self.kernel.shape[0] - factor) / 2.)
#
# self.padding = nn.ReplicationPad2d(pad)
#
# self.preserve_size = preserve_size
def forward(self, input, kernel):
if self.preserve_size:
            # both branches of the original if/else computed the same value,
            # so a single expression suffices (kernel is (out, in, kH, kW))
            pad = int((kernel.shape[3] - 1) / 2.)
padding = nn.ReplicationPad2d(pad)
x = padding(input)
else:
x= input
blurconv = nn.Conv2d(self.n_planes, self.n_planes, kernel_size=kernel.size(3), stride=1, padding=0, bias=False).cuda()
blurconv.weight.data[:] = kernel
return blurconv(x)
class Blurconv2(nn.Module):
'''
http://www.realitypixels.com/turk/computergraphics/ResamplingFilters.pdf
'''
def __init__(self, n_planes=1, preserve_size=False, k_size=21):
super(Blurconv2, self).__init__()
self.n_planes = n_planes
self.k_size = k_size
self.preserve_size = preserve_size
self.blurconv = nn.Conv2d(self.n_planes, self.n_planes, kernel_size=k_size, stride=1, padding=0, bias=False)
# self.blurconv.weight.data[:] /= self.blurconv.weight.data.sum()
def forward(self, input):
if self.preserve_size:
pad = int((self.k_size - 1.) / 2.)
padding = nn.ReplicationPad2d(pad)
x = padding(input)
else:
x= input
#self.blurconv.weight.data[:] /= self.blurconv.weight.data.sum()
return self.blurconv(x)
def get_kernel(factor, kernel_type, phase, kernel_width, support=None, sigma=None):
assert kernel_type in ['lanczos', 'gauss', 'box']
# factor = float(factor)
if phase == 0.5 and kernel_type != 'box':
kernel = np.zeros([kernel_width - 1, kernel_width - 1])
else:
kernel = np.zeros([kernel_width, kernel_width])
if kernel_type == 'box':
assert phase == 0.5, 'Box filter is always half-phased'
kernel[:] = 1./(kernel_width * kernel_width)
elif kernel_type == 'gauss':
assert sigma, 'sigma is not specified'
assert phase != 0.5, 'phase 1/2 for gauss not implemented'
center = (kernel_width + 1.)/2.
print(center, kernel_width)
sigma_sq = sigma * sigma
for i in range(1, kernel.shape[0] + 1):
for j in range(1, kernel.shape[1] + 1):
di = (i - center)/2.
dj = (j - center)/2.
kernel[i - 1][j - 1] = np.exp(-(di * di + dj * dj)/(2 * sigma_sq))
kernel[i - 1][j - 1] = kernel[i - 1][j - 1]/(2. * np.pi * sigma_sq)
elif kernel_type == 'lanczos':
assert support, 'support is not specified'
center = (kernel_width + 1) / 2.
for i in range(1, kernel.shape[0] + 1):
for j in range(1, kernel.shape[1] + 1):
if phase == 0.5:
di = abs(i + 0.5 - center) / factor
dj = abs(j + 0.5 - center) / factor
else:
di = abs(i - center) / factor
dj = abs(j - center) / factor
pi_sq = np.pi * np.pi
val = 1
if di != 0:
val = val * support * np.sin(np.pi * di) * np.sin(np.pi * di / support)
val = val / (np.pi * np.pi * di * di)
if dj != 0:
val = val * support * np.sin(np.pi * dj) * np.sin(np.pi * dj / support)
val = val / (np.pi * np.pi * dj * dj)
kernel[i - 1][j - 1] = val
else:
assert False, 'wrong method name'
kernel /= kernel.sum()
return kernel
#a = Downsampler(n_planes=3, factor=2, kernel_type='lanczos2', phase='1', preserve_size=True)
#################
# Learnable downsampler
# KS = 32
# dow = nn.Sequential(nn.ReplicationPad2d(int((KS - factor) / 2.)), nn.Conv2d(1,1,KS,factor))
# class Apply(nn.Module):
# def __init__(self, what, dim, *args):
# super(Apply, self).__init__()
# self.dim = dim
# self.what = what
# def forward(self, input):
# inputs = []
# for i in range(input.size(self.dim)):
# inputs.append(self.what(input.narrow(self.dim, i, 1)))
# return torch.cat(inputs, dim=self.dim)
# def __len__(self):
# return len(self._modules)
# downs = Apply(dow, 1)
# downs.type(dtype)(net_input.type(dtype)).size()
| 7,872 | 31.66805 | 129 | py |
SelfDeblur | SelfDeblur-master/models/non_local_dot_product.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x):
'''
:param x: (b, c, t, h, w)
        :return: z, tensor with the same shape as x
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
N = f.size(-1)
f_div_C = f / N
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
if __name__ == '__main__':
import torch
for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
| 4,926 | 35.496296 | 102 | py |
SelfDeblur | SelfDeblur-master/models/texture_nets.py | import torch
import torch.nn as nn
from .common import *
normalization = nn.BatchNorm2d
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero'):
if pad == 'zero':
        return nn.Conv2d(in_f, out_f, kernel_size, stride, padding=(kernel_size - 1) // 2, bias=bias)
    elif pad == 'reflection':
        layers = [nn.ReflectionPad2d((kernel_size - 1) // 2),
nn.Conv2d(in_f, out_f, kernel_size, stride, padding=0, bias=bias)]
return nn.Sequential(*layers)
def get_texture_nets(inp=3, ratios = [32, 16, 8, 4, 2, 1], fill_noise=False, pad='zero', need_sigmoid=False, conv_num=8, upsample_mode='nearest'):
for i in range(len(ratios)):
j = i + 1
seq = nn.Sequential()
tmp = nn.AvgPool2d(ratios[i], ratios[i])
seq.add(tmp)
if fill_noise:
seq.add(GenNoise(inp))
seq.add(conv(inp, conv_num, 3, pad=pad))
seq.add(normalization(conv_num))
seq.add(act())
seq.add(conv(conv_num, conv_num, 3, pad=pad))
seq.add(normalization(conv_num))
seq.add(act())
seq.add(conv(conv_num, conv_num, 1, pad=pad))
seq.add(normalization(conv_num))
seq.add(act())
if i == 0:
seq.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
cur = seq
else:
cur_temp = cur
cur = nn.Sequential()
# Batch norm before merging
seq.add(normalization(conv_num))
cur_temp.add(normalization(conv_num * (j - 1)))
cur.add(Concat(1, cur_temp, seq))
cur.add(conv(conv_num * j, conv_num * j, 3, pad=pad))
cur.add(normalization(conv_num * j))
cur.add(act())
cur.add(conv(conv_num * j, conv_num * j, 3, pad=pad))
cur.add(normalization(conv_num * j))
cur.add(act())
cur.add(conv(conv_num * j, conv_num * j, 1, pad=pad))
cur.add(normalization(conv_num * j))
cur.add(act())
if i == len(ratios) - 1:
cur.add(conv(conv_num * j, 3, 1, pad=pad))
else:
cur.add(nn.Upsample(scale_factor=2, mode=upsample_mode))
model = cur
if need_sigmoid:
model.add(nn.Sigmoid())
return model
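# Added usage sketch (assumption, not part of the original file): the input
# height/width must be divisible by the largest ratio (32 here) so that every
# AvgPool2d branch lines up when the scales are merged.
def _texture_nets_usage_sketch():
    net = get_texture_nets(inp=3, ratios=[32, 16, 8, 4, 2, 1],
                           pad='zero', need_sigmoid=True)
    return net(torch.zeros(1, 3, 64, 64))      # -> (1, 3, 64, 64)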
| 2,315 | 27.95 | 146 | py |
SelfDeblur | SelfDeblur-master/models/non_local_concatenation.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=(2))
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.phi = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
self.concat_project = nn.Sequential(
nn.Conv2d(self.inter_channels * 2, 1, 1, 1, 0, bias=False),
nn.ReLU()
)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x):
'''
:param x: (b, c, t, h, w)
        :return: z, tensor with the same shape as x
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
# (b, c, N, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1, 1)
# (b, c, 1, N)
phi_x = self.phi(x).view(batch_size, self.inter_channels, 1, -1)
h = theta_x.size(2)
w = phi_x.size(3)
theta_x = theta_x.repeat(1, 1, 1, w)
phi_x = phi_x.repeat(1, 1, h, 1)
concat_feature = torch.cat([theta_x, phi_x], dim=1)
f = self.concat_project(concat_feature)
b, _, h, w = f.size()
f = f.view(b, h, w)
N = f.size(-1)
f_div_C = f / N
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
if __name__ == '__main__':
import torch
for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
| 5,350 | 35.155405 | 102 | py |
SelfDeblur | SelfDeblur-master/models/common.py | import torch
import torch.nn as nn
import numpy as np
from .downsampler import Downsampler
def add_module(self, module):
self.add_module(str(len(self) + 1), module)
torch.nn.Module.add = add_module
class Concat(nn.Module):
def __init__(self, dim, *args):
super(Concat, self).__init__()
self.dim = dim
for idx, module in enumerate(args):
self.add_module(str(idx), module)
def forward(self, input):
inputs = []
for module in self._modules.values():
inputs.append(module(input))
inputs_shapes2 = [x.shape[2] for x in inputs]
inputs_shapes3 = [x.shape[3] for x in inputs]
if np.all(np.array(inputs_shapes2) == min(inputs_shapes2)) and np.all(np.array(inputs_shapes3) == min(inputs_shapes3)):
inputs_ = inputs
else:
target_shape2 = min(inputs_shapes2)
target_shape3 = min(inputs_shapes3)
inputs_ = []
for inp in inputs:
diff2 = (inp.size(2) - target_shape2) // 2
diff3 = (inp.size(3) - target_shape3) // 2
inputs_.append(inp[:, :, diff2: diff2 + target_shape2, diff3:diff3 + target_shape3])
return torch.cat(inputs_, dim=self.dim)
def __len__(self):
return len(self._modules)
class GenNoise(nn.Module):
def __init__(self, dim2):
super(GenNoise, self).__init__()
self.dim2 = dim2
def forward(self, input):
a = list(input.size())
a[1] = self.dim2
# print (input.data.type())
b = torch.zeros(a).type_as(input.data)
b.normal_()
x = torch.autograd.Variable(b)
return x
class Swish(nn.Module):
"""
https://arxiv.org/abs/1710.05941
The hype was so huge that I could not help but try it
"""
def __init__(self):
super(Swish, self).__init__()
self.s = nn.Sigmoid()
def forward(self, x):
return x * self.s(x)
def act(act_fun = 'LeakyReLU'):
'''
Either string defining an activation function or module (e.g. nn.ReLU)
'''
if isinstance(act_fun, str):
if act_fun == 'LeakyReLU':
return nn.LeakyReLU(0.2, inplace=True)
elif act_fun == 'Swish':
return Swish()
elif act_fun == 'ELU':
return nn.ELU()
elif act_fun == 'none':
return nn.Sequential()
else:
assert False
else:
return act_fun()
def bn(num_features):
return nn.BatchNorm2d(num_features)
def conv(in_f, out_f, kernel_size, stride=1, bias=True, pad='zero', downsample_mode='stride'):
downsampler = None
if stride != 1 and downsample_mode != 'stride':
if downsample_mode == 'avg':
downsampler = nn.AvgPool2d(stride, stride)
elif downsample_mode == 'max':
downsampler = nn.MaxPool2d(stride, stride)
elif downsample_mode in ['lanczos2', 'lanczos3']:
downsampler = Downsampler(n_planes=out_f, factor=stride, kernel_type=downsample_mode, phase=0.5, preserve_size=True)
else:
assert False
stride = 1
padder = None
to_pad = int((kernel_size - 1) / 2)
if pad == 'reflection':
padder = nn.ReflectionPad2d(to_pad)
to_pad = 0
convolver = nn.Conv2d(in_f, out_f, kernel_size, stride, padding=to_pad, bias=bias)
layers = filter(lambda x: x is not None, [padder, convolver, downsampler])
return nn.Sequential(*layers) | 3,531 | 27.483871 | 128 | py |
SelfDeblur | SelfDeblur-master/models/unet.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from .common import *
class ListModule(nn.Module):
def __init__(self, *args):
super(ListModule, self).__init__()
idx = 0
for module in args:
self.add_module(str(idx), module)
idx += 1
def __getitem__(self, idx):
if idx >= len(self._modules):
raise IndexError('index {} is out of range'.format(idx))
if idx < 0:
idx = len(self) + idx
it = iter(self._modules.values())
for i in range(idx):
next(it)
return next(it)
def __iter__(self):
return iter(self._modules.values())
def __len__(self):
return len(self._modules)
class UNet(nn.Module):
'''
upsample_mode in ['deconv', 'nearest', 'bilinear']
pad in ['zero', 'replication', 'none']
'''
def __init__(self, num_input_channels=3, num_output_channels=3,
feature_scale=4, more_layers=0, concat_x=False,
upsample_mode='deconv', pad='zero', norm_layer=nn.InstanceNorm2d, need_sigmoid=True, need_bias=True):
super(UNet, self).__init__()
self.feature_scale = feature_scale
self.more_layers = more_layers
self.concat_x = concat_x
filters = [64, 128, 256, 512, 1024]
filters = [x // self.feature_scale for x in filters]
self.start = unetConv2(num_input_channels, filters[0] if not concat_x else filters[0] - num_input_channels, norm_layer, need_bias, pad)
self.down1 = unetDown(filters[0], filters[1] if not concat_x else filters[1] - num_input_channels, norm_layer, need_bias, pad)
self.down2 = unetDown(filters[1], filters[2] if not concat_x else filters[2] - num_input_channels, norm_layer, need_bias, pad)
self.down3 = unetDown(filters[2], filters[3] if not concat_x else filters[3] - num_input_channels, norm_layer, need_bias, pad)
self.down4 = unetDown(filters[3], filters[4] if not concat_x else filters[4] - num_input_channels, norm_layer, need_bias, pad)
# more downsampling layers
if self.more_layers > 0:
self.more_downs = [
unetDown(filters[4], filters[4] if not concat_x else filters[4] - num_input_channels , norm_layer, need_bias, pad) for i in range(self.more_layers)]
self.more_ups = [unetUp(filters[4], upsample_mode, need_bias, pad, same_num_filt =True) for i in range(self.more_layers)]
self.more_downs = ListModule(*self.more_downs)
self.more_ups = ListModule(*self.more_ups)
self.up4 = unetUp(filters[3], upsample_mode, need_bias, pad)
self.up3 = unetUp(filters[2], upsample_mode, need_bias, pad)
self.up2 = unetUp(filters[1], upsample_mode, need_bias, pad)
self.up1 = unetUp(filters[0], upsample_mode, need_bias, pad)
self.final = conv(filters[0], num_output_channels, 1, bias=need_bias, pad=pad)
if need_sigmoid:
self.final = nn.Sequential(self.final, nn.Sigmoid())
def forward(self, inputs):
# Downsample
downs = [inputs]
down = nn.AvgPool2d(2, 2)
for i in range(4 + self.more_layers):
downs.append(down(downs[-1]))
in64 = self.start(inputs)
if self.concat_x:
in64 = torch.cat([in64, downs[0]], 1)
down1 = self.down1(in64)
if self.concat_x:
down1 = torch.cat([down1, downs[1]], 1)
down2 = self.down2(down1)
if self.concat_x:
down2 = torch.cat([down2, downs[2]], 1)
down3 = self.down3(down2)
if self.concat_x:
down3 = torch.cat([down3, downs[3]], 1)
down4 = self.down4(down3)
if self.concat_x:
down4 = torch.cat([down4, downs[4]], 1)
if self.more_layers > 0:
prevs = [down4]
for kk, d in enumerate(self.more_downs):
# print(prevs[-1].size())
out = d(prevs[-1])
if self.concat_x:
out = torch.cat([out, downs[kk + 5]], 1)
prevs.append(out)
up_ = self.more_ups[-1](prevs[-1], prevs[-2])
for idx in range(self.more_layers - 1):
                l = self.more_ups[self.more_layers - idx - 2]
                up_ = l(up_, prevs[self.more_layers - idx - 2])
else:
up_= down4
up4= self.up4(up_, down3)
up3= self.up3(up4, down2)
up2= self.up2(up3, down1)
up1= self.up1(up2, in64)
return self.final(up1)
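# Added usage sketch (assumption, not part of the original file): the input
# height/width should be divisible by 16 so the four down/up stages line up
# without cropping.
def _unet_usage_sketch():
    net = UNet(num_input_channels=3, num_output_channels=3, feature_scale=4,
               upsample_mode='deconv', pad='zero', norm_layer=nn.InstanceNorm2d)
    return net(torch.zeros(1, 3, 64, 64))      # -> (1, 3, 64, 64)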
class unetConv2(nn.Module):
def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
super(unetConv2, self).__init__()
print(pad)
if norm_layer is not None:
self.conv1= nn.Sequential(conv(in_size, out_size, 3, bias=need_bias, pad=pad),
norm_layer(out_size),
nn.ReLU(),)
self.conv2= nn.Sequential(conv(out_size, out_size, 3, bias=need_bias, pad=pad),
norm_layer(out_size),
nn.ReLU(),)
else:
self.conv1= nn.Sequential(conv(in_size, out_size, 3, bias=need_bias, pad=pad),
nn.ReLU(),)
self.conv2= nn.Sequential(conv(out_size, out_size, 3, bias=need_bias, pad=pad),
nn.ReLU(),)
def forward(self, inputs):
outputs= self.conv1(inputs)
outputs= self.conv2(outputs)
return outputs
class unetDown(nn.Module):
def __init__(self, in_size, out_size, norm_layer, need_bias, pad):
super(unetDown, self).__init__()
self.conv= unetConv2(in_size, out_size, norm_layer, need_bias, pad)
self.down= nn.MaxPool2d(2, 2)
def forward(self, inputs):
outputs= self.down(inputs)
outputs= self.conv(outputs)
return outputs
class unetUp(nn.Module):
def __init__(self, out_size, upsample_mode, need_bias, pad, same_num_filt=False):
super(unetUp, self).__init__()
num_filt = out_size if same_num_filt else out_size * 2
if upsample_mode == 'deconv':
self.up= nn.ConvTranspose2d(num_filt, out_size, 4, stride=2, padding=1)
self.conv= unetConv2(out_size * 2, out_size, None, need_bias, pad)
elif upsample_mode=='bilinear' or upsample_mode=='nearest':
self.up = nn.Sequential(nn.Upsample(scale_factor=2, mode=upsample_mode),
conv(num_filt, out_size, 3, bias=need_bias, pad=pad))
self.conv= unetConv2(out_size * 2, out_size, None, need_bias, pad)
else:
assert False
def forward(self, inputs1, inputs2):
in1_up= self.up(inputs1)
if (inputs2.size(2) != in1_up.size(2)) or (inputs2.size(3) != in1_up.size(3)):
diff2 = (inputs2.size(2) - in1_up.size(2)) // 2
diff3 = (inputs2.size(3) - in1_up.size(3)) // 2
inputs2_ = inputs2[:, :, diff2 : diff2 + in1_up.size(2), diff3 : diff3 + in1_up.size(3)]
else:
inputs2_ = inputs2
output= self.conv(torch.cat([in1_up, inputs2_], 1))
return output
if __name__ == '__main__':
print(1)
# net = UNet()
# print(net.forward) | 7,408 | 36.045 | 164 | py |
SelfDeblur | SelfDeblur-master/models/__init__.py | from .skip import skip
from .texture_nets import get_texture_nets
from .resnet import ResNet
from .unet import UNet
import torch.nn as nn
def get_net(input_depth, NET_TYPE, pad, upsample_mode, n_channels=3, act_fun='LeakyReLU', skip_n33d=128, skip_n33u=128, skip_n11=4, num_scales=5, downsample_mode='stride'):
if NET_TYPE == 'ResNet':
        # the old positional call put nn.BatchNorm2d into act_fun by mistake;
        # spell out the apparently intended arguments as keywords instead
        net = ResNet(input_depth, 3, num_blocks=10, num_channels=16,
                     need_residual=True, need_sigmoid=False, norm_layer=nn.BatchNorm2d)
elif NET_TYPE == 'skip':
net = skip(input_depth, n_channels, num_channels_down = [skip_n33d]*num_scales if isinstance(skip_n33d, int) else skip_n33d,
num_channels_up = [skip_n33u]*num_scales if isinstance(skip_n33u, int) else skip_n33u,
num_channels_skip = [skip_n11]*num_scales if isinstance(skip_n11, int) else skip_n11,
upsample_mode=upsample_mode, downsample_mode=downsample_mode,
need_sigmoid=True, need_bias=True, pad=pad, act_fun=act_fun)
elif NET_TYPE == 'texture_nets':
net = get_texture_nets(inp=input_depth, ratios = [32, 16, 8, 4, 2, 1], fill_noise=False,pad=pad)
elif NET_TYPE =='UNet':
net = UNet(num_input_channels=input_depth, num_output_channels=3,
feature_scale=4, more_layers=0, concat_x=False,
upsample_mode=upsample_mode, pad=pad, norm_layer=nn.BatchNorm2d, need_sigmoid=True, need_bias=True)
elif NET_TYPE == 'identity':
assert input_depth == 3
net = nn.Sequential()
else:
assert False
return net | 1,639 | 50.25 | 172 | py |
SelfDeblur | SelfDeblur-master/models/non_local_gaussian.py | import torch
from torch import nn
from torch.nn import functional as F
class _NonLocalBlockND(nn.Module):
def __init__(self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
            max_pool_layer = nn.MaxPool1d(kernel_size=2)
bn = nn.BatchNorm1d
self.g = conv_nd(in_channels=self.in_channels, out_channels=self.inter_channels,
kernel_size=1, stride=1, padding=0)
if bn_layer:
self.W = nn.Sequential(
conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0),
bn(self.in_channels)
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(in_channels=self.inter_channels, out_channels=self.in_channels,
kernel_size=1, stride=1, padding=0)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = max_pool_layer
def forward(self, x):
'''
:param x: (b, c, t, h, w)
:return:
'''
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = x.view(batch_size, self.in_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
if self.sub_sample:
phi_x = self.phi(x).view(batch_size, self.in_channels, -1)
else:
phi_x = x.view(batch_size, self.in_channels, -1)
f = torch.matmul(theta_x, phi_x)
f_div_C = F.softmax(f, dim=-1)
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock1D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock1D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=1, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=2, sub_sample=sub_sample,
bn_layer=bn_layer)
class NONLocalBlock3D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock3D, self).__init__(in_channels,
inter_channels=inter_channels,
dimension=3, sub_sample=sub_sample,
bn_layer=bn_layer)
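# Editor's sketch (not in the original repo): the block above is plain
# embedded-Gaussian non-local attention without theta/phi projections --
# f[b, i, j] = <x_i, x_j> over all positions, softmax over j, then the
# g-embedding is aggregated and added back residually, so the output always
# keeps the input shape.
def _demo_nonlocal_shapes():
    x = torch.randn(2, 4, 8, 8)
    block = NONLocalBlock2D(in_channels=4, sub_sample=False, bn_layer=False)
    assert block(x).shape == x.shape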
if __name__ == '__main__':
for (sub_sample, bn_layer) in [(True, True), (False, False), (True, False), (False, True)]:
img = torch.zeros(2, 3, 20)
net = NONLocalBlock1D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.zeros(2, 3, 20, 20)
net = NONLocalBlock2D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
img = torch.randn(2, 3, 8, 20, 20)
net = NONLocalBlock3D(3, sub_sample=sub_sample, bn_layer=bn_layer)
out = net(img)
print(out.size())
| 4,674 | 33.124088 | 102 | py |
SelfDeblur | SelfDeblur-master/utils/common_utils.py | import torch
import torch.nn as nn
import torchvision
import cv2
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
def crop_image(img, d=32):
'''Make dimensions divisible by `d`'''
imgsize = img.shape
new_size = (imgsize[0] - imgsize[0] % d,
imgsize[1] - imgsize[1] % d)
    # Crop from the top-left corner down to the size divisible by `d`.
    img_cropped = img[0:new_size[0], 0:new_size[1], :]
return img_cropped
def get_params(opt_over, net, net_input, downsampler=None):
'''Returns parameters that we want to optimize over.
Args:
opt_over: comma separated list, e.g. "net,input" or "net"
net: network
net_input: torch.Tensor that stores input `z`
'''
opt_over_list = opt_over.split(',')
params = []
for opt in opt_over_list:
if opt == 'net':
params += [x for x in net.parameters()]
elif opt=='down':
assert downsampler is not None
params += [x for x in downsampler.parameters()]
elif opt == 'input':
net_input.requires_grad = True
params += [net_input]
else:
assert False, 'what is it?'
return params
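# Editor's sketch (hypothetical usage, not in the original file): jointly
# optimizing the network weights and the input code, as SelfDeblur does.
def _demo_get_params():
    net = nn.Linear(4, 4)
    z = torch.zeros(1, 4)
    params = get_params('net,input', net, z)
    assert len(params) == 3  # weight, bias, and the now-trainable input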
def get_image_grid(images_np, nrow=8):
'''Creates a grid from a list of images by concatenating them.'''
images_torch = [torch.from_numpy(x) for x in images_np]
torch_grid = torchvision.utils.make_grid(images_torch, nrow)
return torch_grid.numpy()
def plot_image_grid(images_np, nrow =8, factor=1, interpolation='lanczos'):
"""Draws images in a grid
Args:
images_np: list of images, each image is np.array of size 3xHxW of 1xHxW
nrow: how many images will be in one row
        factor: size of the plt.figure
interpolation: interpolation used in plt.imshow
"""
n_channels = max(x.shape[0] for x in images_np)
assert (n_channels == 3) or (n_channels == 1), "images should have 1 or 3 channels"
images_np = [x if (x.shape[0] == n_channels) else np.concatenate([x, x, x], axis=0) for x in images_np]
grid = get_image_grid(images_np, nrow)
plt.figure(figsize=(len(images_np) + factor, 12 + factor))
if images_np[0].shape[0] == 1:
plt.imshow(grid[0], cmap='gray', interpolation=interpolation)
else:
plt.imshow(grid.transpose(1, 2, 0), interpolation=interpolation)
plt.show()
return grid
def load(path):
"""Load PIL image."""
img = Image.open(path)
return img
def get_image(path, imsize=-1):
"""Load an image and resize to a cpecific size.
Args:
path: path to image
imsize: tuple or scalar with dimensions; -1 for `no resize`
"""
img = load(path)
if isinstance(imsize, int):
imsize = (imsize, imsize)
if imsize[0]!= -1 and img.size != imsize:
if imsize[0] > img.size[0]:
img = img.resize(imsize, Image.BICUBIC)
else:
img = img.resize(imsize, Image.ANTIALIAS)
img_np = pil_to_np(img)
return img, img_np
def fill_noise(x, noise_type):
"""Fills tensor `x` with noise of type `noise_type`."""
torch.manual_seed(0)
if noise_type == 'u':
x.uniform_()
elif noise_type == 'n':
x.normal_()
else:
assert False
def get_noise(input_depth, method, spatial_size, noise_type='u', var=1./10):
"""Returns a pytorch.Tensor of size (1 x `input_depth` x `spatial_size[0]` x `spatial_size[1]`)
initialized in a specific way.
Args:
input_depth: number of channels in the tensor
        method: `noise` for filling the tensor with noise; `meshgrid` for np.meshgrid
spatial_size: spatial size of the tensor to initialize
noise_type: 'u' for uniform; 'n' for normal
        var: a factor the noise will be multiplied by; essentially a standard-deviation scaler.
"""
if isinstance(spatial_size, int):
spatial_size = (spatial_size, spatial_size)
if method == 'noise':
shape = [1, input_depth, spatial_size[0], spatial_size[1]]
net_input = torch.zeros(shape)
fill_noise(net_input, noise_type)
net_input *= var
elif method == 'meshgrid':
assert input_depth == 2
X, Y = np.meshgrid(np.arange(0, spatial_size[1])/float(spatial_size[1]-1), np.arange(0, spatial_size[0])/float(spatial_size[0]-1))
meshgrid = np.concatenate([X[None, :], Y[None, :]])
net_input = np_to_torch(meshgrid)
else:
assert False
return net_input
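# Editor's sketch (not in the original file): expected output shapes of the
# two initialization methods above.
def _demo_get_noise():
    z = get_noise(32, 'noise', (64, 48), noise_type='u', var=1. / 10)
    assert z.shape == (1, 32, 64, 48)
    grid = get_noise(2, 'meshgrid', (64, 48))  # meshgrid requires input_depth == 2
    assert grid.shape == (1, 2, 64, 48)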
def pil_to_np(img_PIL):
'''Converts image in PIL format to np.array.
From W x H x C [0...255] to C x W x H [0..1]
'''
ar = np.array(img_PIL)
if len(ar.shape) == 3:
ar = ar.transpose(2,0,1)
else:
ar = ar[None, ...]
return ar.astype(np.float32) / 255.
def np_to_pil(img_np):
'''Converts image in np.array format to PIL image.
From C x W x H [0..1] to W x H x C [0...255]
'''
ar = np.clip(img_np*255,0,255).astype(np.uint8)
if img_np.shape[0] == 1:
ar = ar[0]
else:
ar = ar.transpose(1, 2, 0)
return Image.fromarray(ar)
def np_to_torch(img_np):
'''Converts image in numpy.array to torch.Tensor.
From C x W x H [0..1] to C x W x H [0..1]
'''
return torch.from_numpy(img_np)[None, :]
def torch_to_np(img_var):
'''Converts an image in torch.Tensor format to np.array.
From 1 x C x W x H [0..1] to C x W x H [0..1]
'''
return img_var.detach().cpu().numpy()[0]
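# Editor's sketch (not in the original file): the conversion helpers above
# round-trip exactly for images whose values are multiples of 1/255.
def _demo_conversions():
    img = Image.new('RGB', (8, 8), color=(255, 0, 0))
    img_np = pil_to_np(img)                 # (3, 8, 8), values in [0, 1]
    img_var = np_to_torch(img_np)           # (1, 3, 8, 8)
    back = np_to_pil(torch_to_np(img_var))  # back to a PIL image
    assert np.allclose(pil_to_np(back), img_np)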
def optimize(optimizer_type, parameters, closure, LR, num_iter):
"""Runs optimization loop.
Args:
        optimizer_type: 'LBFGS' or 'adam'
parameters: list of Tensors to optimize over
closure: function, that returns loss variable
LR: learning rate
num_iter: number of iterations
"""
if optimizer_type == 'LBFGS':
# Do several steps with adam first
optimizer = torch.optim.Adam(parameters, lr=0.001)
for j in range(100):
optimizer.zero_grad()
closure()
optimizer.step()
print('Starting optimization with LBFGS')
def closure2():
optimizer.zero_grad()
return closure()
optimizer = torch.optim.LBFGS(parameters, max_iter=num_iter, lr=LR, tolerance_grad=-1, tolerance_change=-1)
optimizer.step(closure2)
elif optimizer_type == 'adam':
print('Starting optimization with ADAM')
optimizer = torch.optim.Adam(parameters, lr=LR)
from torch.optim.lr_scheduler import MultiStepLR
        scheduler = MultiStepLR(optimizer, milestones=[5000, 10000, 15000], gamma=0.1)  # learning rate decay steps
        for j in range(num_iter):
            optimizer.zero_grad()
            closure()
            optimizer.step()
            scheduler.step()
else:
assert False
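# Editor's sketch (hypothetical, not in the original file): fitting a single
# scalar to a target with the 'adam' branch of `optimize` above.
def _demo_optimize():
    w = torch.zeros(1, requires_grad=True)
    def closure():
        loss = ((w - 3.0) ** 2).sum()
        loss.backward()
        return loss
    optimize('adam', [w], closure, LR=0.1, num_iter=200)
    print(w)  # should be close to 3.0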
def pixelshuffle(image, scale):
'''
    Description: Given an image, return a reversible sub-sampling
    [Input]: Image ndarray float
    [Return]: A mosaic image of shuffled pixels
'''
if scale == 1:
return image
w, h, c = image.shape
mosaic = np.array([])
for ws in range(scale):
band = np.array([])
for hs in range(scale):
temp = image[ws::scale, hs::scale, :] # get the sub-sampled image
band = np.concatenate((band, temp), axis=1) if band.size else temp
mosaic = np.concatenate((mosaic, band), axis=0) if mosaic.size else band
return mosaic
def reverse_pixelshuffle(image, scale, fill=0, fill_image=0, ind=[0, 0]):
'''
    Description: Given a mosaic image of sub-samplings, recombine it into a full image
[Input]: Image
[Return]: Recombine it using different portions of pixels
'''
w, h, c = image.shape
real = np.zeros((w, h, c)) # real image
wf = 0
hf = 0
for ws in range(scale):
hf = 0
for hs in range(scale):
temp = real[ws::scale, hs::scale, :]
            wc, hc, cc = temp.shape  # get the shape of the current block
if fill == 1 and ws == ind[0] and hs == ind[1]:
real[ws::scale, hs::scale, :] = fill_image[wf:wf + wc, hf:hf + hc, :]
else:
real[ws::scale, hs::scale, :] = image[wf:wf + wc, hf:hf + hc, :]
hf = hf + hc
wf = wf + wc
return real
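# Editor's sketch (not in the original file): pixelshuffle followed by
# reverse_pixelshuffle reconstructs the input exactly when both spatial
# dimensions are divisible by the scale.
def _demo_pixelshuffle_roundtrip():
    img = np.random.rand(4, 6, 3)
    mosaic = pixelshuffle(img, 2)  # (4, 6, 3) mosaic of the four 2x2 sub-samplings
    restored = reverse_pixelshuffle(mosaic, 2)
    assert np.allclose(img, restored)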
def readimg(path_to_image):
img = cv2.imread(path_to_image)
x = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
y, cr, cb = cv2.split(x)
return img, y, cb, cr
| 8,824 | 28.915254 | 138 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/eval_analyze.py | # Rdkit import should be first, do not move it
try:
from rdkit import Chem
except ModuleNotFoundError:
pass
import utils
import argparse
from qm9 import dataset
from qm9.models import get_model
import os
from equivariant_diffusion.utils import assert_mean_zero_with_mask, remove_mean_with_mask,\
assert_correctly_masked
import torch
import time
import pickle
from configs.datasets_config import get_dataset_info
from os.path import join
from qm9.sampling import sample
from qm9.analyze import analyze_stability_for_molecules, analyze_node_distribution
from qm9.utils import prepare_context, compute_mean_mad
from qm9 import visualizer as qm9_visualizer
import qm9.losses as losses
try:
from qm9 import rdkit_functions
except ModuleNotFoundError:
print('Not importing rdkit functions.')
def check_mask_correct(variables, node_mask):
for variable in variables:
assert_correctly_masked(variable, node_mask)
def analyze_and_save(args, eval_args, device, generative_model,
nodes_dist, prop_dist, dataset_info, n_samples=10,
batch_size=10, save_to_xyz=False):
batch_size = min(batch_size, n_samples)
assert n_samples % batch_size == 0
molecules = {'one_hot': [], 'x': [], 'node_mask': []}
start_time = time.time()
for i in range(int(n_samples/batch_size)):
nodesxsample = nodes_dist.sample(batch_size)
one_hot, charges, x, node_mask = sample(
args, device, generative_model, dataset_info, prop_dist=prop_dist, nodesxsample=nodesxsample)
molecules['one_hot'].append(one_hot.detach().cpu())
molecules['x'].append(x.detach().cpu())
molecules['node_mask'].append(node_mask.detach().cpu())
current_num_samples = (i+1) * batch_size
secs_per_sample = (time.time() - start_time) / current_num_samples
print('\t %d/%d Molecules generated at %.2f secs/sample' % (
current_num_samples, n_samples, secs_per_sample))
if save_to_xyz:
id_from = i * batch_size
qm9_visualizer.save_xyz_file(
join(eval_args.model_path, 'eval/analyzed_molecules/'),
one_hot, charges, x, dataset_info, id_from, name='molecule',
node_mask=node_mask)
molecules = {key: torch.cat(molecules[key], dim=0) for key in molecules}
stability_dict, rdkit_metrics = analyze_stability_for_molecules(
molecules, dataset_info)
return stability_dict, rdkit_metrics
def test(args, flow_dp, nodes_dist, device, dtype, loader, partition='Test', num_passes=1):
flow_dp.eval()
nll_epoch = 0
n_samples = 0
for pass_number in range(num_passes):
with torch.no_grad():
for i, data in enumerate(loader):
# Get data
x = data['positions'].to(device, dtype)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = (data['charges'] if args.include_charges else torch.zeros(0)).to(device, dtype)
batch_size = x.size(0)
x = remove_mean_with_mask(x, node_mask)
check_mask_correct([x, one_hot], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = prepare_context(args.conditioning, data).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
# transform batch through flow
nll, _, _ = losses.compute_loss_and_nll(args, flow_dp, nodes_dist, x, h, node_mask,
edge_mask, context)
# standard nll from forward KL
nll_epoch += nll.item() * batch_size
n_samples += batch_size
if i % args.n_report_steps == 0:
print(f"\r {partition} NLL \t, iter: {i}/{len(loader)}, "
f"NLL: {nll_epoch/n_samples:.2f}")
return nll_epoch/n_samples
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default="outputs/edm_1",
help='Specify model path')
    parser.add_argument('--n_samples', type=int, default=100,
                        help='Number of samples to generate')
    parser.add_argument('--batch_size_gen', type=int, default=100,
                        help='Batch size used for sampling')
parser.add_argument('--save_to_xyz', type=eval, default=False,
help='Should save samples to xyz files.')
eval_args, unparsed_args = parser.parse_known_args()
assert eval_args.model_path is not None
with open(join(eval_args.model_path, 'args.pickle'), 'rb') as f:
args = pickle.load(f)
# CAREFUL with this -->
if not hasattr(args, 'normalization_factor'):
args.normalization_factor = 1
if not hasattr(args, 'aggregation_method'):
args.aggregation_method = 'sum'
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
args.device = device
dtype = torch.float32
utils.create_folders(args)
print(args)
# Retrieve QM9 dataloaders
dataloaders, charge_scale = dataset.retrieve_dataloaders(args)
dataset_info = get_dataset_info(args.dataset, args.remove_h)
# Load model
generative_model, nodes_dist, prop_dist = get_model(args, device, dataset_info, dataloaders['train'])
if prop_dist is not None:
property_norms = compute_mean_mad(dataloaders, args.conditioning, args.dataset)
prop_dist.set_normalizer(property_norms)
generative_model.to(device)
fn = 'generative_model_ema.npy' if args.ema_decay > 0 else 'generative_model.npy'
flow_state_dict = torch.load(join(eval_args.model_path, fn), map_location=device)
generative_model.load_state_dict(flow_state_dict)
# Analyze stability, validity, uniqueness and novelty
stability_dict, rdkit_metrics = analyze_and_save(
args, eval_args, device, generative_model, nodes_dist,
prop_dist, dataset_info, n_samples=eval_args.n_samples,
batch_size=eval_args.batch_size_gen, save_to_xyz=eval_args.save_to_xyz)
print(stability_dict)
if rdkit_metrics is not None:
rdkit_metrics = rdkit_metrics[0]
print("Validity %.4f, Uniqueness: %.4f, Novelty: %.4f" % (rdkit_metrics[0], rdkit_metrics[1], rdkit_metrics[2]))
else:
print("Install rdkit roolkit to obtain Validity, Uniqueness, Novelty")
# In GEOM-Drugs the validation partition is named 'val', not 'valid'.
if args.dataset == 'geom':
val_name = 'val'
num_passes = 1
else:
val_name = 'valid'
num_passes = 5
# Evaluate negative log-likelihood for the validation and test partitions
val_nll = test(args, generative_model, nodes_dist, device, dtype,
dataloaders[val_name],
partition='Val')
print(f'Final val nll {val_nll}')
test_nll = test(args, generative_model, nodes_dist, device, dtype,
dataloaders['test'],
partition='Test', num_passes=num_passes)
print(f'Final test nll {test_nll}')
print(f'Overview: val nll {val_nll} test nll {test_nll}', stability_dict)
with open(join(eval_args.model_path, 'eval_log.txt'), 'w') as f:
print(f'Overview: val nll {val_nll} test nll {test_nll}',
stability_dict,
file=f)
if __name__ == "__main__":
main()
| 7,795 | 38.175879 | 120 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/analyse_geom.py | from rdkit import Chem
import os
import numpy as np
import torch
from torch.utils.data import BatchSampler, DataLoader, Dataset, SequentialSampler
import argparse
import collections
import pickle
import os
import json
from tqdm import tqdm
from IPython.display import display
from matplotlib import pyplot as plt
import numpy as np
from qm9.analyze import check_stability
from qm9.rdkit_functions import BasicMolecularMetrics
import configs.datasets_config
atomic_number_list = [1, 5, 6, 7, 8, 9, 13, 14, 15, 16, 17, 33, 35, 53, 80, 83]
inverse = {1: 0, 5: 1, 6: 2, 7: 3, 8: 4, 9: 5, 13: 6, 14: 7, 15: 8, 16: 9, 17: 10, 33: 11, 35: 12, 53: 13,
80: 14, 83: 15}
atom_name = ['H', 'B', 'C', 'N', 'O', 'F', 'Al', 'Si', 'P', 'S', 'Cl', 'As', 'Br', 'I', 'Hg', 'Bi']
n_atom_types = len(atomic_number_list)
n_bond_types = 4
def extract_conformers(args):
Counter = collections.Counter
bond_length_dict = {0: Counter(), 1: Counter(), 2: Counter(), 3: Counter()}
summary_file = os.path.join(args.data_dir, args.data_file)
with open(summary_file, "r") as f:
drugs_summ = json.load(f)
all_paths = []
for smiles, sub_dic in drugs_summ.items():
if 'pickle_path' in sub_dic:
pickle_path = os.path.join(args.data_dir, "rdkit_folder", sub_dic["pickle_path"])
all_paths.append(pickle_path)
for i, mol_path in tqdm(enumerate(all_paths)):
with open(mol_path, "rb") as f:
dic = pickle.load(f)
# Get the energy of each conformer. Keep only the lowest values
conformers = dic['conformers']
all_energies = []
for conformer in conformers:
all_energies.append(conformer['totalenergy'])
all_energies = np.array(all_energies)
argsort = np.argsort(all_energies)
lowest_energies = argsort[:args.conformations]
for id in lowest_energies:
conformer = conformers[id]
rd_mol = conformer["rd_mol"]
atoms = rd_mol.GetAtoms()
atom_nums = []
for atom in atoms:
atom_nums.append(atom.GetAtomicNum())
rd_conf = rd_mol.GetConformers()[0]
            coords = rd_conf.GetPositions()  # (n_atoms, 3) array of coordinates
bonds = [bond for bond in rd_mol.GetBonds()]
for bond in bonds:
atom1, atom2 = [bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]
# bond length
c1 = coords[atom1]
c2 = coords[atom2]
dist = np.linalg.norm(c1 - c2)
                # Bin the distance at a 0.01 Angstrom resolution (e.g. 1.54 A -> bin 154)
                dist = int(dist * 100)
# atom types
at1_type = atom_nums[atom1]
at2_type = atom_nums[atom2]
                if at1_type > at2_type:  # Sort the pair to avoid redundancy
                    at1_type, at2_type = at2_type, at1_type
                bond_name = bond.GetBondType().name.lower()
                if bond_name == 'single':
                    bond_type = 0
                elif bond_name == 'double':
                    bond_type = 1
                elif bond_name == 'triple':
                    bond_type = 2
                elif bond_name == 'aromatic':
                    bond_type = 3
                else:
                    raise ValueError("Unknown bond type", bond_name)
                bond_length_dict[bond_type][(at1_type, at2_type, dist)] += 1
if i % 5000 == 0:
print("Current state of the bond length dictionary", bond_length_dict)
    if os.path.exists('bond_length_dict'):
        os.remove('bond_length_dict')
with open('bond_length_dict', 'wb') as bond_dictionary_file:
pickle.dump(bond_length_dict, bond_dictionary_file)
def create_matrix(args):
with open('bond_length_dict', 'rb') as bond_dictionary_file:
all_bond_types = pickle.load(bond_dictionary_file)
x = np.zeros((n_atom_types, n_atom_types, n_bond_types, 350))
for bond_type, d in all_bond_types.items():
for key, count in d.items():
at1, at2, bond_len = key
x[inverse[at1], inverse[at2], bond_type, bond_len - 50] = count
np.save('bond_length_matrix', x)
def create_histograms(args):
x = np.load('./data/geom/bond_length_matrix.npy')
x = x[:, :, :, :307]
label_list = ['single', 'double', 'triple', 'aromatic']
for j in range(n_atom_types):
for i in range(j + 1):
if np.sum(x[i, j]) == 0: # Bond does not exist
continue
# Remove outliers
y = x[i, j]
y[y < 0.02 * np.sum(y, axis=0)] = 0
plt.figure()
existing_bond_lengths = np.array(np.nonzero(y))[1]
mini, maxi = existing_bond_lengths.min(), existing_bond_lengths.max()
y = y[:, mini: maxi + 1]
x_range = np.arange(mini, maxi + 1)
for k in range(n_bond_types):
if np.sum(y[k]) > 0:
plt.plot(x_range, y[k], label=label_list[k])
plt.xlabel("Bond length")
plt.ylabel("Count")
plt.title(f'{atom_name[i]} - {atom_name[j]} bonds')
plt.legend()
plt.savefig(f'./figures/{atom_name[i]}-{atom_name[j]}-hist.png')
plt.close()
def analyse_geom_stability():
data_file = './data/geom/geom_drugs_30.npy'
dataset_info = configs.datasets_config.get_dataset_info('geom', remove_h=False)
atom_dict = dataset_info['atom_decoder']
bond_dict = [None, Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
x = np.load(data_file)
mol_id = x[:, 0].astype(int)
all_atom_types = x[:, 1].astype(int)
all_positions = x[:, 2:]
# Get ids corresponding to new molecules
split_indices = np.nonzero(mol_id[:-1] - mol_id[1:])[0] + 1
all_atom_types_split = np.split(all_atom_types, split_indices)
all_positions_split = np.split(all_positions, split_indices)
atomic_nb_list = torch.Tensor(dataset_info['atomic_nb'])[None, :].long()
num_stable_mols = 0
num_mols = 0
num_stable_atoms_total = 0
num_atoms_total = 0
formatted_data = []
for i, (p, at_types) in tqdm(enumerate(zip(all_positions_split, all_atom_types_split))):
p = torch.from_numpy(p)
at_types = torch.from_numpy(at_types)[:, None]
one_hot = torch.eq(at_types, atomic_nb_list).int()
at_types = torch.argmax(one_hot, dim=1) # Between 0 and 15
formatted_data.append([p, at_types])
mol_is_stable, num_stable_atoms, num_atoms = check_stability(p, at_types, dataset_info)
num_mols += 1
num_stable_mols += mol_is_stable
num_stable_atoms_total += num_stable_atoms
num_atoms_total += num_atoms
if i % 5000 == 0:
print(f"IN PROGRESS -- Stable molecules: {num_stable_mols} / {num_mols}"
f" = {num_stable_mols / num_mols * 100} %")
print(
f"IN PROGRESS -- Stable atoms: {num_stable_atoms_total} / {num_atoms_total}"
f" = {num_stable_atoms_total / num_atoms_total * 100} %")
print(f"Stable molecules: {num_stable_mols} / {num_mols} = {num_stable_mols / num_mols * 100} %")
print(f"Stable atoms: {num_stable_atoms_total} / {num_atoms_total} = {num_stable_atoms_total / num_atoms_total * 100} %")
metrics = BasicMolecularMetrics(dataset_info)
metrics.evaluate(formatted_data)
def debug_geom_stability(num_atoms=100000):
data_file = './data/geom/geom_drugs_30.npy'
dataset_info = configs.datasets_config.get_dataset_info('geom', remove_h=False)
atom_dict = dataset_info['atom_decoder']
bond_dict = [None, Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
x = np.load(data_file)
x = x[:num_atoms]
# Print non hydrogen atoms
x = x[x[:, 1] != 1.0, :]
mol_id = x[:, 0].astype(int)
max_mol_id = mol_id.max()
may_be_incomplete = mol_id == max_mol_id
x = x[~may_be_incomplete]
mol_id = mol_id[~may_be_incomplete]
all_atom_types = x[:, 1].astype(int)
all_positions = x[:, 2:]
# Get ids corresponding to new molecules
split_indices = np.nonzero(mol_id[:-1] - mol_id[1:])[0] + 1
all_atom_types_split = np.split(all_atom_types, split_indices)
all_positions_split = np.split(all_positions, split_indices)
atomic_nb_list = torch.Tensor(dataset_info['atomic_nb'])[None, :].long()
formatted_data = []
for p, at_types in zip(all_positions_split, all_atom_types_split):
p = torch.from_numpy(p)
at_types = torch.from_numpy(at_types)[:, None]
one_hot = torch.eq(at_types, atomic_nb_list).int()
at_types = torch.argmax(one_hot, dim=1) # Between 0 and 15
formatted_data.append([p, at_types])
metrics = BasicMolecularMetrics(atom_dict, bond_dict, dataset_info)
m, smiles_list = metrics.evaluate(formatted_data)
print(smiles_list)
def compute_n_nodes_dict(file='./data/geom/geom_drugs_30.npy', remove_hydrogens=True):
all_data = np.load(file)
atom_types = all_data[:, 1]
if remove_hydrogens:
hydrogens = np.equal(atom_types, 1.0)
all_data = all_data[~hydrogens]
# Get the size of each molecule
mol_id = all_data[:, 0].astype(int)
max_id = mol_id.max()
mol_id_counter = np.zeros(max_id + 1, dtype=int)
for id in mol_id:
mol_id_counter[id] += 1
unique_sizes, size_count = np.unique(mol_id_counter, return_counts=True)
sizes_dict = {}
for size, count in zip(unique_sizes, size_count):
sizes_dict[size] = count
print(sizes_dict)
return sizes_dict
if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# parser.add_argument("--conformations", type=int, default=30,
# help="Max number of conformations kept for each molecule.")
# parser.add_argument("--data_dir", type=str, default='/home/vignac/diffusion/data/geom/')
# parser.add_argument("--data_file", type=str, default="rdkit_folder/summary_drugs.json")
# args = parser.parse_args()
# # extract_conformers(args)
# # create_histograms(args)
#
analyse_geom_stability()
# compute_n_nodes_dict() | 10,412 | 37.283088 | 125 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/utils.py | import numpy as np
import getpass
import os
import torch
# Folders
def create_folders(args):
try:
os.makedirs('outputs')
except OSError:
pass
try:
os.makedirs('outputs/' + args.exp_name)
except OSError:
pass
# Model checkpoints
def save_model(model, path):
torch.save(model.state_dict(), path)
def load_model(model, path):
model.load_state_dict(torch.load(path))
model.eval()
return model
# Gradient clipping
class Queue():
def __init__(self, max_len=50):
self.items = []
self.max_len = max_len
def __len__(self):
return len(self.items)
def add(self, item):
self.items.insert(0, item)
if len(self) > self.max_len:
self.items.pop()
def mean(self):
return np.mean(self.items)
def std(self):
return np.std(self.items)
def gradient_clipping(flow, gradnorm_queue):
# Allow gradient norm to be 150% + 2 * stdev of the recent history.
max_grad_norm = 1.5 * gradnorm_queue.mean() + 2 * gradnorm_queue.std()
# Clips gradient and returns the norm
grad_norm = torch.nn.utils.clip_grad_norm_(
flow.parameters(), max_norm=max_grad_norm, norm_type=2.0)
if float(grad_norm) > max_grad_norm:
gradnorm_queue.add(float(max_grad_norm))
else:
gradnorm_queue.add(float(grad_norm))
if float(grad_norm) > max_grad_norm:
print(f'Clipped gradient with value {grad_norm:.1f} '
f'while allowed {max_grad_norm:.1f}')
return grad_norm
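# Editor's sketch (hypothetical usage, not in the original file): the queue
# tracks recent gradient norms, so the clipping threshold adapts over training.
def _demo_gradient_clipping():
    model = torch.nn.Linear(4, 4)
    gradnorm_queue = Queue(max_len=50)
    gradnorm_queue.add(3000)  # large initial value, flushed out over time
    loss = model(torch.randn(8, 4)).pow(2).sum()
    loss.backward()
    grad_norm = gradient_clipping(model, gradnorm_queue)
    print(float(grad_norm))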
# Rotation data augmentation
def random_rotation(x):
bs, n_nodes, n_dims = x.size()
device = x.device
angle_range = np.pi * 2
if n_dims == 2:
theta = torch.rand(bs, 1, 1).to(device) * angle_range - np.pi
cos_theta = torch.cos(theta)
sin_theta = torch.sin(theta)
R_row0 = torch.cat([cos_theta, -sin_theta], dim=2)
R_row1 = torch.cat([sin_theta, cos_theta], dim=2)
R = torch.cat([R_row0, R_row1], dim=1)
x = x.transpose(1, 2)
x = torch.matmul(R, x)
x = x.transpose(1, 2)
elif n_dims == 3:
# Build Rx
Rx = torch.eye(3).unsqueeze(0).repeat(bs, 1, 1).to(device)
theta = torch.rand(bs, 1, 1).to(device) * angle_range - np.pi
cos = torch.cos(theta)
sin = torch.sin(theta)
Rx[:, 1:2, 1:2] = cos
Rx[:, 1:2, 2:3] = sin
Rx[:, 2:3, 1:2] = - sin
Rx[:, 2:3, 2:3] = cos
# Build Ry
Ry = torch.eye(3).unsqueeze(0).repeat(bs, 1, 1).to(device)
theta = torch.rand(bs, 1, 1).to(device) * angle_range - np.pi
cos = torch.cos(theta)
sin = torch.sin(theta)
Ry[:, 0:1, 0:1] = cos
Ry[:, 0:1, 2:3] = -sin
Ry[:, 2:3, 0:1] = sin
Ry[:, 2:3, 2:3] = cos
# Build Rz
Rz = torch.eye(3).unsqueeze(0).repeat(bs, 1, 1).to(device)
theta = torch.rand(bs, 1, 1).to(device) * angle_range - np.pi
cos = torch.cos(theta)
sin = torch.sin(theta)
Rz[:, 0:1, 0:1] = cos
Rz[:, 0:1, 1:2] = sin
Rz[:, 1:2, 0:1] = -sin
Rz[:, 1:2, 1:2] = cos
x = x.transpose(1, 2)
x = torch.matmul(Rx, x)
#x = torch.matmul(Rx.transpose(1, 2), x)
x = torch.matmul(Ry, x)
#x = torch.matmul(Ry.transpose(1, 2), x)
x = torch.matmul(Rz, x)
#x = torch.matmul(Rz.transpose(1, 2), x)
x = x.transpose(1, 2)
else:
raise Exception("Not implemented Error")
return x.contiguous()
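# Editor's sketch (not in the original file): pairwise distances within each
# sample are invariant under the random rotation above.
def _demo_rotation_invariance():
    x = torch.randn(2, 5, 3)
    x_rot = random_rotation(x)
    assert torch.allclose(torch.cdist(x, x), torch.cdist(x_rot, x_rot), atol=1e-5)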
# Other utilities
def get_wandb_username(username):
if username == 'cvignac':
return 'cvignac'
current_user = getpass.getuser()
if current_user == 'victor' or current_user == 'garciasa':
return 'vgsatorras'
else:
return username
if __name__ == "__main__":
## Test random_rotation
bs = 2
n_nodes = 16
n_dims = 3
x = torch.randn(bs, n_nodes, n_dims)
print(x)
x = random_rotation(x)
#print(x)
| 4,012 | 25.058442 | 74 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/build_geom_dataset.py | import msgpack
import os
import numpy as np
import torch
from torch.utils.data import BatchSampler, DataLoader, Dataset, SequentialSampler
import argparse
from qm9.data import collate as qm9_collate
def extract_conformers(args):
drugs_file = os.path.join(args.data_dir, args.data_file)
save_file = f"geom_drugs_{'no_h_' if args.remove_h else ''}{args.conformations}"
smiles_list_file = 'geom_drugs_smiles.txt'
number_atoms_file = f"geom_drugs_n_{'no_h_' if args.remove_h else ''}{args.conformations}"
unpacker = msgpack.Unpacker(open(drugs_file, "rb"))
all_smiles = []
all_number_atoms = []
dataset_conformers = []
mol_id = 0
for i, drugs_1k in enumerate(unpacker):
print(f"Unpacking file {i}...")
for smiles, all_info in drugs_1k.items():
all_smiles.append(smiles)
conformers = all_info['conformers']
# Get the energy of each conformer. Keep only the lowest values
all_energies = []
for conformer in conformers:
all_energies.append(conformer['totalenergy'])
all_energies = np.array(all_energies)
argsort = np.argsort(all_energies)
lowest_energies = argsort[:args.conformations]
for id in lowest_energies:
conformer = conformers[id]
coords = np.array(conformer['xyz']).astype(float) # n x 4
if args.remove_h:
mask = coords[:, 0] != 1.0
coords = coords[mask]
n = coords.shape[0]
all_number_atoms.append(n)
mol_id_arr = mol_id * np.ones((n, 1), dtype=float)
id_coords = np.hstack((mol_id_arr, coords))
dataset_conformers.append(id_coords)
mol_id += 1
print("Total number of conformers saved", mol_id)
all_number_atoms = np.array(all_number_atoms)
dataset = np.vstack(dataset_conformers)
print("Total number of atoms in the dataset", dataset.shape[0])
print("Average number of atoms per molecule", dataset.shape[0] / mol_id)
# Save conformations
np.save(os.path.join(args.data_dir, save_file), dataset)
# Save SMILES
with open(os.path.join(args.data_dir, smiles_list_file), 'w') as f:
for s in all_smiles:
f.write(s)
f.write('\n')
# Save number of atoms per conformation
np.save(os.path.join(args.data_dir, number_atoms_file), all_number_atoms)
print("Dataset processed.")
def load_split_data(conformation_file, val_proportion=0.1, test_proportion=0.1,
filter_size=None):
from pathlib import Path
path = Path(conformation_file)
base_path = path.parent.absolute()
# base_path = os.path.dirname(conformation_file)
all_data = np.load(conformation_file) # 2d array: num_atoms x 5
mol_id = all_data[:, 0].astype(int)
conformers = all_data[:, 1:]
# Get ids corresponding to new molecules
split_indices = np.nonzero(mol_id[:-1] - mol_id[1:])[0] + 1
data_list = np.split(conformers, split_indices)
# Filter based on molecule size.
if filter_size is not None:
# Keep only molecules <= filter_size
data_list = [molecule for molecule in data_list
if molecule.shape[0] <= filter_size]
assert len(data_list) > 0, 'No molecules left after filter.'
# CAREFUL! Only for first time run:
# perm = np.random.permutation(len(data_list)).astype('int32')
# print('Warning, currently taking a random permutation for '
# 'train/val/test partitions, this needs to be fixed for'
# 'reproducibility.')
# assert not os.path.exists(os.path.join(base_path, 'geom_permutation.npy'))
# np.save(os.path.join(base_path, 'geom_permutation.npy'), perm)
# del perm
perm = np.load(os.path.join(base_path, 'geom_permutation.npy'))
data_list = [data_list[i] for i in perm]
num_mol = len(data_list)
val_index = int(num_mol * val_proportion)
test_index = val_index + int(num_mol * test_proportion)
val_data, test_data, train_data = np.split(data_list, [val_index, test_index])
return train_data, val_data, test_data
class GeomDrugsDataset(Dataset):
def __init__(self, data_list, transform=None):
"""
Args:
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.transform = transform
# Sort the data list by size
lengths = [s.shape[0] for s in data_list]
        argsort = np.argsort(lengths)  # Sort by increasing size
self.data_list = [data_list[i] for i in argsort]
# Store indices where the size changes
self.split_indices = np.unique(np.sort(lengths), return_index=True)[1][1:]
def __len__(self):
return len(self.data_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
sample = self.data_list[idx]
if self.transform:
sample = self.transform(sample)
return sample
class CustomBatchSampler(BatchSampler):
""" Creates batches where all sets have the same size. """
def __init__(self, sampler, batch_size, drop_last, split_indices):
super().__init__(sampler, batch_size, drop_last)
self.split_indices = split_indices
def __iter__(self):
batch = []
for idx in self.sampler:
batch.append(idx)
if len(batch) == self.batch_size or idx + 1 in self.split_indices:
yield batch
batch = []
if len(batch) > 0 and not self.drop_last:
yield batch
def __len__(self):
count = 0
batch = 0
for idx in self.sampler:
batch += 1
if batch == self.batch_size or idx + 1 in self.split_indices:
count += 1
batch = 0
if batch > 0 and not self.drop_last:
count += 1
return count
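# Editor's sketch (not in the original file): batches produced by the sampler
# above never cross a size boundary, so every batch holds a single molecule size.
def _demo_custom_batch_sampler():
    sampler = SequentialSampler(range(6))
    batches = list(CustomBatchSampler(sampler, batch_size=4, drop_last=False,
                                      split_indices={3}))
    assert batches == [[0, 1, 2], [3, 4, 5]]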
def collate_fn(batch):
batch = {prop: qm9_collate.batch_stack([mol[prop] for mol in batch])
for prop in batch[0].keys()}
atom_mask = batch['atom_mask']
# Obtain edges
batch_size, n_nodes = atom_mask.size()
edge_mask = atom_mask.unsqueeze(1) * atom_mask.unsqueeze(2)
# mask diagonal
diag_mask = ~torch.eye(edge_mask.size(1), dtype=torch.bool,
device=edge_mask.device).unsqueeze(0)
edge_mask *= diag_mask
# edge_mask = atom_mask.unsqueeze(1) * atom_mask.unsqueeze(2)
batch['edge_mask'] = edge_mask.view(batch_size * n_nodes * n_nodes, 1)
return batch
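# Editor's sketch (not in the original file): the edge-mask construction in
# `collate_fn`, shown in isolation for one molecule with two real atoms and
# one padding atom -- only the off-diagonal real-real pairs survive.
def _demo_edge_mask():
    atom_mask = torch.tensor([[1., 1., 0.]])
    edge_mask = atom_mask.unsqueeze(1) * atom_mask.unsqueeze(2)
    edge_mask = edge_mask * ~torch.eye(3, dtype=torch.bool).unsqueeze(0)
    assert edge_mask.flatten().tolist() == [0., 1., 0., 1., 0., 0., 0., 0., 0.]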
class GeomDrugsDataLoader(DataLoader):
def __init__(self, sequential, dataset, batch_size, shuffle, drop_last=False):
if sequential:
# This goes over the data sequentially, advantage is that it takes
# less memory for smaller molecules, but disadvantage is that the
# model sees very specific orders of data.
assert not shuffle
sampler = SequentialSampler(dataset)
batch_sampler = CustomBatchSampler(sampler, batch_size, drop_last,
dataset.split_indices)
super().__init__(dataset, batch_sampler=batch_sampler)
else:
# Dataloader goes through data randomly and pads the molecules to
# the largest molecule size.
super().__init__(dataset, batch_size, shuffle=shuffle,
collate_fn=collate_fn, drop_last=drop_last)
class GeomDrugsTransform(object):
def __init__(self, dataset_info, include_charges, device, sequential):
self.atomic_number_list = torch.Tensor(dataset_info['atomic_nb'])[None, :]
self.device = device
self.include_charges = include_charges
self.sequential = sequential
def __call__(self, data):
n = data.shape[0]
new_data = {}
new_data['positions'] = torch.from_numpy(data[:, -3:])
atom_types = torch.from_numpy(data[:, 0].astype(int)[:, None])
one_hot = atom_types == self.atomic_number_list
new_data['one_hot'] = one_hot
if self.include_charges:
new_data['charges'] = torch.zeros(n, 1, device=self.device)
else:
new_data['charges'] = torch.zeros(0, device=self.device)
new_data['atom_mask'] = torch.ones(n, device=self.device)
if self.sequential:
edge_mask = torch.ones((n, n), device=self.device)
edge_mask[~torch.eye(edge_mask.shape[0], dtype=torch.bool)] = 0
new_data['edge_mask'] = edge_mask.flatten()
return new_data
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--conformations", type=int, default=30,
help="Max number of conformations kept for each molecule.")
parser.add_argument("--remove_h", action='store_true', help="Remove hydrogens from the dataset.")
parser.add_argument("--data_dir", type=str, default='~/diffusion/data/geom/')
parser.add_argument("--data_file", type=str, default="drugs_crude.msgpack")
args = parser.parse_args()
extract_conformers(args)
| 9,281 | 36.885714 | 101 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/main_geom_drugs.py | # Rdkit import should be first, do not move it
try:
from rdkit import Chem
except ModuleNotFoundError:
pass
import build_geom_dataset
from configs.datasets_config import geom_with_h
import copy
import utils
import argparse
import wandb
from os.path import join
from qm9.models import get_optim, get_model
from equivariant_diffusion import en_diffusion
from equivariant_diffusion import utils as diffusion_utils
import torch
import time
import pickle
from qm9.utils import prepare_context, compute_mean_mad
import train_test
parser = argparse.ArgumentParser(description='e3_diffusion')
parser.add_argument('--exp_name', type=str, default='debug_10')
parser.add_argument('--model', type=str, default='egnn_dynamics',
help='our_dynamics | schnet | simple_dynamics | '
                         'kernel_dynamics | egnn_dynamics | gnn_dynamics')
parser.add_argument('--probabilistic_model', type=str, default='diffusion',
help='diffusion')
# Training complexity is O(1) (unaffected), but sampling complexity O(steps).
parser.add_argument('--diffusion_steps', type=int, default=500)
parser.add_argument('--diffusion_noise_schedule', type=str, default='polynomial_2',
                    help='learned, cosine, polynomial_<power>')
parser.add_argument('--diffusion_loss_type', type=str, default='l2',
help='vlb, l2')
parser.add_argument('--diffusion_noise_precision', type=float, default=1e-5)
parser.add_argument('--n_epochs', type=int, default=10000)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lr', type=float, default=5e-5)
parser.add_argument('--break_train_epoch', type=eval, default=False,
help='True | False')
parser.add_argument('--dp', type=eval, default=True,
help='True | False')
parser.add_argument('--condition_time', type=eval, default=True,
help='True | False')
parser.add_argument('--clip_grad', type=eval, default=True,
help='True | False')
parser.add_argument('--trace', type=str, default='hutch',
help='hutch | exact')
# EGNN args -->
parser.add_argument('--n_layers', type=int, default=6,
help='number of layers')
parser.add_argument('--inv_sublayers', type=int, default=1,
                    help='number of sublayers in each equivariant block')
parser.add_argument('--nf', type=int, default=192,
                    help='number of hidden features')
parser.add_argument('--tanh', type=eval, default=True,
help='use tanh in the coord_mlp')
parser.add_argument('--attention', type=eval, default=True,
help='use attention in the EGNN')
parser.add_argument('--norm_constant', type=float, default=1,
help='diff/(|diff| + norm_constant)')
parser.add_argument('--sin_embedding', type=eval, default=False,
help='whether using or not the sin embedding')
# <-- EGNN args
parser.add_argument('--ode_regularization', type=float, default=1e-3)
parser.add_argument('--dataset', type=str, default='geom',
help='dataset name')
parser.add_argument('--filter_n_atoms', type=int, default=None,
help='When set to an integer value, QM9 will only contain molecules of that amount of atoms')
parser.add_argument('--dequantization', type=str, default='argmax_variational',
help='uniform | variational | argmax_variational | deterministic')
parser.add_argument('--n_report_steps', type=int, default=50)
parser.add_argument('--wandb_usr', type=str)
parser.add_argument('--no_wandb', action='store_true', help='Disable wandb')
parser.add_argument('--online', type=bool, default=True, help='True = wandb online -- False = wandb offline')
parser.add_argument('--no-cuda', action='store_true', default=False, help='disable CUDA training')
parser.add_argument('--save_model', type=eval, default=True, help='save model')
parser.add_argument('--generate_epochs', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=0,
help='Number of worker for the dataloader')
parser.add_argument('--test_epochs', type=int, default=1)
parser.add_argument('--data_augmentation', type=eval, default=False,
                    help='augment training data with random rotations')
parser.add_argument("--conditioning", nargs='+', default=[],
help='multiple arguments can be passed, '
'including: homo | onehot | lumo | num_atoms | etc. '
'usage: "--conditioning H_thermo homo onehot H_thermo"')
parser.add_argument('--resume', type=str, default=None,
help='')
parser.add_argument('--start_epoch', type=int, default=0,
help='')
parser.add_argument('--ema_decay', type=float, default=0, # TODO
help='Amount of EMA decay, 0 means off. A reasonable value'
' is 0.999.')
parser.add_argument('--augment_noise', type=float, default=0)
parser.add_argument('--n_stability_samples', type=int, default=20,
help='Number of samples to compute the stability')
parser.add_argument('--normalize_factors', type=eval, default=[1, 4, 10],
help='normalize factors for [x, categorical, integer]')
parser.add_argument('--remove_h', action='store_true')
parser.add_argument('--include_charges', type=eval, default=False, help='include atom charge or not')
parser.add_argument('--visualize_every_batch', type=int, default=5000)
parser.add_argument('--normalization_factor', type=float,
default=100, help="Normalize the sum aggregation of EGNN")
parser.add_argument('--aggregation_method', type=str, default='sum',
help='"sum" or "mean" aggregation for the graph network')
parser.add_argument('--filter_molecule_size', type=int, default=None,
help="Only use molecules below this size.")
parser.add_argument('--sequential', action='store_true',
help='Organize data by size to reduce average memory usage.')
args = parser.parse_args()
data_file = './data/geom/geom_drugs_30.npy'
if args.remove_h:
raise NotImplementedError()
else:
dataset_info = geom_with_h
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
dtype = torch.float32
split_data = build_geom_dataset.load_split_data(data_file, val_proportion=0.1, test_proportion=0.1, filter_size=args.filter_molecule_size)
transform = build_geom_dataset.GeomDrugsTransform(dataset_info, args.include_charges, device, args.sequential)
dataloaders = {}
for key, data_list in zip(['train', 'val', 'test'], split_data):
dataset = build_geom_dataset.GeomDrugsDataset(data_list, transform=transform)
shuffle = (key == 'train') and not args.sequential
# Sequential dataloading disabled for now.
dataloaders[key] = build_geom_dataset.GeomDrugsDataLoader(
sequential=args.sequential, dataset=dataset, batch_size=args.batch_size,
shuffle=shuffle)
del split_data
atom_encoder = dataset_info['atom_encoder']
atom_decoder = dataset_info['atom_decoder']
# args, unparsed_args = parser.parse_known_args()
args.wandb_usr = utils.get_wandb_username(args.wandb_usr)
if args.resume is not None:
exp_name = args.exp_name + '_resume'
start_epoch = args.start_epoch
resume = args.resume
wandb_usr = args.wandb_usr
with open(join(args.resume, 'args.pickle'), 'rb') as f:
args = pickle.load(f)
args.resume = resume
args.break_train_epoch = False
args.exp_name = exp_name
args.start_epoch = start_epoch
args.wandb_usr = wandb_usr
print(args)
utils.create_folders(args)
print(args)
# Wandb config
if args.no_wandb:
mode = 'disabled'
else:
mode = 'online' if args.online else 'offline'
kwargs = {'entity': args.wandb_usr, 'name': args.exp_name, 'project': 'e3_diffusion_geom', 'config': args,
'settings': wandb.Settings(_disable_stats=True), 'reinit': True, 'mode': mode}
wandb.init(**kwargs)
wandb.save('*.txt')
data_dummy = next(iter(dataloaders['train']))
if len(args.conditioning) > 0:
print(f'Conditioning on {args.conditioning}')
property_norms = compute_mean_mad(dataloaders, args.conditioning)
context_dummy = prepare_context(args.conditioning, data_dummy, property_norms)
context_node_nf = context_dummy.size(2)
else:
context_node_nf = 0
property_norms = None
args.context_node_nf = context_node_nf
# Create EGNN flow
model, nodes_dist, prop_dist = get_model(args, device, dataset_info, dataloader_train=dataloaders['train'])
model = model.to(device)
optim = get_optim(args, model)
# print(model)
gradnorm_queue = utils.Queue()
gradnorm_queue.add(3000) # Add large value that will be flushed.
def main():
if args.resume is not None:
        flow_state_dict = torch.load(join(args.resume, 'flow.npy'))
        optim_state_dict = torch.load(join(args.resume, 'optim.npy'))
        model.load_state_dict(flow_state_dict)
        optim.load_state_dict(optim_state_dict)
# Initialize dataparallel if enabled and possible.
if args.dp and torch.cuda.device_count() > 1 and args.cuda:
print(f'Training using {torch.cuda.device_count()} GPUs')
model_dp = torch.nn.DataParallel(model.cpu())
model_dp = model_dp.cuda()
else:
model_dp = model
# Initialize model copy for exponential moving average of params.
if args.ema_decay > 0:
model_ema = copy.deepcopy(model)
ema = diffusion_utils.EMA(args.ema_decay)
if args.dp and torch.cuda.device_count() > 1:
model_ema_dp = torch.nn.DataParallel(model_ema)
else:
model_ema_dp = model_ema
else:
ema = None
model_ema = model
model_ema_dp = model_dp
best_nll_val = 1e8
best_nll_test = 1e8
for epoch in range(args.start_epoch, args.n_epochs):
start_epoch = time.time()
train_test.train_epoch(args, dataloaders['train'], epoch, model, model_dp, model_ema, ema, device, dtype,
property_norms, optim, nodes_dist, gradnorm_queue, dataset_info,
prop_dist)
print(f"Epoch took {time.time() - start_epoch:.1f} seconds.")
if epoch % args.test_epochs == 0:
if isinstance(model, en_diffusion.EnVariationalDiffusion):
wandb.log(model.log_info(), commit=True)
if not args.break_train_epoch:
train_test.analyze_and_save(epoch, model_ema, nodes_dist, args, device,
dataset_info, prop_dist, n_samples=args.n_stability_samples)
nll_val = train_test.test(args, dataloaders['val'], epoch, model_ema_dp, device, dtype,
property_norms, nodes_dist, partition='Val')
nll_test = train_test.test(args, dataloaders['test'], epoch, model_ema_dp, device, dtype,
property_norms, nodes_dist, partition='Test')
if nll_val < best_nll_val:
best_nll_val = nll_val
best_nll_test = nll_test
if args.save_model:
args.current_epoch = epoch + 1
utils.save_model(optim, 'outputs/%s/optim.npy' % args.exp_name)
utils.save_model(model, 'outputs/%s/generative_model.npy' % args.exp_name)
if args.ema_decay > 0:
utils.save_model(model_ema, 'outputs/%s/generative_model_ema.npy' % args.exp_name)
with open('outputs/%s/args.pickle' % args.exp_name, 'wb') as f:
pickle.dump(args, f)
if args.save_model:
utils.save_model(optim, 'outputs/%s/optim_%d.npy' % (args.exp_name, epoch))
utils.save_model(model, 'outputs/%s/generative_model_%d.npy' % (args.exp_name, epoch))
if args.ema_decay > 0:
utils.save_model(model_ema, 'outputs/%s/generative_model_ema_%d.npy' % (args.exp_name, epoch))
with open('outputs/%s/args_%d.pickle' % (args.exp_name, epoch), 'wb') as f:
pickle.dump(args, f)
print('Val loss: %.4f \t Test loss: %.4f' % (nll_val, nll_test))
print('Best val loss: %.4f \t Best test loss: %.4f' % (best_nll_val, best_nll_test))
wandb.log({"Val loss ": nll_val}, commit=True)
wandb.log({"Test loss ": nll_test}, commit=True)
wandb.log({"Best cross-validated test loss ": best_nll_test}, commit=True)
if __name__ == "__main__":
main()
| 12,752 | 44.384342 | 138 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/eval_conditional_qm9.py | import argparse
from os.path import join
import torch
import pickle
from qm9.models import get_model
from configs.datasets_config import get_dataset_info
from qm9 import dataset
from qm9.utils import compute_mean_mad
from qm9.sampling import sample
from qm9.property_prediction.main_qm9_prop import test
from qm9.property_prediction import main_qm9_prop
from qm9.sampling import sample_chain, sample, sample_sweep_conditional
import qm9.visualizer as vis
def get_classifier(dir_path='', device='cpu'):
with open(join(dir_path, 'args.pickle'), 'rb') as f:
args_classifier = pickle.load(f)
args_classifier.device = device
args_classifier.model_name = 'egnn'
classifier = main_qm9_prop.get_model(args_classifier)
classifier_state_dict = torch.load(join(dir_path, 'best_checkpoint.npy'), map_location=torch.device('cpu'))
classifier.load_state_dict(classifier_state_dict)
return classifier
def get_args_gen(dir_path):
with open(join(dir_path, 'args.pickle'), 'rb') as f:
args_gen = pickle.load(f)
assert args_gen.dataset == 'qm9_second_half'
# Add missing args!
if not hasattr(args_gen, 'normalization_factor'):
args_gen.normalization_factor = 1
if not hasattr(args_gen, 'aggregation_method'):
args_gen.aggregation_method = 'sum'
return args_gen
def get_generator(dir_path, dataloaders, device, args_gen, property_norms):
dataset_info = get_dataset_info(args_gen.dataset, args_gen.remove_h)
model, nodes_dist, prop_dist = get_model(args_gen, device, dataset_info, dataloaders['train'])
fn = 'generative_model_ema.npy' if args_gen.ema_decay > 0 else 'generative_model.npy'
model_state_dict = torch.load(join(dir_path, fn), map_location='cpu')
model.load_state_dict(model_state_dict)
    # The normalization parameters are computed from the 'valid' partition
if prop_dist is not None:
prop_dist.set_normalizer(property_norms)
return model.to(device), nodes_dist, prop_dist, dataset_info
def get_dataloader(args_gen):
dataloaders, charge_scale = dataset.retrieve_dataloaders(args_gen)
return dataloaders
class DiffusionDataloader:
    def __init__(self, args_gen, model, nodes_dist, prop_dist, device, unknown_labels=False,
                 batch_size=1, iterations=200):
self.args_gen = args_gen
self.model = model
self.nodes_dist = nodes_dist
self.prop_dist = prop_dist
self.batch_size = batch_size
self.iterations = iterations
self.device = device
        self.unknown_labels = unknown_labels
self.dataset_info = get_dataset_info(self.args_gen.dataset, self.args_gen.remove_h)
self.i = 0
def __iter__(self):
return self
def sample(self):
nodesxsample = self.nodes_dist.sample(self.batch_size)
context = self.prop_dist.sample_batch(nodesxsample).to(self.device)
one_hot, charges, x, node_mask = sample(self.args_gen, self.device, self.model,
self.dataset_info, self.prop_dist, nodesxsample=nodesxsample,
context=context)
node_mask = node_mask.squeeze(2)
context = context.squeeze(1)
# edge_mask
bs, n_nodes = node_mask.size()
edge_mask = node_mask.unsqueeze(1) * node_mask.unsqueeze(2)
diag_mask = ~torch.eye(edge_mask.size(1), dtype=torch.bool).unsqueeze(0)
diag_mask = diag_mask.to(self.device)
edge_mask *= diag_mask
edge_mask = edge_mask.view(bs * n_nodes * n_nodes, 1)
prop_key = self.prop_dist.properties[0]
        if self.unknown_labels:
context[:] = self.prop_dist.normalizer[prop_key]['mean']
else:
context = context * self.prop_dist.normalizer[prop_key]['mad'] + self.prop_dist.normalizer[prop_key]['mean']
data = {
'positions': x.detach(),
'atom_mask': node_mask.detach(),
'edge_mask': edge_mask.detach(),
'one_hot': one_hot.detach(),
prop_key: context.detach()
}
return data
def __next__(self):
if self.i <= self.iterations:
self.i += 1
return self.sample()
else:
self.i = 0
raise StopIteration
def __len__(self):
return self.iterations
def main_quantitative(args):
# Get classifier
#if args.task == "numnodes":
# class_dir = args.classifiers_path[:-6] + "numnodes_%s" % args.property
#else:
class_dir = args.classifiers_path
classifier = get_classifier(class_dir).to(args.device)
# Get generator and dataloader used to train the generator and evalute the classifier
args_gen = get_args_gen(args.generators_path)
# Careful with this -->
if not hasattr(args_gen, 'diffusion_noise_precision'):
        args_gen.diffusion_noise_precision = 1e-4
if not hasattr(args_gen, 'normalization_factor'):
args_gen.normalization_factor = 1
if not hasattr(args_gen, 'aggregation_method'):
args_gen.aggregation_method = 'sum'
dataloaders = get_dataloader(args_gen)
property_norms = compute_mean_mad(dataloaders, args_gen.conditioning, args_gen.dataset)
model, nodes_dist, prop_dist, _ = get_generator(args.generators_path, dataloaders,
args.device, args_gen, property_norms)
# Create a dataloader with the generator
mean, mad = property_norms[args.property]['mean'], property_norms[args.property]['mad']
if args.task == 'edm':
diffusion_dataloader = DiffusionDataloader(args_gen, model, nodes_dist, prop_dist,
args.device, batch_size=args.batch_size, iterations=args.iterations)
print("EDM: We evaluate the classifier on our generated samples")
loss = test(classifier, 0, diffusion_dataloader, mean, mad, args.property, args.device, 1, args.debug_break)
print("Loss classifier on Generated samples: %.4f" % loss)
elif args.task == 'qm9_second_half':
print("qm9_second_half: We evaluate the classifier on QM9")
loss = test(classifier, 0, dataloaders['train'], mean, mad, args.property, args.device, args.log_interval,
args.debug_break)
print("Loss classifier on qm9_second_half: %.4f" % loss)
elif args.task == 'naive':
print("Naive: We evaluate the classifier on QM9")
length = dataloaders['train'].dataset.data[args.property].size(0)
idxs = torch.randperm(length)
dataloaders['train'].dataset.data[args.property] = dataloaders['train'].dataset.data[args.property][idxs]
loss = test(classifier, 0, dataloaders['train'], mean, mad, args.property, args.device, args.log_interval,
args.debug_break)
print("Loss classifier on naive: %.4f" % loss)
#elif args.task == 'numnodes':
# print("Numnodes: We evaluate the numnodes classifier on EDM samples")
# diffusion_dataloader = DiffusionDataloader(args_gen, model, nodes_dist, prop_dist, device,
# batch_size=args.batch_size, iterations=args.iterations)
# loss = test(classifier, 0, diffusion_dataloader, mean, mad, args.property, args.device, 1, args.debug_break)
# print("Loss numnodes classifier on EDM generated samples: %.4f" % loss)
def save_and_sample_conditional(args, device, model, prop_dist, dataset_info, epoch=0, id_from=0):
one_hot, charges, x, node_mask = sample_sweep_conditional(args, device, model, dataset_info, prop_dist)
vis.save_xyz_file(
'outputs/%s/analysis/run%s/' % (args.exp_name, epoch), one_hot, charges, x, dataset_info,
id_from, name='conditional', node_mask=node_mask)
vis.visualize_chain("outputs/%s/analysis/run%s/" % (args.exp_name, epoch), dataset_info,
wandb=None, mode='conditional', spheres_3d=True)
return one_hot, charges, x
def main_qualitative(args):
args_gen = get_args_gen(args.generators_path)
dataloaders = get_dataloader(args_gen)
property_norms = compute_mean_mad(dataloaders, args_gen.conditioning, args_gen.dataset)
model, nodes_dist, prop_dist, dataset_info = get_generator(args.generators_path,
dataloaders, args.device, args_gen,
property_norms)
for i in range(args.n_sweeps):
print("Sampling sweep %d/%d" % (i+1, args.n_sweeps))
save_and_sample_conditional(args_gen, device, model, prop_dist, dataset_info, epoch=i, id_from=0)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='debug_alpha')
parser.add_argument('--generators_path', type=str, default='outputs/exp_cond_alpha_pretrained')
parser.add_argument('--classifiers_path', type=str, default='qm9/property_prediction/outputs/exp_class_alpha_pretrained')
parser.add_argument('--property', type=str, default='alpha',
help="'alpha', 'homo', 'lumo', 'gap', 'mu', 'Cv'")
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--debug_break', type=eval, default=False,
help='break point or not')
    parser.add_argument('--log_interval', type=int, default=5,
                        help='logging interval (in iterations)')
    parser.add_argument('--batch_size', type=int, default=1,
                        help='batch size used for sampling')
    parser.add_argument('--iterations', type=int, default=20,
                        help='number of sampling iterations')
parser.add_argument('--task', type=str, default='qualitative',
help='naive, edm, qm9_second_half, qualitative')
parser.add_argument('--n_sweeps', type=int, default=10,
help='number of sweeps for the qualitative conditional experiment')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
args.device = device
if args.task == 'qualitative':
main_qualitative(args)
else:
main_quantitative(args)
| 10,394 | 43.613734 | 125 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/eval_sample.py | # Rdkit import should be first, do not move it
try:
from rdkit import Chem
except ModuleNotFoundError:
pass
import utils
import argparse
from configs.datasets_config import qm9_with_h, qm9_without_h
from qm9 import dataset
from qm9.models import get_model
from equivariant_diffusion.utils import assert_correctly_masked
import torch
import pickle
import qm9.visualizer as vis
from qm9.analyze import check_stability
from os.path import join
from qm9.sampling import sample_chain, sample
from configs.datasets_config import get_dataset_info
def check_mask_correct(variables, node_mask):
for variable in variables:
assert_correctly_masked(variable, node_mask)
def save_and_sample_chain(args, eval_args, device, flow,
n_tries, n_nodes, dataset_info, id_from=0,
num_chains=100):
for i in range(num_chains):
target_path = f'eval/chain_{i}/'
one_hot, charges, x = sample_chain(
args, device, flow, n_tries, dataset_info)
vis.save_xyz_file(
join(eval_args.model_path, target_path), one_hot, charges, x,
dataset_info, id_from, name='chain')
vis.visualize_chain_uncertainty(
join(eval_args.model_path, target_path), dataset_info,
spheres_3d=True)
return one_hot, charges, x
def sample_different_sizes_and_save(args, eval_args, device, generative_model,
nodes_dist, dataset_info, n_samples=10):
nodesxsample = nodes_dist.sample(n_samples)
one_hot, charges, x, node_mask = sample(
args, device, generative_model, dataset_info,
nodesxsample=nodesxsample)
vis.save_xyz_file(
join(eval_args.model_path, 'eval/molecules/'), one_hot, charges, x,
id_from=0, name='molecule', dataset_info=dataset_info,
node_mask=node_mask)
def sample_only_stable_different_sizes_and_save(
args, eval_args, device, flow, nodes_dist,
dataset_info, n_samples=10, n_tries=50):
assert n_tries > n_samples
nodesxsample = nodes_dist.sample(n_tries)
one_hot, charges, x, node_mask = sample(
args, device, flow, dataset_info,
nodesxsample=nodesxsample)
counter = 0
for i in range(n_tries):
num_atoms = int(node_mask[i:i+1].sum().item())
atom_type = one_hot[i:i+1, :num_atoms].argmax(2).squeeze(0).cpu().detach().numpy()
x_squeeze = x[i:i+1, :num_atoms].squeeze(0).cpu().detach().numpy()
mol_stable = check_stability(x_squeeze, atom_type, dataset_info)[0]
num_remaining_attempts = n_tries - i - 1
num_remaining_samples = n_samples - counter
if mol_stable or num_remaining_attempts <= num_remaining_samples:
if mol_stable:
print('Found stable mol.')
vis.save_xyz_file(
join(eval_args.model_path, 'eval/molecules/'),
one_hot[i:i+1], charges[i:i+1], x[i:i+1],
id_from=counter, name='molecule_stable',
dataset_info=dataset_info,
node_mask=node_mask[i:i+1])
counter += 1
if counter >= n_samples:
break
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str,
default="outputs/edm_1",
help='Specify model path')
parser.add_argument(
'--n_tries', type=int, default=10,
help='N tries to find stable molecule for gif animation')
parser.add_argument('--n_nodes', type=int, default=19,
help='number of atoms in molecule for gif animation')
eval_args, unparsed_args = parser.parse_known_args()
assert eval_args.model_path is not None
with open(join(eval_args.model_path, 'args.pickle'), 'rb') as f:
args = pickle.load(f)
# CAREFUL with this -->
if not hasattr(args, 'normalization_factor'):
args.normalization_factor = 1
if not hasattr(args, 'aggregation_method'):
args.aggregation_method = 'sum'
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
args.device = device
dtype = torch.float32
utils.create_folders(args)
print(args)
dataset_info = get_dataset_info(args.dataset, args.remove_h)
dataloaders, charge_scale = dataset.retrieve_dataloaders(args)
flow, nodes_dist, prop_dist = get_model(
args, device, dataset_info, dataloaders['train'])
flow.to(device)
fn = 'generative_model_ema.npy' if args.ema_decay > 0 else 'generative_model.npy'
flow_state_dict = torch.load(join(eval_args.model_path, fn),
map_location=device)
flow.load_state_dict(flow_state_dict)
    print('Sampling a handful of molecules.')
sample_different_sizes_and_save(
args, eval_args, device, flow, nodes_dist,
dataset_info=dataset_info, n_samples=30)
print('Sampling stable molecules.')
sample_only_stable_different_sizes_and_save(
args, eval_args, device, flow, nodes_dist,
dataset_info=dataset_info, n_samples=10, n_tries=2*10)
print('Visualizing molecules.')
vis.visualize(
join(eval_args.model_path, 'eval/molecules/'), dataset_info,
max_num=100, spheres_3d=True)
print('Sampling visualization chain.')
save_and_sample_chain(
args, eval_args, device, flow,
n_tries=eval_args.n_tries, n_nodes=eval_args.n_nodes,
dataset_info=dataset_info)
if __name__ == "__main__":
main()
| 5,606 | 32.981818 | 90 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/main_qm9.py | # Rdkit import should be first, do not move it
try:
from rdkit import Chem
except ModuleNotFoundError:
pass
import copy
import utils
import argparse
import wandb
from configs.datasets_config import get_dataset_info
from os.path import join
from qm9 import dataset
from qm9.models import get_optim, get_model
from equivariant_diffusion import en_diffusion
from equivariant_diffusion.utils import assert_correctly_masked
from equivariant_diffusion import utils as flow_utils
import torch
import time
import pickle
from qm9.utils import prepare_context, compute_mean_mad
from train_test import train_epoch, test, analyze_and_save
parser = argparse.ArgumentParser(description='E3Diffusion')
parser.add_argument('--exp_name', type=str, default='debug_10')
parser.add_argument('--model', type=str, default='egnn_dynamics',
                    help='our_dynamics | schnet | simple_dynamics | '
                         'kernel_dynamics | egnn_dynamics | gnn_dynamics')
parser.add_argument('--probabilistic_model', type=str, default='diffusion',
help='diffusion')
# Training complexity is O(1) (unaffected), but sampling complexity is O(steps).
parser.add_argument('--diffusion_steps', type=int, default=500)
parser.add_argument('--diffusion_noise_schedule', type=str, default='polynomial_2',
                    help='learned, cosine, polynomial_<power>')
parser.add_argument('--diffusion_noise_precision', type=float, default=1e-5,
)
parser.add_argument('--diffusion_loss_type', type=str, default='l2',
help='vlb, l2')
parser.add_argument('--n_epochs', type=int, default=200)
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--lr', type=float, default=2e-4)
parser.add_argument('--brute_force', type=eval, default=False,
help='True | False')
parser.add_argument('--actnorm', type=eval, default=True,
help='True | False')
parser.add_argument('--break_train_epoch', type=eval, default=False,
help='True | False')
parser.add_argument('--dp', type=eval, default=True,
help='True | False')
parser.add_argument('--condition_time', type=eval, default=True,
help='True | False')
parser.add_argument('--clip_grad', type=eval, default=True,
help='True | False')
parser.add_argument('--trace', type=str, default='hutch',
help='hutch | exact')
# EGNN args -->
parser.add_argument('--n_layers', type=int, default=6,
help='number of layers')
parser.add_argument('--inv_sublayers', type=int, default=1,
                    help='number of sublayers per EGNN block')
parser.add_argument('--nf', type=int, default=128,
                    help='number of hidden features')
parser.add_argument('--tanh', type=eval, default=True,
help='use tanh in the coord_mlp')
parser.add_argument('--attention', type=eval, default=True,
help='use attention in the EGNN')
parser.add_argument('--norm_constant', type=float, default=1,
help='diff/(|diff| + norm_constant)')
parser.add_argument('--sin_embedding', type=eval, default=False,
                    help='whether to use the sinusoidal distance embedding')
# <-- EGNN args
parser.add_argument('--ode_regularization', type=float, default=1e-3)
parser.add_argument('--dataset', type=str, default='qm9',
help='qm9 | qm9_second_half (train only on the last 50K samples of the training dataset)')
parser.add_argument('--datadir', type=str, default='qm9/temp',
help='qm9 directory')
parser.add_argument('--filter_n_atoms', type=int, default=None,
                    help='When set to an integer value, QM9 will only contain molecules with that number of atoms')
parser.add_argument('--dequantization', type=str, default='argmax_variational',
help='uniform | variational | argmax_variational | deterministic')
parser.add_argument('--n_report_steps', type=int, default=1)
parser.add_argument('--wandb_usr', type=str)
parser.add_argument('--no_wandb', action='store_true', help='Disable wandb')
parser.add_argument('--online', type=bool, default=True, help='True = wandb online -- False = wandb offline')
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='disables CUDA training')
parser.add_argument('--save_model', type=eval, default=True,
help='save model')
parser.add_argument('--generate_epochs', type=int, default=1,
                    help='how often (in epochs) to generate samples')
parser.add_argument('--num_workers', type=int, default=0, help='Number of workers for the dataloader')
parser.add_argument('--test_epochs', type=int, default=10)
parser.add_argument('--data_augmentation', type=eval, default=False, help='augment training data with random rotations')
parser.add_argument("--conditioning", nargs='+', default=[],
help='arguments : homo | lumo | alpha | gap | mu | Cv' )
parser.add_argument('--resume', type=str, default=None,
                    help='path to an experiment output folder to resume from')
parser.add_argument('--start_epoch', type=int, default=0,
                    help='epoch to resume training from')
parser.add_argument('--ema_decay', type=float, default=0.999,
help='Amount of EMA decay, 0 means off. A reasonable value'
' is 0.999.')
parser.add_argument('--augment_noise', type=float, default=0)
parser.add_argument('--n_stability_samples', type=int, default=500,
help='Number of samples to compute the stability')
parser.add_argument('--normalize_factors', type=eval, default=[1, 4, 1],
help='normalize factors for [x, categorical, integer]')
parser.add_argument('--remove_h', action='store_true')
parser.add_argument('--include_charges', type=eval, default=True,
help='include atom charge or not')
parser.add_argument('--visualize_every_batch', type=int, default=1e8,
help="Can be used to visualize multiple times per epoch")
parser.add_argument('--normalization_factor', type=float, default=1,
help="Normalize the sum aggregation of EGNN")
parser.add_argument('--aggregation_method', type=str, default='sum',
help='"sum" or "mean"')
args = parser.parse_args()
dataset_info = get_dataset_info(args.dataset, args.remove_h)
atom_encoder = dataset_info['atom_encoder']
atom_decoder = dataset_info['atom_decoder']
# args, unparsed_args = parser.parse_known_args()
args.wandb_usr = utils.get_wandb_username(args.wandb_usr)
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
dtype = torch.float32
if args.resume is not None:
exp_name = args.exp_name + '_resume'
start_epoch = args.start_epoch
resume = args.resume
wandb_usr = args.wandb_usr
normalization_factor = args.normalization_factor
aggregation_method = args.aggregation_method
with open(join(args.resume, 'args.pickle'), 'rb') as f:
args = pickle.load(f)
args.resume = resume
args.break_train_epoch = False
args.exp_name = exp_name
args.start_epoch = start_epoch
args.wandb_usr = wandb_usr
# Careful with this -->
if not hasattr(args, 'normalization_factor'):
args.normalization_factor = normalization_factor
if not hasattr(args, 'aggregation_method'):
args.aggregation_method = aggregation_method
print(args)
utils.create_folders(args)
# print(args)
# Wandb config
if args.no_wandb:
mode = 'disabled'
else:
mode = 'online' if args.online else 'offline'
kwargs = {'entity': args.wandb_usr, 'name': args.exp_name, 'project': 'e3_diffusion', 'config': args,
'settings': wandb.Settings(_disable_stats=True), 'reinit': True, 'mode': mode}
wandb.init(**kwargs)
wandb.save('*.txt')
# Retrieve QM9 dataloaders
dataloaders, charge_scale = dataset.retrieve_dataloaders(args)
data_dummy = next(iter(dataloaders['train']))
if len(args.conditioning) > 0:
print(f'Conditioning on {args.conditioning}')
property_norms = compute_mean_mad(dataloaders, args.conditioning, args.dataset)
context_dummy = prepare_context(args.conditioning, data_dummy, property_norms)
context_node_nf = context_dummy.size(2)
else:
context_node_nf = 0
property_norms = None
args.context_node_nf = context_node_nf
# Create EGNN flow
model, nodes_dist, prop_dist = get_model(args, device, dataset_info, dataloaders['train'])
if prop_dist is not None:
prop_dist.set_normalizer(property_norms)
model = model.to(device)
optim = get_optim(args, model)
# print(model)
gradnorm_queue = utils.Queue()
gradnorm_queue.add(3000) # Add large value that will be flushed.
def check_mask_correct(variables, node_mask):
for variable in variables:
if len(variable) > 0:
assert_correctly_masked(variable, node_mask)
def main():
if args.resume is not None:
flow_state_dict = torch.load(join(args.resume, 'flow.npy'))
optim_state_dict = torch.load(join(args.resume, 'optim.npy'))
model.load_state_dict(flow_state_dict)
optim.load_state_dict(optim_state_dict)
# Initialize dataparallel if enabled and possible.
if args.dp and torch.cuda.device_count() > 1:
print(f'Training using {torch.cuda.device_count()} GPUs')
model_dp = torch.nn.DataParallel(model.cpu())
model_dp = model_dp.cuda()
else:
model_dp = model
# Initialize model copy for exponential moving average of params.
if args.ema_decay > 0:
model_ema = copy.deepcopy(model)
ema = flow_utils.EMA(args.ema_decay)
if args.dp and torch.cuda.device_count() > 1:
model_ema_dp = torch.nn.DataParallel(model_ema)
else:
model_ema_dp = model_ema
else:
ema = None
model_ema = model
model_ema_dp = model_dp
best_nll_val = 1e8
best_nll_test = 1e8
for epoch in range(args.start_epoch, args.n_epochs):
start_epoch = time.time()
train_epoch(args=args, loader=dataloaders['train'], epoch=epoch, model=model, model_dp=model_dp,
model_ema=model_ema, ema=ema, device=device, dtype=dtype, property_norms=property_norms,
nodes_dist=nodes_dist, dataset_info=dataset_info,
gradnorm_queue=gradnorm_queue, optim=optim, prop_dist=prop_dist)
print(f"Epoch took {time.time() - start_epoch:.1f} seconds.")
if epoch % args.test_epochs == 0:
if isinstance(model, en_diffusion.EnVariationalDiffusion):
wandb.log(model.log_info(), commit=True)
if not args.break_train_epoch:
analyze_and_save(args=args, epoch=epoch, model_sample=model_ema, nodes_dist=nodes_dist,
dataset_info=dataset_info, device=device,
prop_dist=prop_dist, n_samples=args.n_stability_samples)
nll_val = test(args=args, loader=dataloaders['valid'], epoch=epoch, eval_model=model_ema_dp,
partition='Val', device=device, dtype=dtype, nodes_dist=nodes_dist,
property_norms=property_norms)
nll_test = test(args=args, loader=dataloaders['test'], epoch=epoch, eval_model=model_ema_dp,
partition='Test', device=device, dtype=dtype,
nodes_dist=nodes_dist, property_norms=property_norms)
if nll_val < best_nll_val:
best_nll_val = nll_val
best_nll_test = nll_test
if args.save_model:
args.current_epoch = epoch + 1
utils.save_model(optim, 'outputs/%s/optim.npy' % args.exp_name)
utils.save_model(model, 'outputs/%s/generative_model.npy' % args.exp_name)
if args.ema_decay > 0:
utils.save_model(model_ema, 'outputs/%s/generative_model_ema.npy' % args.exp_name)
with open('outputs/%s/args.pickle' % args.exp_name, 'wb') as f:
pickle.dump(args, f)
if args.save_model:
utils.save_model(optim, 'outputs/%s/optim_%d.npy' % (args.exp_name, epoch))
utils.save_model(model, 'outputs/%s/generative_model_%d.npy' % (args.exp_name, epoch))
if args.ema_decay > 0:
utils.save_model(model_ema, 'outputs/%s/generative_model_ema_%d.npy' % (args.exp_name, epoch))
with open('outputs/%s/args_%d.pickle' % (args.exp_name, epoch), 'wb') as f:
pickle.dump(args, f)
print('Val loss: %.4f \t Test loss: %.4f' % (nll_val, nll_test))
print('Best val loss: %.4f \t Best test loss: %.4f' % (best_nll_val, best_nll_test))
wandb.log({"Val loss ": nll_val}, commit=True)
wandb.log({"Test loss ": nll_test}, commit=True)
wandb.log({"Best cross-validated test loss ": best_nll_test}, commit=True)
if __name__ == "__main__":
main()
| 13,079 | 44.103448 | 118 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/train_test.py | import wandb
from equivariant_diffusion.utils import assert_mean_zero_with_mask, remove_mean_with_mask,\
assert_correctly_masked, sample_center_gravity_zero_gaussian_with_mask
import numpy as np
import qm9.visualizer as vis
from qm9.analyze import analyze_stability_for_molecules
from qm9.sampling import sample_chain, sample, sample_sweep_conditional
import utils
import qm9.utils as qm9utils
from qm9 import losses
import time
import torch
def train_epoch(args, loader, epoch, model, model_dp, model_ema, ema, device, dtype, property_norms, optim,
nodes_dist, gradnorm_queue, dataset_info, prop_dist):
model_dp.train()
model.train()
nll_epoch = []
n_iterations = len(loader)
for i, data in enumerate(loader):
x = data['positions'].to(device, dtype)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = (data['charges'] if args.include_charges else torch.zeros(0)).to(device, dtype)
x = remove_mean_with_mask(x, node_mask)
if args.augment_noise > 0:
# Add noise eps ~ N(0, augment_noise) around points.
eps = sample_center_gravity_zero_gaussian_with_mask(x.size(), x.device, node_mask)
x = x + eps * args.augment_noise
x = remove_mean_with_mask(x, node_mask)
if args.data_augmentation:
x = utils.random_rotation(x).detach()
check_mask_correct([x, one_hot, charges], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = qm9utils.prepare_context(args.conditioning, data, property_norms).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
optim.zero_grad()
# transform batch through flow
nll, reg_term, mean_abs_z = losses.compute_loss_and_nll(args, model_dp, nodes_dist,
x, h, node_mask, edge_mask, context)
# standard nll from forward KL
loss = nll + args.ode_regularization * reg_term
loss.backward()
if args.clip_grad:
grad_norm = utils.gradient_clipping(model, gradnorm_queue)
else:
grad_norm = 0.
optim.step()
# Update EMA if enabled.
if args.ema_decay > 0:
ema.update_model_average(model_ema, model)
if i % args.n_report_steps == 0:
print(f"\rEpoch: {epoch}, iter: {i}/{n_iterations}, "
f"Loss {loss.item():.2f}, NLL: {nll.item():.2f}, "
f"RegTerm: {reg_term.item():.1f}, "
f"GradNorm: {grad_norm:.1f}")
nll_epoch.append(nll.item())
if (epoch % args.test_epochs == 0) and (i % args.visualize_every_batch == 0) and not (epoch == 0 and i == 0):
start = time.time()
if len(args.conditioning) > 0:
save_and_sample_conditional(args, device, model_ema, prop_dist, dataset_info, epoch=epoch)
save_and_sample_chain(model_ema, args, device, dataset_info, prop_dist, epoch=epoch,
batch_id=str(i))
sample_different_sizes_and_save(model_ema, nodes_dist, args, device, dataset_info,
prop_dist, epoch=epoch)
print(f'Sampling took {time.time() - start:.2f} seconds')
vis.visualize(f"outputs/{args.exp_name}/epoch_{epoch}_{i}", dataset_info=dataset_info, wandb=wandb)
vis.visualize_chain(f"outputs/{args.exp_name}/epoch_{epoch}_{i}/chain/", dataset_info, wandb=wandb)
if len(args.conditioning) > 0:
vis.visualize_chain("outputs/%s/epoch_%d/conditional/" % (args.exp_name, epoch), dataset_info,
wandb=wandb, mode='conditional')
wandb.log({"Batch NLL": nll.item()}, commit=True)
if args.break_train_epoch:
break
wandb.log({"Train Epoch NLL": np.mean(nll_epoch)}, commit=False)
def check_mask_correct(variables, node_mask):
for i, variable in enumerate(variables):
if len(variable) > 0:
assert_correctly_masked(variable, node_mask)
def test(args, loader, epoch, eval_model, device, dtype, property_norms, nodes_dist, partition='Test'):
eval_model.eval()
with torch.no_grad():
nll_epoch = 0
n_samples = 0
n_iterations = len(loader)
for i, data in enumerate(loader):
x = data['positions'].to(device, dtype)
batch_size = x.size(0)
node_mask = data['atom_mask'].to(device, dtype).unsqueeze(2)
edge_mask = data['edge_mask'].to(device, dtype)
one_hot = data['one_hot'].to(device, dtype)
charges = (data['charges'] if args.include_charges else torch.zeros(0)).to(device, dtype)
if args.augment_noise > 0:
# Add noise eps ~ N(0, augment_noise) around points.
eps = sample_center_gravity_zero_gaussian_with_mask(x.size(),
x.device,
node_mask)
x = x + eps * args.augment_noise
x = remove_mean_with_mask(x, node_mask)
check_mask_correct([x, one_hot, charges], node_mask)
assert_mean_zero_with_mask(x, node_mask)
h = {'categorical': one_hot, 'integer': charges}
if len(args.conditioning) > 0:
context = qm9utils.prepare_context(args.conditioning, data, property_norms).to(device, dtype)
assert_correctly_masked(context, node_mask)
else:
context = None
# transform batch through flow
nll, _, _ = losses.compute_loss_and_nll(args, eval_model, nodes_dist, x, h,
node_mask, edge_mask, context)
# standard nll from forward KL
nll_epoch += nll.item() * batch_size
n_samples += batch_size
if i % args.n_report_steps == 0:
print(f"\r {partition} NLL \t epoch: {epoch}, iter: {i}/{n_iterations}, "
f"NLL: {nll_epoch/n_samples:.2f}")
return nll_epoch/n_samples
def save_and_sample_chain(model, args, device, dataset_info, prop_dist,
epoch=0, id_from=0, batch_id=''):
one_hot, charges, x = sample_chain(args=args, device=device, flow=model,
n_tries=1, dataset_info=dataset_info, prop_dist=prop_dist)
vis.save_xyz_file(f'outputs/{args.exp_name}/epoch_{epoch}_{batch_id}/chain/',
one_hot, charges, x, dataset_info, id_from, name='chain')
return one_hot, charges, x
def sample_different_sizes_and_save(model, nodes_dist, args, device, dataset_info, prop_dist,
n_samples=5, epoch=0, batch_size=100, batch_id=''):
batch_size = min(batch_size, n_samples)
for counter in range(int(n_samples/batch_size)):
nodesxsample = nodes_dist.sample(batch_size)
one_hot, charges, x, node_mask = sample(args, device, model, prop_dist=prop_dist,
nodesxsample=nodesxsample,
dataset_info=dataset_info)
print(f"Generated molecule: Positions {x[:-1, :, :]}")
vis.save_xyz_file(f'outputs/{args.exp_name}/epoch_{epoch}_{batch_id}/', one_hot, charges, x, dataset_info,
batch_size * counter, name='molecule')
def analyze_and_save(epoch, model_sample, nodes_dist, args, device, dataset_info, prop_dist,
n_samples=1000, batch_size=100):
print(f'Analyzing molecule stability at epoch {epoch}...')
batch_size = min(batch_size, n_samples)
assert n_samples % batch_size == 0
molecules = {'one_hot': [], 'x': [], 'node_mask': []}
for i in range(int(n_samples/batch_size)):
nodesxsample = nodes_dist.sample(batch_size)
one_hot, charges, x, node_mask = sample(args, device, model_sample, dataset_info, prop_dist,
nodesxsample=nodesxsample)
molecules['one_hot'].append(one_hot.detach().cpu())
molecules['x'].append(x.detach().cpu())
molecules['node_mask'].append(node_mask.detach().cpu())
molecules = {key: torch.cat(molecules[key], dim=0) for key in molecules}
validity_dict, rdkit_tuple = analyze_stability_for_molecules(molecules, dataset_info)
wandb.log(validity_dict)
if rdkit_tuple is not None:
wandb.log({'Validity': rdkit_tuple[0][0], 'Uniqueness': rdkit_tuple[0][1], 'Novelty': rdkit_tuple[0][2]})
return validity_dict
def save_and_sample_conditional(args, device, model, prop_dist, dataset_info, epoch=0, id_from=0):
one_hot, charges, x, node_mask = sample_sweep_conditional(args, device, model, dataset_info, prop_dist)
vis.save_xyz_file(
'outputs/%s/epoch_%d/conditional/' % (args.exp_name, epoch), one_hot, charges, x, dataset_info,
id_from, name='conditional', node_mask=node_mask)
return one_hot, charges, x
| 9,409 | 44.240385 | 117 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/equivariant_diffusion/distributions.py | import torch
from equivariant_diffusion.utils import \
center_gravity_zero_gaussian_log_likelihood_with_mask, \
standard_gaussian_log_likelihood_with_mask, \
center_gravity_zero_gaussian_log_likelihood, \
sample_center_gravity_zero_gaussian_with_mask, \
sample_center_gravity_zero_gaussian, \
sample_gaussian_with_mask
class PositionFeaturePrior(torch.nn.Module):
def __init__(self, n_dim, in_node_nf):
super().__init__()
self.n_dim = n_dim
self.in_node_nf = in_node_nf
def forward(self, z_x, z_h, node_mask=None):
assert len(z_x.size()) == 3
assert len(node_mask.size()) == 3
assert node_mask.size()[:2] == z_x.size()[:2]
assert (z_x * (1 - node_mask)).sum() < 1e-8 and \
(z_h * (1 - node_mask)).sum() < 1e-8, \
'These variables should be properly masked.'
log_pz_x = center_gravity_zero_gaussian_log_likelihood_with_mask(
z_x, node_mask
)
log_pz_h = standard_gaussian_log_likelihood_with_mask(
z_h, node_mask
)
log_pz = log_pz_x + log_pz_h
return log_pz
def sample(self, n_samples, n_nodes, node_mask):
z_x = sample_center_gravity_zero_gaussian_with_mask(
size=(n_samples, n_nodes, self.n_dim), device=node_mask.device,
node_mask=node_mask)
z_h = sample_gaussian_with_mask(
size=(n_samples, n_nodes, self.in_node_nf), device=node_mask.device,
node_mask=node_mask)
return z_x, z_h
class PositionPrior(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return center_gravity_zero_gaussian_log_likelihood(x)
def sample(self, size, device):
samples = sample_center_gravity_zero_gaussian(size, device)
return samples
| 1,865 | 31.172414 | 80 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/equivariant_diffusion/utils.py | import torch
import numpy as np
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_model_average(self, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = self.update_average(old_weight, up_weight)
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
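# --- Editor's sketch (not part of the original module): a minimal check of the
# EMA update rule above, new_avg = beta * old_avg + (1 - beta) * new. The helper
# name `_demo_ema_update` is invented for illustration; only torch is assumed.
def _demo_ema_update():
    current = torch.nn.Linear(2, 2)
    averaged = torch.nn.Linear(2, 2)
    old = [p.data.clone() for p in averaged.parameters()]
    ema = EMA(beta=0.999)
    ema.update_model_average(averaged, current)
    for o, c, a in zip(old, current.parameters(), averaged.parameters()):
        # Each averaged parameter equals the convex combination above.
        assert torch.allclose(a.data, 0.999 * o + 0.001 * c.data)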
def sum_except_batch(x):
return x.reshape(x.size(0), -1).sum(dim=-1)
def remove_mean(x):
mean = torch.mean(x, dim=1, keepdim=True)
x = x - mean
return x
def remove_mean_with_mask(x, node_mask):
masked_max_abs_value = (x * (1 - node_mask)).abs().sum().item()
assert masked_max_abs_value < 1e-5, f'Error {masked_max_abs_value} too high'
N = node_mask.sum(1, keepdims=True)
mean = torch.sum(x, dim=1, keepdim=True) / N
x = x - mean * node_mask
return x
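# --- Editor's sketch (not part of the original module): remove_mean_with_mask
# subtracts the mean over *real* nodes only, leaving padded rows at zero.
# `_demo_remove_mean_with_mask` is an invented name for illustration.
def _demo_remove_mean_with_mask():
    x = torch.tensor([[[1.0, 2.0], [3.0, 4.0], [0.0, 0.0]]])  # (B=1, N=3, D=2)
    node_mask = torch.tensor([[[1.0], [1.0], [0.0]]])         # last node is padding
    out = remove_mean_with_mask(x, node_mask)
    assert torch.allclose(out.sum(dim=1), torch.zeros(1, 2))  # zero mean over real nodes
    assert torch.all(out[0, 2] == 0)                          # padding stays zero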
def assert_mean_zero(x):
mean = torch.mean(x, dim=1, keepdim=True)
assert mean.abs().max().item() < 1e-4
def assert_mean_zero_with_mask(x, node_mask, eps=1e-10):
assert_correctly_masked(x, node_mask)
largest_value = x.abs().max().item()
error = torch.sum(x, dim=1, keepdim=True).abs().max().item()
rel_error = error / (largest_value + eps)
assert rel_error < 1e-2, f'Mean is not zero, relative_error {rel_error}'
def assert_correctly_masked(variable, node_mask):
assert (variable * (1 - node_mask)).abs().max().item() < 1e-4, \
'Variables not masked properly.'
def center_gravity_zero_gaussian_log_likelihood(x):
assert len(x.size()) == 3
B, N, D = x.size()
assert_mean_zero(x)
# r is invariant to a basis change in the relevant hyperplane.
r2 = sum_except_batch(x.pow(2))
# The relevant hyperplane is (N-1) * D dimensional.
degrees_of_freedom = (N-1) * D
# Normalizing constant and logpx are computed:
log_normalizing_constant = -0.5 * degrees_of_freedom * np.log(2*np.pi)
log_px = -0.5 * r2 + log_normalizing_constant
return log_px
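# --- Editor's sketch (not part of the original module): the likelihood above
# uses (N-1)*D degrees of freedom because centering removes one D-dimensional
# mean. `_demo_center_gravity_logpx` is an invented name for illustration.
def _demo_center_gravity_logpx():
    x = remove_mean(torch.randn(4, 5, 3))  # B=4, N=5, D=3, centered point clouds
    log_px = center_gravity_zero_gaussian_log_likelihood(x)
    assert log_px.shape == (4,)  # one log-likelihood per batch element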
def sample_center_gravity_zero_gaussian(size, device):
assert len(size) == 3
x = torch.randn(size, device=device)
# This projection only works because Gaussian is rotation invariant around
# zero and samples are independent!
x_projected = remove_mean(x)
return x_projected
def center_gravity_zero_gaussian_log_likelihood_with_mask(x, node_mask):
assert len(x.size()) == 3
B, N_embedded, D = x.size()
assert_mean_zero_with_mask(x, node_mask)
# r is invariant to a basis change in the relevant hyperplane, the masked
# out values will have zero contribution.
r2 = sum_except_batch(x.pow(2))
# The relevant hyperplane is (N-1) * D dimensional.
N = node_mask.squeeze(2).sum(1) # N has shape [B]
degrees_of_freedom = (N-1) * D
# Normalizing constant and logpx are computed:
log_normalizing_constant = -0.5 * degrees_of_freedom * np.log(2*np.pi)
log_px = -0.5 * r2 + log_normalizing_constant
return log_px
def sample_center_gravity_zero_gaussian_with_mask(size, device, node_mask):
assert len(size) == 3
x = torch.randn(size, device=device)
x_masked = x * node_mask
# This projection only works because Gaussian is rotation invariant around
# zero and samples are independent!
x_projected = remove_mean_with_mask(x_masked, node_mask)
return x_projected
def standard_gaussian_log_likelihood(x):
# Normalizing constant and logpx are computed:
log_px = sum_except_batch(-0.5 * x * x - 0.5 * np.log(2*np.pi))
return log_px
def sample_gaussian(size, device):
x = torch.randn(size, device=device)
return x
def standard_gaussian_log_likelihood_with_mask(x, node_mask):
# Normalizing constant and logpx are computed:
log_px_elementwise = -0.5 * x * x - 0.5 * np.log(2*np.pi)
log_px = sum_except_batch(log_px_elementwise * node_mask)
return log_px
def sample_gaussian_with_mask(size, device, node_mask):
x = torch.randn(size, device=device)
x_masked = x * node_mask
return x_masked
| 4,243 | 29.099291 | 96 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/equivariant_diffusion/en_diffusion.py | from equivariant_diffusion import utils
import numpy as np
import math
import torch
from egnn import models
from torch.nn import functional as F
from equivariant_diffusion import utils as diffusion_utils
# Defining some useful util functions.
def expm1(x: torch.Tensor) -> torch.Tensor:
return torch.expm1(x)
def softplus(x: torch.Tensor) -> torch.Tensor:
return F.softplus(x)
def sum_except_batch(x):
return x.view(x.size(0), -1).sum(-1)
def clip_noise_schedule(alphas2, clip_value=0.001):
"""
For a noise schedule given by alpha^2, this clips alpha_t / alpha_t-1. This may help improve stability during
sampling.
"""
alphas2 = np.concatenate([np.ones(1), alphas2], axis=0)
alphas_step = (alphas2[1:] / alphas2[:-1])
alphas_step = np.clip(alphas_step, a_min=clip_value, a_max=1.)
alphas2 = np.cumprod(alphas_step, axis=0)
return alphas2
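# --- Editor's sketch (not part of the original repo): a quick check that
# clip_noise_schedule keeps every per-step ratio alpha_t^2 / alpha_{t-1}^2
# within [clip_value, 1]. `_demo_clip_noise_schedule` is an invented name.
def _demo_clip_noise_schedule():
    raw = np.linspace(1.0, 1e-8, 20)
    clipped = clip_noise_schedule(raw, clip_value=0.05)
    ratios = clipped[1:] / clipped[:-1]
    assert np.all(ratios >= 0.05 - 1e-12) and np.all(ratios <= 1.0 + 1e-12)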
def polynomial_schedule(timesteps: int, s=1e-4, power=3.):
"""
A noise schedule based on a simple polynomial equation: 1 - x^power.
"""
steps = timesteps + 1
x = np.linspace(0, steps, steps)
alphas2 = (1 - np.power(x / steps, power))**2
alphas2 = clip_noise_schedule(alphas2, clip_value=0.001)
precision = 1 - 2 * s
alphas2 = precision * alphas2 + s
return alphas2
def cosine_beta_schedule(timesteps, s=0.008, raise_to_power: float = 1):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 2
x = np.linspace(0, steps, steps)
alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
betas = np.clip(betas, a_min=0, a_max=0.999)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
if raise_to_power != 1:
alphas_cumprod = np.power(alphas_cumprod, raise_to_power)
return alphas_cumprod
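# --- Editor's sketch (not part of the original repo): the cosine schedule
# starts near alpha_bar ~ 1 (no noise) and decays monotonically towards 0.
# `_demo_cosine_schedule` is an invented name for illustration.
def _demo_cosine_schedule():
    a_bar = cosine_beta_schedule(1000)
    assert a_bar[0] > 0.99 and a_bar[-1] < 1e-3
    assert np.all(np.diff(a_bar) <= 0)  # monotonically non-increasing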
def gaussian_entropy(mu, sigma):
# In case sigma needed to be broadcast (which is very likely in this code).
zeros = torch.zeros_like(mu)
return sum_except_batch(
zeros + 0.5 * torch.log(2 * np.pi * sigma**2) + 0.5
)
def gaussian_KL(q_mu, q_sigma, p_mu, p_sigma, node_mask):
"""Computes the KL distance between two normal distributions.
Args:
q_mu: Mean of distribution q.
q_sigma: Standard deviation of distribution q.
p_mu: Mean of distribution p.
p_sigma: Standard deviation of distribution p.
Returns:
The KL distance, summed over all dimensions except the batch dim.
"""
return sum_except_batch(
(
torch.log(p_sigma / q_sigma)
+ 0.5 * (q_sigma**2 + (q_mu - p_mu)**2) / (p_sigma**2)
- 0.5
) * node_mask
)
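# --- Editor's sketch (not part of the original repo): with a full mask,
# gaussian_KL should agree with torch.distributions' KL, summed over all
# non-batch dims. `_demo_gaussian_kl` is an invented name for illustration.
def _demo_gaussian_kl():
    from torch.distributions import Normal, kl_divergence
    q_mu, p_mu = torch.randn(2, 3, 4), torch.randn(2, 3, 4)
    q_sigma, p_sigma = torch.rand(2, 3, 4) + 0.5, torch.rand(2, 3, 4) + 0.5
    node_mask = torch.ones(2, 3, 1)
    ours = gaussian_KL(q_mu, q_sigma, p_mu, p_sigma, node_mask)
    ref = kl_divergence(Normal(q_mu, q_sigma), Normal(p_mu, p_sigma)).sum(dim=(1, 2))
    assert torch.allclose(ours, ref, atol=1e-5)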
def gaussian_KL_for_dimension(q_mu, q_sigma, p_mu, p_sigma, d):
"""Computes the KL distance between two normal distributions.
Args:
q_mu: Mean of distribution q.
q_sigma: Standard deviation of distribution q.
p_mu: Mean of distribution p.
p_sigma: Standard deviation of distribution p.
Returns:
The KL distance, summed over all dimensions except the batch dim.
"""
mu_norm2 = sum_except_batch((q_mu - p_mu)**2)
assert len(q_sigma.size()) == 1
assert len(p_sigma.size()) == 1
return d * torch.log(p_sigma / q_sigma) + 0.5 * (d * q_sigma**2 + mu_norm2) / (p_sigma**2) - 0.5 * d
class PositiveLinear(torch.nn.Module):
"""Linear layer with weights forced to be positive."""
def __init__(self, in_features: int, out_features: int, bias: bool = True,
weight_init_offset: int = -2):
super(PositiveLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = torch.nn.Parameter(
torch.empty((out_features, in_features)))
if bias:
self.bias = torch.nn.Parameter(torch.empty(out_features))
else:
self.register_parameter('bias', None)
self.weight_init_offset = weight_init_offset
self.reset_parameters()
def reset_parameters(self) -> None:
torch.nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
with torch.no_grad():
self.weight.add_(self.weight_init_offset)
if self.bias is not None:
fan_in, _ = torch.nn.init._calculate_fan_in_and_fan_out(self.weight)
bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
torch.nn.init.uniform_(self.bias, -bound, bound)
def forward(self, input):
positive_weight = softplus(self.weight)
return F.linear(input, positive_weight, self.bias)
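# --- Editor's sketch (not part of the original repo): PositiveLinear squashes
# its raw weights through softplus, so the effective weight matrix is strictly
# positive. `_demo_positive_linear` is an invented name for illustration.
def _demo_positive_linear():
    layer = PositiveLinear(3, 2)
    assert torch.all(softplus(layer.weight) > 0)  # effective weights are positive
    assert layer(torch.randn(5, 3)).shape == (5, 2)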
class SinusoidalPosEmb(torch.nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
x = x.squeeze() * 1000
assert len(x.shape) == 1
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
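# --- Editor's sketch (not part of the original repo): timesteps in [0, 1] are
# scaled by 1000 and embedded with transformer-style sin/cos features.
# `_demo_sinusoidal_pos_emb` is an invented name for illustration.
def _demo_sinusoidal_pos_emb():
    emb = SinusoidalPosEmb(dim=8)
    out = emb(torch.linspace(0, 1, 16).view(16, 1))  # squeezed to (16,) inside
    assert out.shape == (16, 8)
    assert torch.all(out.abs() <= 1.0)  # sin/cos features stay in [-1, 1]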
class PredefinedNoiseSchedule(torch.nn.Module):
"""
Predefined noise schedule. Essentially creates a lookup array for predefined (non-learned) noise schedules.
"""
def __init__(self, noise_schedule, timesteps, precision):
super(PredefinedNoiseSchedule, self).__init__()
self.timesteps = timesteps
if noise_schedule == 'cosine':
alphas2 = cosine_beta_schedule(timesteps)
elif 'polynomial' in noise_schedule:
splits = noise_schedule.split('_')
assert len(splits) == 2
power = float(splits[1])
alphas2 = polynomial_schedule(timesteps, s=precision, power=power)
else:
raise ValueError(noise_schedule)
print('alphas2', alphas2)
sigmas2 = 1 - alphas2
log_alphas2 = np.log(alphas2)
log_sigmas2 = np.log(sigmas2)
log_alphas2_to_sigmas2 = log_alphas2 - log_sigmas2
print('gamma', -log_alphas2_to_sigmas2)
self.gamma = torch.nn.Parameter(
torch.from_numpy(-log_alphas2_to_sigmas2).float(),
requires_grad=False)
def forward(self, t):
t_int = torch.round(t * self.timesteps).long()
return self.gamma[t_int]
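# --- Editor's sketch (not part of the original repo): gamma(t) is a lookup
# into a precomputed table of length timesteps + 1, indexed by round(t * T).
# `_demo_predefined_schedule` is an invented name for illustration.
def _demo_predefined_schedule():
    schedule = PredefinedNoiseSchedule('polynomial_2', timesteps=10, precision=1e-5)
    gamma = schedule(torch.tensor([[0.0], [0.5], [1.0]]))
    assert gamma.shape == (3, 1)
    assert gamma[0] < gamma[1] < gamma[2]  # gamma grows with the noise level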
class GammaNetwork(torch.nn.Module):
"""The gamma network models a monotonic increasing function. Construction as in the VDM paper."""
def __init__(self):
super().__init__()
self.l1 = PositiveLinear(1, 1)
self.l2 = PositiveLinear(1, 1024)
self.l3 = PositiveLinear(1024, 1)
self.gamma_0 = torch.nn.Parameter(torch.tensor([-5.]))
self.gamma_1 = torch.nn.Parameter(torch.tensor([10.]))
self.show_schedule()
def show_schedule(self, num_steps=50):
t = torch.linspace(0, 1, num_steps).view(num_steps, 1)
gamma = self.forward(t)
print('Gamma schedule:')
print(gamma.detach().cpu().numpy().reshape(num_steps))
def gamma_tilde(self, t):
l1_t = self.l1(t)
return l1_t + self.l3(torch.sigmoid(self.l2(l1_t)))
def forward(self, t):
zeros, ones = torch.zeros_like(t), torch.ones_like(t)
# Not super efficient.
gamma_tilde_0 = self.gamma_tilde(zeros)
gamma_tilde_1 = self.gamma_tilde(ones)
gamma_tilde_t = self.gamma_tilde(t)
# Normalize to [0, 1]
normalized_gamma = (gamma_tilde_t - gamma_tilde_0) / (
gamma_tilde_1 - gamma_tilde_0)
# Rescale to [gamma_0, gamma_1]
gamma = self.gamma_0 + (self.gamma_1 - self.gamma_0) * normalized_gamma
return gamma
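# --- Editor's sketch (not part of the original repo): the GammaNetwork is
# monotone because every layer has softplus-positive weights, and its output is
# rescaled exactly onto [gamma_0, gamma_1]. `_demo_gamma_network` is invented.
def _demo_gamma_network():
    net = GammaNetwork()  # note: prints its initial schedule on construction
    gamma = net(torch.linspace(0, 1, 11).view(11, 1))
    assert torch.allclose(gamma[0], net.gamma_0) and torch.allclose(gamma[-1], net.gamma_1)
    assert torch.all(gamma[1:] >= gamma[:-1])  # monotonically non-decreasing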
def cdf_standard_gaussian(x):
return 0.5 * (1. + torch.erf(x / math.sqrt(2)))
class EnVariationalDiffusion(torch.nn.Module):
"""
The E(n) Diffusion Module.
"""
def __init__(
self,
dynamics: models.EGNN_dynamics_QM9, in_node_nf: int, n_dims: int,
timesteps: int = 1000, parametrization='eps', noise_schedule='learned',
noise_precision=1e-4, loss_type='vlb', norm_values=(1., 1., 1.),
norm_biases=(None, 0., 0.), include_charges=True):
super().__init__()
assert loss_type in {'vlb', 'l2'}
self.loss_type = loss_type
self.include_charges = include_charges
if noise_schedule == 'learned':
assert loss_type == 'vlb', 'A noise schedule can only be learned' \
' with a vlb objective.'
# Only supported parametrization.
assert parametrization == 'eps'
if noise_schedule == 'learned':
self.gamma = GammaNetwork()
else:
self.gamma = PredefinedNoiseSchedule(noise_schedule, timesteps=timesteps,
precision=noise_precision)
# The network that will predict the denoising.
self.dynamics = dynamics
self.in_node_nf = in_node_nf
self.n_dims = n_dims
self.num_classes = self.in_node_nf - self.include_charges
self.T = timesteps
self.parametrization = parametrization
self.norm_values = norm_values
self.norm_biases = norm_biases
self.register_buffer('buffer', torch.zeros(1))
if noise_schedule != 'learned':
self.check_issues_norm_values()
def check_issues_norm_values(self, num_stdevs=8):
zeros = torch.zeros((1, 1))
gamma_0 = self.gamma(zeros)
sigma_0 = self.sigma(gamma_0, target_tensor=zeros).item()
        # Check whether 1 / norm_value is still larger than num_stdevs * the
        # standard deviation.
max_norm_value = max(self.norm_values[1], self.norm_values[2])
if sigma_0 * num_stdevs > 1. / max_norm_value:
raise ValueError(
f'Value for normalization value {max_norm_value} probably too '
f'large with sigma_0 {sigma_0:.5f} and '
f'1 / norm_value = {1. / max_norm_value}')
def phi(self, x, t, node_mask, edge_mask, context):
net_out = self.dynamics._forward(t, x, node_mask, edge_mask, context)
return net_out
def inflate_batch_array(self, array, target):
"""
Inflates the batch array (array) with only a single axis (i.e. shape = (batch_size,), or possibly more empty
axes (i.e. shape (batch_size, 1, ..., 1)) to match the target shape.
"""
target_shape = (array.size(0),) + (1,) * (len(target.size()) - 1)
return array.view(target_shape)
def sigma(self, gamma, target_tensor):
"""Computes sigma given gamma."""
return self.inflate_batch_array(torch.sqrt(torch.sigmoid(gamma)), target_tensor)
def alpha(self, gamma, target_tensor):
"""Computes alpha given gamma."""
return self.inflate_batch_array(torch.sqrt(torch.sigmoid(-gamma)), target_tensor)
def SNR(self, gamma):
"""Computes signal to noise ratio (alpha^2/sigma^2) given gamma."""
return torch.exp(-gamma)
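    # Editor's note (addition): a quick identity check for SNR. Since
    # alpha^2 = sigmoid(-gamma) and sigma^2 = sigmoid(gamma) above,
    #   SNR = sigmoid(-gamma) / sigmoid(gamma)
    #       = (1 + e^{-gamma}) / (1 + e^{gamma}) = e^{-gamma},
    # which is exactly torch.exp(-gamma).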
def subspace_dimensionality(self, node_mask):
"""Compute the dimensionality on translation-invariant linear subspace where distributions on x are defined."""
number_of_nodes = torch.sum(node_mask.squeeze(2), dim=1)
return (number_of_nodes - 1) * self.n_dims
def normalize(self, x, h, node_mask):
x = x / self.norm_values[0]
delta_log_px = -self.subspace_dimensionality(node_mask) * np.log(self.norm_values[0])
# Casting to float in case h still has long or int type.
h_cat = (h['categorical'].float() - self.norm_biases[1]) / self.norm_values[1] * node_mask
h_int = (h['integer'].float() - self.norm_biases[2]) / self.norm_values[2]
if self.include_charges:
h_int = h_int * node_mask
# Create new h dictionary.
h = {'categorical': h_cat, 'integer': h_int}
return x, h, delta_log_px
def unnormalize(self, x, h_cat, h_int, node_mask):
x = x * self.norm_values[0]
h_cat = h_cat * self.norm_values[1] + self.norm_biases[1]
h_cat = h_cat * node_mask
h_int = h_int * self.norm_values[2] + self.norm_biases[2]
if self.include_charges:
h_int = h_int * node_mask
return x, h_cat, h_int
def unnormalize_z(self, z, node_mask):
# Parse from z
x, h_cat = z[:, :, 0:self.n_dims], z[:, :, self.n_dims:self.n_dims+self.num_classes]
h_int = z[:, :, self.n_dims+self.num_classes:self.n_dims+self.num_classes+1]
assert h_int.size(2) == self.include_charges
# Unnormalize
x, h_cat, h_int = self.unnormalize(x, h_cat, h_int, node_mask)
output = torch.cat([x, h_cat, h_int], dim=2)
return output
def sigma_and_alpha_t_given_s(self, gamma_t: torch.Tensor, gamma_s: torch.Tensor, target_tensor: torch.Tensor):
"""
Computes sigma t given s, using gamma_t and gamma_s. Used during sampling.
These are defined as:
alpha t given s = alpha t / alpha s,
sigma t given s = sqrt(1 - (alpha t given s) ^2 ).
"""
sigma2_t_given_s = self.inflate_batch_array(
-expm1(softplus(gamma_s) - softplus(gamma_t)), target_tensor
)
# alpha_t_given_s = alpha_t / alpha_s
log_alpha2_t = F.logsigmoid(-gamma_t)
log_alpha2_s = F.logsigmoid(-gamma_s)
log_alpha2_t_given_s = log_alpha2_t - log_alpha2_s
alpha_t_given_s = torch.exp(0.5 * log_alpha2_t_given_s)
alpha_t_given_s = self.inflate_batch_array(
alpha_t_given_s, target_tensor)
sigma_t_given_s = torch.sqrt(sigma2_t_given_s)
return sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s
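    # Editor's worked example (addition): for gamma_s = -5 and gamma_t = 5,
    #   alpha_s^2 = sigmoid(5) ~= 0.99331,  alpha_t^2 = sigmoid(-5) ~= 0.00669,
    # so (alpha_t / alpha_s)^2 ~= 0.006738 and
    #   sigma2_t_given_s = -expm1(softplus(-5) - softplus(5)) = 1 - e^{-5} ~= 0.993262,
    # matching 1 - (alpha_t / alpha_s)^2 as the docstring above states.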
def kl_prior(self, xh, node_mask):
"""Computes the KL between q(z1 | x) and the prior p(z1) = Normal(0, 1).
This is essentially a lot of work for something that is in practice negligible in the loss. However, you
compute it so that you see it when you've made a mistake in your noise schedule.
"""
# Compute the last alpha value, alpha_T.
ones = torch.ones((xh.size(0), 1), device=xh.device)
gamma_T = self.gamma(ones)
alpha_T = self.alpha(gamma_T, xh)
# Compute means.
mu_T = alpha_T * xh
mu_T_x, mu_T_h = mu_T[:, :, :self.n_dims], mu_T[:, :, self.n_dims:]
# Compute standard deviations (only batch axis for x-part, inflated for h-part).
sigma_T_x = self.sigma(gamma_T, mu_T_x).squeeze() # Remove inflate, only keep batch dimension for x-part.
sigma_T_h = self.sigma(gamma_T, mu_T_h)
# Compute KL for h-part.
zeros, ones = torch.zeros_like(mu_T_h), torch.ones_like(sigma_T_h)
kl_distance_h = gaussian_KL(mu_T_h, sigma_T_h, zeros, ones, node_mask)
# Compute KL for x-part.
zeros, ones = torch.zeros_like(mu_T_x), torch.ones_like(sigma_T_x)
subspace_d = self.subspace_dimensionality(node_mask)
kl_distance_x = gaussian_KL_for_dimension(mu_T_x, sigma_T_x, zeros, ones, d=subspace_d)
return kl_distance_x + kl_distance_h
def compute_x_pred(self, net_out, zt, gamma_t):
"""Commputes x_pred, i.e. the most likely prediction of x."""
if self.parametrization == 'x':
x_pred = net_out
elif self.parametrization == 'eps':
sigma_t = self.sigma(gamma_t, target_tensor=net_out)
alpha_t = self.alpha(gamma_t, target_tensor=net_out)
eps_t = net_out
x_pred = 1. / alpha_t * (zt - sigma_t * eps_t)
else:
raise ValueError(self.parametrization)
return x_pred
def compute_error(self, net_out, gamma_t, eps):
"""Computes error, i.e. the most likely prediction of x."""
eps_t = net_out
if self.training and self.loss_type == 'l2':
denom = (self.n_dims + self.in_node_nf) * eps_t.shape[1]
error = sum_except_batch((eps - eps_t) ** 2) / denom
else:
error = sum_except_batch((eps - eps_t) ** 2)
return error
def log_constants_p_x_given_z0(self, x, node_mask):
"""Computes p(x|z0)."""
batch_size = x.size(0)
n_nodes = node_mask.squeeze(2).sum(1) # N has shape [B]
assert n_nodes.size() == (batch_size,)
degrees_of_freedom_x = (n_nodes - 1) * self.n_dims
zeros = torch.zeros((x.size(0), 1), device=x.device)
gamma_0 = self.gamma(zeros)
# Recall that sigma_x = sqrt(sigma_0^2 / alpha_0^2) = SNR(-0.5 gamma_0).
log_sigma_x = 0.5 * gamma_0.view(batch_size)
return degrees_of_freedom_x * (- log_sigma_x - 0.5 * np.log(2 * np.pi))
def sample_p_xh_given_z0(self, z0, node_mask, edge_mask, context, fix_noise=False):
"""Samples x ~ p(x|z0)."""
zeros = torch.zeros(size=(z0.size(0), 1), device=z0.device)
gamma_0 = self.gamma(zeros)
# Computes sqrt(sigma_0^2 / alpha_0^2)
sigma_x = self.SNR(-0.5 * gamma_0).unsqueeze(1)
net_out = self.phi(z0, zeros, node_mask, edge_mask, context)
# Compute mu for p(zs | zt).
mu_x = self.compute_x_pred(net_out, z0, gamma_0)
xh = self.sample_normal(mu=mu_x, sigma=sigma_x, node_mask=node_mask, fix_noise=fix_noise)
x = xh[:, :, :self.n_dims]
h_int = z0[:, :, -1:] if self.include_charges else torch.zeros(0).to(z0.device)
x, h_cat, h_int = self.unnormalize(x, z0[:, :, self.n_dims:-1], h_int, node_mask)
h_cat = F.one_hot(torch.argmax(h_cat, dim=2), self.num_classes) * node_mask
h_int = torch.round(h_int).long() * node_mask
h = {'integer': h_int, 'categorical': h_cat}
return x, h
def sample_normal(self, mu, sigma, node_mask, fix_noise=False):
"""Samples from a Normal distribution."""
bs = 1 if fix_noise else mu.size(0)
eps = self.sample_combined_position_feature_noise(bs, mu.size(1), node_mask)
return mu + sigma * eps
def log_pxh_given_z0_without_constants(
self, x, h, z_t, gamma_0, eps, net_out, node_mask, epsilon=1e-10):
# Discrete properties are predicted directly from z_t.
z_h_cat = z_t[:, :, self.n_dims:-1] if self.include_charges else z_t[:, :, self.n_dims:]
z_h_int = z_t[:, :, -1:] if self.include_charges else torch.zeros(0).to(z_t.device)
# Take only part over x.
eps_x = eps[:, :, :self.n_dims]
net_x = net_out[:, :, :self.n_dims]
# Compute sigma_0 and rescale to the integer scale of the data.
sigma_0 = self.sigma(gamma_0, target_tensor=z_t)
sigma_0_cat = sigma_0 * self.norm_values[1]
sigma_0_int = sigma_0 * self.norm_values[2]
# Computes the error for the distribution N(x | 1 / alpha_0 z_0 + sigma_0/alpha_0 eps_0, sigma_0 / alpha_0),
# the weighting in the epsilon parametrization is exactly '1'.
log_p_x_given_z_without_constants = -0.5 * self.compute_error(net_x, gamma_0, eps_x)
# Compute delta indicator masks.
h_integer = torch.round(h['integer'] * self.norm_values[2] + self.norm_biases[2]).long()
onehot = h['categorical'] * self.norm_values[1] + self.norm_biases[1]
estimated_h_integer = z_h_int * self.norm_values[2] + self.norm_biases[2]
estimated_h_cat = z_h_cat * self.norm_values[1] + self.norm_biases[1]
assert h_integer.size() == estimated_h_integer.size()
h_integer_centered = h_integer - estimated_h_integer
# Compute integral from -0.5 to 0.5 of the normal distribution
# N(mean=h_integer_centered, stdev=sigma_0_int)
log_ph_integer = torch.log(
cdf_standard_gaussian((h_integer_centered + 0.5) / sigma_0_int)
- cdf_standard_gaussian((h_integer_centered - 0.5) / sigma_0_int)
+ epsilon)
log_ph_integer = sum_except_batch(log_ph_integer * node_mask)
# Centered h_cat around 1, since onehot encoded.
centered_h_cat = estimated_h_cat - 1
# Compute integrals from 0.5 to 1.5 of the normal distribution
# N(mean=z_h_cat, stdev=sigma_0_cat)
log_ph_cat_proportional = torch.log(
cdf_standard_gaussian((centered_h_cat + 0.5) / sigma_0_cat)
- cdf_standard_gaussian((centered_h_cat - 0.5) / sigma_0_cat)
+ epsilon)
# Normalize the distribution over the categories.
log_Z = torch.logsumexp(log_ph_cat_proportional, dim=2, keepdim=True)
log_probabilities = log_ph_cat_proportional - log_Z
        # Select the log_prob of the current category using the onehot
        # representation.
log_ph_cat = sum_except_batch(log_probabilities * onehot * node_mask)
# Combine categorical and integer log-probabilities.
log_p_h_given_z = log_ph_integer + log_ph_cat
# Combine log probabilities for x and h.
log_p_xh_given_z = log_p_x_given_z_without_constants + log_p_h_given_z
return log_p_xh_given_z
def compute_loss(self, x, h, node_mask, edge_mask, context, t0_always):
"""Computes an estimator for the variational lower bound, or the simple loss (MSE)."""
# This part is about whether to include loss term 0 always.
if t0_always:
# loss_term_0 will be computed separately.
# estimator = loss_0 + loss_t, where t ~ U({1, ..., T})
lowest_t = 1
else:
# estimator = loss_t, where t ~ U({0, ..., T})
lowest_t = 0
# Sample a timestep t.
t_int = torch.randint(
lowest_t, self.T + 1, size=(x.size(0), 1), device=x.device).float()
s_int = t_int - 1
t_is_zero = (t_int == 0).float() # Important to compute log p(x | z0).
# Normalize t to [0, 1]. Note that the negative
# step of s will never be used, since then p(x | z0) is computed.
s = s_int / self.T
t = t_int / self.T
# Compute gamma_s and gamma_t via the network.
gamma_s = self.inflate_batch_array(self.gamma(s), x)
gamma_t = self.inflate_batch_array(self.gamma(t), x)
# Compute alpha_t and sigma_t from gamma.
alpha_t = self.alpha(gamma_t, x)
sigma_t = self.sigma(gamma_t, x)
# Sample zt ~ Normal(alpha_t x, sigma_t)
eps = self.sample_combined_position_feature_noise(
n_samples=x.size(0), n_nodes=x.size(1), node_mask=node_mask)
# Concatenate x, h[integer] and h[categorical].
xh = torch.cat([x, h['categorical'], h['integer']], dim=2)
# Sample z_t given x, h for timestep t, from q(z_t | x, h)
z_t = alpha_t * xh + sigma_t * eps
diffusion_utils.assert_mean_zero_with_mask(z_t[:, :, :self.n_dims], node_mask)
# Neural net prediction.
net_out = self.phi(z_t, t, node_mask, edge_mask, context)
# Compute the error.
error = self.compute_error(net_out, gamma_t, eps)
if self.training and self.loss_type == 'l2':
SNR_weight = torch.ones_like(error)
else:
# Compute weighting with SNR: (SNR(s-t) - 1) for epsilon parametrization.
SNR_weight = (self.SNR(gamma_s - gamma_t) - 1).squeeze(1).squeeze(1)
assert error.size() == SNR_weight.size()
loss_t_larger_than_zero = 0.5 * SNR_weight * error
# The _constants_ depending on sigma_0 from the
# cross entropy term E_q(z0 | x) [log p(x | z0)].
neg_log_constants = -self.log_constants_p_x_given_z0(x, node_mask)
# Reset constants during training with l2 loss.
if self.training and self.loss_type == 'l2':
neg_log_constants = torch.zeros_like(neg_log_constants)
# The KL between q(z1 | x) and p(z1) = Normal(0, 1). Should be close to zero.
kl_prior = self.kl_prior(xh, node_mask)
# Combining the terms
if t0_always:
loss_t = loss_t_larger_than_zero
num_terms = self.T # Since t=0 is not included here.
estimator_loss_terms = num_terms * loss_t
# Compute noise values for t = 0.
t_zeros = torch.zeros_like(s)
gamma_0 = self.inflate_batch_array(self.gamma(t_zeros), x)
alpha_0 = self.alpha(gamma_0, x)
sigma_0 = self.sigma(gamma_0, x)
# Sample z_0 given x, h for timestep t, from q(z_t | x, h)
eps_0 = self.sample_combined_position_feature_noise(
n_samples=x.size(0), n_nodes=x.size(1), node_mask=node_mask)
z_0 = alpha_0 * xh + sigma_0 * eps_0
net_out = self.phi(z_0, t_zeros, node_mask, edge_mask, context)
loss_term_0 = -self.log_pxh_given_z0_without_constants(
x, h, z_0, gamma_0, eps_0, net_out, node_mask)
assert kl_prior.size() == estimator_loss_terms.size()
assert kl_prior.size() == neg_log_constants.size()
assert kl_prior.size() == loss_term_0.size()
loss = kl_prior + estimator_loss_terms + neg_log_constants + loss_term_0
else:
# Computes the L_0 term (even if gamma_t is not actually gamma_0)
# and this will later be selected via masking.
loss_term_0 = -self.log_pxh_given_z0_without_constants(
x, h, z_t, gamma_t, eps, net_out, node_mask)
t_is_not_zero = 1 - t_is_zero
loss_t = loss_term_0 * t_is_zero.squeeze() + t_is_not_zero.squeeze() * loss_t_larger_than_zero
            # Only upweight the estimator if using the vlb objective.
if self.training and self.loss_type == 'l2':
estimator_loss_terms = loss_t
else:
num_terms = self.T + 1 # Includes t = 0.
estimator_loss_terms = num_terms * loss_t
assert kl_prior.size() == estimator_loss_terms.size()
assert kl_prior.size() == neg_log_constants.size()
loss = kl_prior + estimator_loss_terms + neg_log_constants
assert len(loss.shape) == 1, f'{loss.shape} has more than only batch dim.'
return loss, {'t': t_int.squeeze(), 'loss_t': loss.squeeze(),
'error': error.squeeze()}
def forward(self, x, h, node_mask=None, edge_mask=None, context=None):
"""
Computes the loss (type l2 or NLL) if training. And if eval then always computes NLL.
"""
# Normalize data, take into account volume change in x.
x, h, delta_log_px = self.normalize(x, h, node_mask)
# Reset delta_log_px if not vlb objective.
if self.training and self.loss_type == 'l2':
delta_log_px = torch.zeros_like(delta_log_px)
if self.training:
# Only 1 forward pass when t0_always is False.
loss, loss_dict = self.compute_loss(x, h, node_mask, edge_mask, context, t0_always=False)
else:
# Less variance in the estimator, costs two forward passes.
loss, loss_dict = self.compute_loss(x, h, node_mask, edge_mask, context, t0_always=True)
neg_log_pxh = loss
# Correct for normalization on x.
assert neg_log_pxh.size() == delta_log_px.size()
neg_log_pxh = neg_log_pxh - delta_log_px
return neg_log_pxh
def sample_p_zs_given_zt(self, s, t, zt, node_mask, edge_mask, context, fix_noise=False):
"""Samples from zs ~ p(zs | zt). Only used during sampling."""
gamma_s = self.gamma(s)
gamma_t = self.gamma(t)
sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s = \
self.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zt)
sigma_s = self.sigma(gamma_s, target_tensor=zt)
sigma_t = self.sigma(gamma_t, target_tensor=zt)
# Neural net prediction.
eps_t = self.phi(zt, t, node_mask, edge_mask, context)
# Compute mu for p(zs | zt).
""" These lines have been commented out!!!!!!!!!"""
#This may break everything
diffusion_utils.assert_mean_zero_with_mask(zt[:, :, :self.n_dims], node_mask)
diffusion_utils.assert_mean_zero_with_mask(eps_t[:, :, :self.n_dims], node_mask)
mu = zt / alpha_t_given_s - (sigma2_t_given_s / alpha_t_given_s / sigma_t) * eps_t
# Compute sigma for p(zs | zt).
sigma = sigma_t_given_s * sigma_s / sigma_t
        # Sample zs given the parameters derived from zt.
zs = self.sample_normal(mu, sigma, node_mask, fix_noise)
# Project down to avoid numerical runaway of the center of gravity.
zs = torch.cat(
[diffusion_utils.remove_mean_with_mask(zs[:, :, :self.n_dims],
node_mask),
zs[:, :, self.n_dims:]], dim=2
)
return zs
def sample_combined_position_feature_noise(self, n_samples, n_nodes, node_mask):
"""
Samples mean-centered normal noise for z_x, and standard normal noise for z_h.
"""
z_x = utils.sample_center_gravity_zero_gaussian_with_mask(
size=(n_samples, n_nodes, self.n_dims), device=node_mask.device,
node_mask=node_mask)
z_h = utils.sample_gaussian_with_mask(
size=(n_samples, n_nodes, self.in_node_nf), device=node_mask.device,
node_mask=node_mask)
z = torch.cat([z_x, z_h], dim=2)
return z
@torch.no_grad()
    def sample(self, n_samples, n_nodes, node_mask, edge_mask, context, fix_noise=False, silvr_rate=0.01, ref_coords=None, ref_node_mask=None, shift_centre=True, dataset_info=None):
"""
Draw samples from the generative model.
"""
        #self.in_node_nf
        #An additional column needs to be added to account for charge;
        #either that, or the remaining reference coordinates have
        #too many columns
if fix_noise:
# Noise is broadcasted over the batch axis, useful for visualizations.
z = self.sample_combined_position_feature_noise(1, n_nodes, node_mask)
else:
z = self.sample_combined_position_feature_noise(n_samples, n_nodes, node_mask)
#print("testing")
#print(z.size())
diffusion_utils.assert_mean_zero_with_mask(z[:, :, :self.n_dims], node_mask)
from equivariant_diffusion.utils import assert_mean_zero_with_mask, remove_mean_with_mask,\
assert_correctly_masked, sample_center_gravity_zero_gaussian_with_mask
data = np.asarray(ref_coords)
        #hard-coded
        #I have removed self. from all other occurrences
        #self.atomic_number_list = torch.Tensor([5, 6, 7, 8, 9, 13, 14, 15, 16, 17, 33, 35, 53, 80, 83])[None, :]
        #Due to the previous hard-coding this may break things:
        #the effect is that an additional 1 has been added to the beginning of the array/tensor
assert dataset_info
atomic_number_list = torch.Tensor(dataset_info["atomic_nb"])[None, :]
#print(atomic_number_list)
n_atom_types = atomic_number_list.size()[1]
#print(n_atom_types)
n = data.shape[0]
new_data = {}
new_data['positions'] = torch.from_numpy(data[:, -3:])
atom_types = torch.from_numpy(data[:, 0].astype(int)[:, None])
one_hot = atom_types == atomic_number_list
new_data['one_hot'] = one_hot
new_data['charges'] = torch.zeros(0,device=z.device)#, device=self.device
new_data['atom_mask'] = torch.ones(n,device=z.device)#, device=self.device
#r_node_mask = new_data['atom_mask'].unsqueeze(2)#.to(device, dtype).unsqueeze(2)
#r_edge_mask = new_data['edge_mask']#.to(device, dtype)
#r_charges = (new_data['charges'] if args.include_charges else torch.zeros(0))#.to(device, dtype)
#r_charges = torch.zeros(0)#This is hard coded as 19 atoms
#r_charges = torch.zeros((19,1)).to(device=z.device)
r_x = new_data['positions'].to(device=z.device)#.to(device, dtype)
r_node_mask = torch.unsqueeze(torch.ones(n), 1).to(device=z.device)
r_one_hot = new_data['one_hot'].to(device=z.device)#.to(device, dtype)
r_charges = torch.zeros((n,1)).to(device=z.device)
r_h = {'categorical': r_one_hot, 'integer': r_charges}
"""This should only need to be called once"""
#<start> was inside loop after sigma_t
# Concatenate x, h[integer] and h[categorical].
#Artifically increase tensor dimensions to 1,181,19
#Adding integer causes everything to break
#Don't know what do do here
#xh = torch.cat([r_x, r_h['categorical'], r_h['integer']], dim=1).to(device=z.device)#dim was 2
xh = torch.cat([r_x, r_h['categorical']], dim=1).to(device=z.device)#dim was 2
        # The padding width was previously hard coded as 19; it is now
        # n_atom_types + 3 (three xyz columns; a further +1 would be needed
        # if the integer charge column were included).
xh = torch.cat([xh, torch.zeros(181-xh.shape[0], n_atom_types+3,device=z.device)], dim=0).to(device=z.device)
#making xh correct dimensions
xh = torch.unsqueeze(xh, 0).to(device=z.device)
#Centering reference at zero
#"Error 32.199 too high"
#xh = diffusion_utils.remove_mean_with_mask(xh, node_mask2)
#Node mask without assertion
#Make this ref_node_mask - not great
#xh_xyz_only = xh.clone()
xh_xyz_only = xh[:, :, :3]
masked_max_abs_value = (xh_xyz_only * (1 - node_mask)).abs().sum().item()#node_mask2
N = node_mask.sum(1, keepdims=True)#node_mask2
mean = torch.sum(xh_xyz_only, dim=1, keepdim=True) / N
number_of_zeroes = xh.size()[2]-3#Probably a better place to find this
zero_padding_tensor = torch.zeros(1, 1, number_of_zeroes).to(device=z.device)
mean = torch.cat([mean, zero_padding_tensor], dim=2)
total_gravity_center_shift = mean
xh = xh - mean * node_mask#node_mask2
#diffusion_utils.assert_mean_zero_with_mask(xh[:, :, :self.n_dims], node_mask2)
#Error: Not masked properly?
#<end> was inside loop after sigma_t
#This was also inside loop
# Sample zt ~ Normal(alpha_t x, sigma_t)
        # NOTE: self.n_dims is the spatial dimension (3), not the node count.
# Sample z_t given x, h for timestep t, from q(z_t | x, h)
ref_nodes = torch.sum(ref_node_mask)
nodes = torch.sum(node_mask)
if nodes != ref_nodes:
#outpaint_factor = ref_nodes / (nodes-ref_nodes)
outpaint_factor = nodes / (nodes-ref_nodes)
else:
outpaint_factor = 1
#print(outpaint_factor)
#print(torch.sum(node_mask)-torch.sum(ref_node_mask))
#print(torch.sum(ref_node_mask))
dummy_mask = node_mask - ref_node_mask
animation = False
animation_array = []#Animation
if animation:
import pickle
with open("/content/e3_diffusion_for_molecules/outputs/mask.txt", "wb") as writefile:
pickle.dump(node_mask, writefile)
print("Pickle File Dumped")
# Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1.
for s in reversed(range(0, self.T)):
            if s % 100 == 0:
                pass  # optional progress print: print("sample loop:", s + 1)
#print(total_gravity_center_shift)
s_array = torch.full((n_samples, 1), fill_value=s, device=z.device)
t_array = s_array + 1
s_array = s_array / self.T
t_array = t_array / self.T
#ILVR variables
# Compute gamma_s and gamma_t via the network.
gamma_s = self.inflate_batch_array(self.gamma(s_array), r_x).to(device=z.device)#was s
gamma_t = self.inflate_batch_array(self.gamma(t_array), r_x).to(device=z.device)#was t
alpha_t = self.alpha(gamma_t, r_x).to(device=z.device)
sigma_t = self.sigma(gamma_t, r_x).to(device=z.device)
#Animation section
if animation:
total_gravity_center_shift = total_gravity_center_shift.type(z.dtype)
                # The accumulated centre-of-gravity shift also carries the
                # atom-type columns (probably a bug worth fixing), so zero
                # everything past the xyz columns before applying it.
total_gravity_center_shift[:, :, 3:] = 0
z_in = z + total_gravity_center_shift*node_mask#node_mask2
animation_array.append(self.sample_p_xh_given_z0(z_in, node_mask, edge_mask, context, fix_noise=fix_noise))
##############
#---------Gravity Alignments--------
#---------centering z------
z_xyz_only = z[:, :, :3]
N = node_mask.sum(1, keepdims=True)#node_mask2
mean = torch.sum(z_xyz_only, dim=1, keepdim=True) / N
mean = torch.cat([mean, zero_padding_tensor], dim=2)
z = z - mean*node_mask#node_mask2
#---------centering xh------
#Method 1
#Considering dummy atoms
xh = xh*ref_node_mask + z*(ref_node_mask - node_mask)
xh_xyz_only = xh[:, :, :3]
N = node_mask.sum(1, keepdims=True)#node_mask2
mean = torch.sum(xh_xyz_only, dim=1, keepdim=True) / N
mean = torch.cat([mean, zero_padding_tensor], dim=2)
xh = xh - mean*ref_node_mask#node_mask2
#---------centering xh------
#Method 2
            # as a function of the z mean, using the "outpaint factor"
#mean *= outpaint_factor
#xh = xh - mean*ref_node_mask#1.5 is good. and 40/9. 31/9
#-------Correction factor to reference--------
#When reference coordinates are translated
#Update this variable to allow product molecule to
#Be translated to binding site
total_gravity_center_shift += mean#*(outpaint_factor)#was minus
#--supressing one hot seemed to help previously--
#z_hot_only = z[:, :, 3:]
#N = node_mask.sum(1, keepdims=True)#node_mask2
#mean = torch.sum(z_hot_only, dim=1, keepdim=True) / N
#mean = torch.cat([zero_padding_tensor[:,:,:3], mean], dim=2)
#z = z - mean*node_mask
#--------Diffusion-----------
#sample reverse diffusion
#z must be centered at zero
z = self.sample_p_zs_given_zt(s_array, t_array, z, node_mask, edge_mask, context, fix_noise=fix_noise)
#----------SILVR----------
#was n_samples=1
eps = self.sample_combined_position_feature_noise(n_samples=n_samples,n_nodes=181, node_mask=node_mask).to(device=z.device)#node_mask2
z_t = alpha_t * xh + sigma_t * eps
z_t = z_t.to(torch.float32).to(device=z.device)
#SILVR equation
z = z - (z*alpha_t*ref_node_mask)*silvr_rate + (z_t*ref_node_mask)*silvr_rate#node_mask2
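            # Hedged reading of the SILVR update above: rearranging gives
            #   z <- z + silvr_rate * ref_node_mask * (z_t - alpha_t * z)
            # i.e. on reference atoms the latent is nudged towards a freshly
            # noised copy of the reference; silvr_rate = 0 recovers
            # unconditional sampling.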
#Fix Atom ID
#fix_atom_id_mask = None
#z = z*(1-fix_atom_id_mask)
#-----------comments only------------
            #This line is only used to determine if an explosion has occurred
#Used in centering at zero
#masked_max_abs_value = (z_xyz_only * (1 - node_mask)).abs().sum().item()#node_mask2
#Error too high
"""
z_t = torch.cat(
[diffusion_utils.remove_mean_with_mask(z_t[:, :, :self.n_dims],
node_mask2),
z_t[:, :, self.n_dims:]], dim=2)
"""
#All were z_t
#Combine dummy atoms to z_t
"""
masked_max_abs_value = (z_t * (1 - node_mask)).abs().sum().item()#node_mask2
N = node_mask.sum(1, keepdims=True)#node_mask2
mean = torch.sum(z_t, dim=1, keepdim=True) / N
total_gravity_center_shift -= mean
z_t = z_t - mean * node_mask#node_mask2
"""
#Note: mean seems to consider atom identity
#and coordinates.
#Consider only modifying xyz
#Note: Cloning is likely slow
#This is still wrong. The center of mass is now taking
#16 columns as 0
#Calculate centre of mass with only first 3 columns
#z_xyz_only = z.clone()
#z_xyz_only[:, :, 3:] = 0
#print(z_xyz_only.size())
#print(z.size())
            # ----EDITED--------
            # This used to operate on z, now on z_xyz_only. Produced molecules
            # seemed better when the mean was subtracted across the entire
            # tensor rather than just the atom coordinates; perhaps keep the
            # one-hot part small. Updating only the xyz columns (commented
            # line below) works, but not as well.
#z[:, :, :3] = z[:, :, :3] - mean[:, :, :3]*node_mask#node_mask2
            # NOTE: it is unclear exactly why the xh correction works. After
            # much trial and error, multiplying by a factor > 1 (outpainting
            # only) allows single connected molecules to form; the ratio may
            # be related to the lever rule (levers and a fulcrum). Better
            # molecules are produced with no xh correction at all, but
            # outpainting seems to need it. Consider applying the correction
            # only to dummy atoms (dummy_mask): correcting with
            # outpaint_factor may pull each fragment towards the balance
            # point, which has the effect of combining fragments together.
#Error too high
"""
z = torch.cat(
[diffusion_utils.remove_mean_with_mask(z[:, :, :self.n_dims],
node_mask),
z[:, :, self.n_dims:]], dim=2)
"""
#----set xh additional atoms to that of the live z----
#additional_atom_mask = (node_mask - ref_node_mask)
#xh = xh*(1-additional_atom_mask) + z*additional_atom_mask#Who knows
            # By tracking all the centre-of-gravity shifts it should be
            # possible to obtain the coordinates of the generated molecule
            # directly in the same coordinate frame as the reference protein.
if shift_centre:
#print("shifting centre")
#Only works when ref = no.mols
total_gravity_center_shift = total_gravity_center_shift.type(z.dtype)
            # The accumulated centre-of-gravity shift also carries the
            # atom-type columns (probably a bug worth fixing), so zero
            # everything past the xyz columns before applying it.
total_gravity_center_shift[:, :, 3:] = 0
z = z + total_gravity_center_shift*node_mask#node_mask2
# Finally sample p(x, h | z_0).
x, h = self.sample_p_xh_given_z0(z, node_mask, edge_mask, context, fix_noise=fix_noise)
####Animation
if animation:
animation_array.append(self.sample_p_xh_given_z0(z, node_mask, edge_mask, context, fix_noise=fix_noise))
#animation_array.append(z_t*node_mask2)
#Final entry in animation array is the reference molecule - note nodemask2 is needed
#As z_t contains a dummy atom to account for centre of mass issue
import pickle
with open("/content/e3_diffusion_for_molecules/outputs/animation.txt", "wb") as writefile:
pickle.dump(animation_array, writefile)
print("Pickle File Dumped")
#######
        # The lines below fixed the centre of gravity but are commented out
        # while experimenting; disabling them may break things.
        #diffusion_utils.assert_mean_zero_with_mask(x, node_mask)
        # The block below (also disabled) centres the molecule at 0; however,
        # we want the actual coordinates w.r.t. the protein, so the drifting
        # correction has been deleted.
"""
max_cog = torch.sum(x, dim=1, keepdim=True).abs().max().item()
if max_cog > 5e-2:
print(f'Warning cog drift with error {max_cog:.3f}. Projecting '
f'the positions down.')
x = diffusion_utils.remove_mean_with_mask(x, node_mask)
"""
return x, h
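    # Hedged usage sketch for the SILVR-style sampler above (`model`, the
    # masks and `ref_coords` are assumed to be prepared by the caller;
    # ref_coords is (n_ref, 4) with column 0 the atomic number and the last
    # three columns the xyz position, matching how `data` is parsed above):
    #
    #   x, h = model.sample(
    #       n_samples=1, n_nodes=181,
    #       node_mask=node_mask, edge_mask=edge_mask, context=None,
    #       silvr_rate=0.01,
    #       ref_coords=ref_coords, ref_node_mask=ref_node_mask,
    #       shift_centre=True, dataset_info=dataset_info)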
@torch.no_grad()
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
        if noise is None:
            noise = torch.randn_like(x_start)
        assert noise.shape == x_start.shape
        # NOTE: assumes self.sqrt_alphas_cumprod and
        # self.sqrt_one_minus_alphas_cumprod buffers have been precomputed.
        return (
            self._extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
            + self._extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)
            * noise
        )
    @staticmethod
    @torch.no_grad()
    def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
        res = torch.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res.expand(broadcast_shape)
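    # Hedged example of the broadcasting above: for a length-T numpy array and
    # a batch of two timesteps, the result is expanded to the sample shape:
    #
    #   arr = np.linspace(0.0, 1.0, 10)
    #   t = torch.tensor([0, 9])
    #   self._extract_into_tensor(arr, t, (2, 5, 3)).shape  # (2, 5, 3)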
#------------------End of new code--------------------------
@torch.no_grad()
def sample_chain(self, n_samples, n_nodes, node_mask, edge_mask, context, keep_frames=None):
"""
Draw samples from the generative model, keep the intermediate states for visualization purposes.
"""
z = self.sample_combined_position_feature_noise(n_samples, n_nodes, node_mask)
diffusion_utils.assert_mean_zero_with_mask(z[:, :, :self.n_dims], node_mask)
if keep_frames is None:
keep_frames = self.T
else:
assert keep_frames <= self.T
chain = torch.zeros((keep_frames,) + z.size(), device=z.device)
# Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1.
for s in reversed(range(0, self.T)):
s_array = torch.full((n_samples, 1), fill_value=s, device=z.device)
t_array = s_array + 1
s_array = s_array / self.T
t_array = t_array / self.T
z = self.sample_p_zs_given_zt(
s_array, t_array, z, node_mask, edge_mask, context)
diffusion_utils.assert_mean_zero_with_mask(z[:, :, :self.n_dims], node_mask)
# Write to chain tensor.
write_index = (s * keep_frames) // self.T
chain[write_index] = self.unnormalize_z(z, node_mask)
# Finally sample p(x, h | z_0).
x, h = self.sample_p_xh_given_z0(z, node_mask, edge_mask, context)
diffusion_utils.assert_mean_zero_with_mask(x[:, :, :self.n_dims], node_mask)
xh = torch.cat([x, h['categorical'], h['integer']], dim=2)
chain[0] = xh # Overwrite last frame with the resulting x and h.
chain_flat = chain.view(n_samples * keep_frames, *z.size()[1:])
return chain_flat
def log_info(self):
"""
Some info logging of the model.
"""
gamma_0 = self.gamma(torch.zeros(1, device=self.buffer.device))
gamma_1 = self.gamma(torch.ones(1, device=self.buffer.device))
log_SNR_max = -gamma_0
log_SNR_min = -gamma_1
info = {
'log_SNR_max': log_SNR_max.item(),
'log_SNR_min': log_SNR_min.item()}
print(info)
return info | 48,360 | 38.510621 | 178 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/losses.py | import torch
def sum_except_batch(x):
return x.view(x.size(0), -1).sum(dim=-1)
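# Hedged example: sum_except_batch flattens all non-batch axes before summing,
# so a (2, 3, 4) tensor of ones gives a length-2 vector of per-sample sums:
#
#   x = torch.ones(2, 3, 4)
#   sum_except_batch(x)  # tensor([12., 12.])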
def assert_correctly_masked(variable, node_mask):
assert (variable * (1 - node_mask)).abs().sum().item() < 1e-8
def compute_loss_and_nll(args, generative_model, nodes_dist, x, h, node_mask, edge_mask, context):
bs, n_nodes, n_dims = x.size()
if args.probabilistic_model == 'diffusion':
edge_mask = edge_mask.view(bs, n_nodes * n_nodes)
assert_correctly_masked(x, node_mask)
# Here x is a position tensor, and h is a dictionary with keys
# 'categorical' and 'integer'.
nll = generative_model(x, h, node_mask, edge_mask, context)
N = node_mask.squeeze(2).sum(1).long()
log_pN = nodes_dist.log_prob(N)
assert nll.size() == log_pN.size()
nll = nll - log_pN
# Average over batch.
nll = nll.mean(0)
reg_term = torch.tensor([0.]).to(nll.device)
mean_abs_z = 0.
else:
raise ValueError(args.probabilistic_model)
return nll, reg_term, mean_abs_z
| 1,067 | 25.04878 | 98 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/rdkit_functions.py | from rdkit import Chem
import numpy as np
from qm9.bond_analyze import get_bond_order, geom_predictor
from . import dataset
import torch
from configs.datasets_config import get_dataset_info
import pickle
import os
def compute_qm9_smiles(dataset_name, remove_h):
'''
:param dataset_name: qm9 or qm9_second_half
:return:
'''
print("\tConverting QM9 dataset to SMILES ...")
class StaticArgs:
def __init__(self, dataset, remove_h):
self.dataset = dataset
self.batch_size = 1
self.num_workers = 1
self.filter_n_atoms = None
self.datadir = 'qm9/temp'
self.remove_h = remove_h
self.include_charges = True
args_dataset = StaticArgs(dataset_name, remove_h)
dataloaders, charge_scale = dataset.retrieve_dataloaders(args_dataset)
dataset_info = get_dataset_info(args_dataset.dataset, args_dataset.remove_h)
n_types = 4 if remove_h else 5
mols_smiles = []
for i, data in enumerate(dataloaders['train']):
positions = data['positions'][0].view(-1, 3).numpy()
one_hot = data['one_hot'][0].view(-1, n_types).type(torch.float32)
atom_type = torch.argmax(one_hot, dim=1).numpy()
mol = build_molecule(torch.tensor(positions), torch.tensor(atom_type), dataset_info)
mol = mol2smiles(mol)
if mol is not None:
mols_smiles.append(mol)
if i % 1000 == 0:
print("\tConverting QM9 dataset to SMILES {0:.2%}".format(float(i)/len(dataloaders['train'])))
return mols_smiles
def retrieve_qm9_smiles(dataset_info):
dataset_name = dataset_info['name']
if dataset_info['with_h']:
pickle_name = dataset_name
else:
pickle_name = dataset_name + '_noH'
file_name = 'qm9/temp/%s_smiles.pickle' % pickle_name
try:
with open(file_name, 'rb') as f:
qm9_smiles = pickle.load(f)
return qm9_smiles
except OSError:
try:
os.makedirs('qm9/temp')
except:
pass
qm9_smiles = compute_qm9_smiles(dataset_name, remove_h=not dataset_info['with_h'])
with open(file_name, 'wb') as f:
pickle.dump(qm9_smiles, f)
return qm9_smiles
#### New implementation ####
bond_dict = [None, Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE,
Chem.rdchem.BondType.AROMATIC]
class BasicMolecularMetrics(object):
def __init__(self, dataset_info, dataset_smiles_list=None):
self.atom_decoder = dataset_info['atom_decoder']
self.dataset_smiles_list = dataset_smiles_list
self.dataset_info = dataset_info
# Retrieve dataset smiles only for qm9 currently.
if dataset_smiles_list is None and 'qm9' in dataset_info['name']:
self.dataset_smiles_list = retrieve_qm9_smiles(
self.dataset_info)
def compute_validity(self, generated):
""" generated: list of couples (positions, atom_types)"""
valid = []
for graph in generated:
mol = build_molecule(*graph, self.dataset_info)
smiles = mol2smiles(mol)
if smiles is not None:
mol_frags = Chem.rdmolops.GetMolFrags(mol, asMols=True)
largest_mol = max(mol_frags, default=mol, key=lambda m: m.GetNumAtoms())
smiles = mol2smiles(largest_mol)
valid.append(smiles)
return valid, len(valid) / len(generated)
def compute_uniqueness(self, valid):
""" valid: list of SMILES strings."""
return list(set(valid)), len(set(valid)) / len(valid)
def compute_novelty(self, unique):
num_novel = 0
novel = []
for smiles in unique:
if smiles not in self.dataset_smiles_list:
novel.append(smiles)
num_novel += 1
return novel, num_novel / len(unique)
def evaluate(self, generated):
""" generated: list of pairs (positions: n x 3, atom_types: n [int])
the positions and atom types should already be masked. """
valid, validity = self.compute_validity(generated)
print(f"Validity over {len(generated)} molecules: {validity * 100 :.2f}%")
if validity > 0:
unique, uniqueness = self.compute_uniqueness(valid)
print(f"Uniqueness over {len(valid)} valid molecules: {uniqueness * 100 :.2f}%")
if self.dataset_smiles_list is not None:
_, novelty = self.compute_novelty(unique)
print(f"Novelty over {len(unique)} unique valid molecules: {novelty * 100 :.2f}%")
else:
novelty = 0.0
else:
novelty = 0.0
uniqueness = 0.0
unique = None
return [validity, uniqueness, novelty], unique
def mol2smiles(mol):
try:
Chem.SanitizeMol(mol)
except ValueError:
return None
return Chem.MolToSmiles(mol)
def build_molecule(positions, atom_types, dataset_info):
atom_decoder = dataset_info["atom_decoder"]
X, A, E = build_xae_molecule(positions, atom_types, dataset_info)
mol = Chem.RWMol()
for atom in X:
a = Chem.Atom(atom_decoder[atom.item()])
mol.AddAtom(a)
all_bonds = torch.nonzero(A)
for bond in all_bonds:
mol.AddBond(bond[0].item(), bond[1].item(), bond_dict[E[bond[0], bond[1]].item()])
return mol
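# Hedged usage sketch for build_molecule/mol2smiles above (toy water-like
# geometry; `atom_encoder` is assumed to map element symbols to indices, as in
# dataset_info['atom_encoder']):
#
#   atom_encoder = dataset_info['atom_encoder']
#   pos = torch.tensor([[0.00, 0.00, 0.0],
#                       [0.96, 0.00, 0.0],
#                       [-0.24, 0.93, 0.0]])
#   types = torch.tensor([atom_encoder['O'], atom_encoder['H'], atom_encoder['H']])
#   mol = build_molecule(pos, types, dataset_info)
#   mol2smiles(mol)  # 'O' if the inferred O-H bonds are valid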
def build_xae_molecule(positions, atom_types, dataset_info):
""" Returns a triplet (X, A, E): atom_types, adjacency matrix, edge_types
args:
positions: N x 3 (already masked to keep final number nodes)
atom_types: N
returns:
X: N (int)
A: N x N (bool) (binary adjacency matrix)
E: N x N (int) (bond type, 0 if no bond) such that A = E.bool()
"""
atom_decoder = dataset_info['atom_decoder']
n = positions.shape[0]
X = atom_types
A = torch.zeros((n, n), dtype=torch.bool)
E = torch.zeros((n, n), dtype=torch.int)
pos = positions.unsqueeze(0)
dists = torch.cdist(pos, pos, p=2).squeeze(0)
for i in range(n):
for j in range(i):
pair = sorted([atom_types[i], atom_types[j]])
if dataset_info['name'] == 'qm9' or dataset_info['name'] == 'qm9_second_half' or dataset_info['name'] == 'qm9_first_half':
order = get_bond_order(atom_decoder[pair[0]], atom_decoder[pair[1]], dists[i, j])
elif dataset_info['name'] == 'geom':
order = geom_predictor((atom_decoder[pair[0]], atom_decoder[pair[1]]), dists[i, j], limit_bonds_to_one=True)
# TODO: a batched version of get_bond_order to avoid the for loop
if order > 0:
# Warning: the graph should be DIRECTED
A[i, j] = 1
E[i, j] = order
return X, A, E
if __name__ == '__main__':
smiles_mol = 'C1CCC1'
print("Smiles mol %s" % smiles_mol)
chem_mol = Chem.MolFromSmiles(smiles_mol)
block_mol = Chem.MolToMolBlock(chem_mol)
print("Block mol:")
print(block_mol)
| 7,154 | 35.136364 | 134 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/utils.py | import torch
def compute_mean_mad(dataloaders, properties, dataset_name):
if dataset_name == 'qm9':
return compute_mean_mad_from_dataloader(dataloaders['train'], properties)
    elif dataset_name == 'qm9_second_half':
return compute_mean_mad_from_dataloader(dataloaders['valid'], properties)
else:
raise Exception('Wrong dataset name')
def compute_mean_mad_from_dataloader(dataloader, properties):
property_norms = {}
for property_key in properties:
values = dataloader.dataset.data[property_key]
mean = torch.mean(values)
ma = torch.abs(values - mean)
mad = torch.mean(ma)
property_norms[property_key] = {}
property_norms[property_key]['mean'] = mean
property_norms[property_key]['mad'] = mad
return property_norms
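# Hedged numeric example of the mean/MAD normalisation computed above:
#
#   values = torch.tensor([1., 2., 3., 6.])
#   mean = values.mean()                 # 3.0
#   mad = (values - mean).abs().mean()   # 1.5
#   normalised = (values - mean) / mad   # tensor([-1.3333, -0.6667, 0.0, 2.0])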
edges_dic = {}
def get_adj_matrix(n_nodes, batch_size, device):
if n_nodes in edges_dic:
edges_dic_b = edges_dic[n_nodes]
if batch_size in edges_dic_b:
return edges_dic_b[batch_size]
else:
            # build fully-connected edges for every sample in the batch
rows, cols = [], []
for batch_idx in range(batch_size):
for i in range(n_nodes):
for j in range(n_nodes):
rows.append(i + batch_idx*n_nodes)
cols.append(j + batch_idx*n_nodes)
else:
edges_dic[n_nodes] = {}
return get_adj_matrix(n_nodes, batch_size, device)
    edges = [torch.LongTensor(rows).to(device), torch.LongTensor(cols).to(device)]
    # Cache the result so repeated calls with the same sizes reuse it; note
    # that the cached tensors keep the device they were first built on.
    edges_dic[n_nodes][batch_size] = edges
    return edges
def preprocess_input(one_hot, charges, charge_power, charge_scale, device):
charge_tensor = (charges.unsqueeze(-1) / charge_scale).pow(
torch.arange(charge_power + 1., device=device, dtype=torch.float32))
charge_tensor = charge_tensor.view(charges.shape + (1, charge_power + 1))
atom_scalars = (one_hot.unsqueeze(-1) * charge_tensor).view(charges.shape[:2] + (-1,))
return atom_scalars
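# Hedged shape walk-through for preprocess_input above: with one_hot of shape
# (B, N, n_types), charges of shape (B, N) and charge_power p, charge_tensor
# is reshaped to (B, N, 1, p + 1) and the returned atom_scalars have shape
# (B, N, n_types * (p + 1)):
#
#   one_hot = torch.zeros(2, 5, 5); charges = torch.ones(2, 5)
#   preprocess_input(one_hot, charges, 2, 9., 'cpu').shape  # (2, 5, 15)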
def prepare_context(conditioning, minibatch, property_norms):
batch_size, n_nodes, _ = minibatch['positions'].size()
node_mask = minibatch['atom_mask'].unsqueeze(2)
context_node_nf = 0
context_list = []
for key in conditioning:
properties = minibatch[key]
properties = (properties - property_norms[key]['mean']) / property_norms[key]['mad']
if len(properties.size()) == 1:
# Global feature.
assert properties.size() == (batch_size,)
reshaped = properties.view(batch_size, 1, 1).repeat(1, n_nodes, 1)
context_list.append(reshaped)
context_node_nf += 1
elif len(properties.size()) == 2 or len(properties.size()) == 3:
# Node feature.
assert properties.size()[:2] == (batch_size, n_nodes)
context_key = properties
# Inflate if necessary.
if len(properties.size()) == 2:
context_key = context_key.unsqueeze(2)
context_list.append(context_key)
context_node_nf += context_key.size(2)
else:
raise ValueError('Invalid tensor size, more than 3 axes.')
# Concatenate
context = torch.cat(context_list, dim=2)
# Mask disabled nodes!
context = context * node_mask
assert context.size(2) == context_node_nf
return context
| 3,400 | 36.373626 | 92 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/dataset.py | from torch.utils.data import DataLoader
from qm9.data.args import init_argparse
from qm9.data.collate import PreprocessQM9
from qm9.data.utils import initialize_datasets
import os
def retrieve_dataloaders(cfg):
if 'qm9' in cfg.dataset:
batch_size = cfg.batch_size
num_workers = cfg.num_workers
filter_n_atoms = cfg.filter_n_atoms
# Initialize dataloader
args = init_argparse('qm9')
# data_dir = cfg.data_root_dir
args, datasets, num_species, charge_scale = initialize_datasets(args, cfg.datadir, cfg.dataset,
subtract_thermo=args.subtract_thermo,
force_download=args.force_download,
remove_h=cfg.remove_h)
qm9_to_eV = {'U0': 27.2114, 'U': 27.2114, 'G': 27.2114, 'H': 27.2114, 'zpve': 27211.4, 'gap': 27.2114, 'homo': 27.2114,
'lumo': 27.2114}
for dataset in datasets.values():
dataset.convert_units(qm9_to_eV)
if filter_n_atoms is not None:
print("Retrieving molecules with only %d atoms" % filter_n_atoms)
datasets = filter_atoms(datasets, filter_n_atoms)
# Construct PyTorch dataloaders from datasets
preprocess = PreprocessQM9(load_charges=cfg.include_charges)
dataloaders = {split: DataLoader(dataset,
batch_size=batch_size,
shuffle=args.shuffle if (split == 'train') else False,
num_workers=num_workers,
collate_fn=preprocess.collate_fn)
for split, dataset in datasets.items()}
elif 'geom' in cfg.dataset:
import build_geom_dataset
from configs.datasets_config import get_dataset_info
data_file = './data/geom/geom_drugs_30.npy'
dataset_info = get_dataset_info(cfg.dataset, cfg.remove_h)
# Retrieve QM9 dataloaders
split_data = build_geom_dataset.load_split_data(data_file,
val_proportion=0.1,
test_proportion=0.1,
filter_size=cfg.filter_molecule_size)
transform = build_geom_dataset.GeomDrugsTransform(dataset_info,
cfg.include_charges,
cfg.device,
cfg.sequential)
dataloaders = {}
for key, data_list in zip(['train', 'val', 'test'], split_data):
dataset = build_geom_dataset.GeomDrugsDataset(data_list,
transform=transform)
shuffle = (key == 'train') and not cfg.sequential
# Sequential dataloading disabled for now.
dataloaders[key] = build_geom_dataset.GeomDrugsDataLoader(
sequential=cfg.sequential, dataset=dataset,
batch_size=cfg.batch_size,
shuffle=shuffle)
del split_data
charge_scale = None
else:
raise ValueError(f'Unknown dataset {cfg.dataset}')
return dataloaders, charge_scale
def filter_atoms(datasets, n_nodes):
for key in datasets:
dataset = datasets[key]
idxs = dataset.data['num_atoms'] == n_nodes
for key2 in dataset.data:
dataset.data[key2] = dataset.data[key2][idxs]
datasets[key].num_pts = dataset.data['one_hot'].size(0)
datasets[key].perm = None
return datasets | 3,840 | 46.419753 | 127 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/sampling.py | import numpy as np
import torch
import torch.nn.functional as F
from equivariant_diffusion.utils import assert_mean_zero_with_mask, remove_mean_with_mask,\
assert_correctly_masked
from qm9.analyze import check_stability
def rotate_chain(z):
assert z.size(0) == 1
z_h = z[:, :, 3:]
n_steps = 30
theta = 0.6 * np.pi / n_steps
Qz = torch.tensor(
[[np.cos(theta), -np.sin(theta), 0.],
[np.sin(theta), np.cos(theta), 0.],
[0., 0., 1.]]
).float()
Qx = torch.tensor(
[[1., 0., 0.],
[0., np.cos(theta), -np.sin(theta)],
[0., np.sin(theta), np.cos(theta)]]
).float()
Qy = torch.tensor(
[[np.cos(theta), 0., np.sin(theta)],
[0., 1., 0.],
[-np.sin(theta), 0., np.cos(theta)]]
).float()
Q = torch.mm(torch.mm(Qz, Qx), Qy)
Q = Q.to(z.device)
results = []
results.append(z)
for i in range(n_steps):
z_x = results[-1][:, :, :3]
# print(z_x.size(), Q.size())
new_x = torch.matmul(z_x.view(-1, 3), Q.T).view(1, -1, 3)
# print(new_x.size())
new_z = torch.cat([new_x, z_h], dim=2)
results.append(new_z)
results = torch.cat(results, dim=0)
return results
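# Hedged sanity check for rotate_chain above: Q, as constructed inside the
# function, is a composition of rotation matrices, so it should be orthogonal
# and the rotated chain should preserve pairwise distances:
#
#   torch.allclose(Q @ Q.T, torch.eye(3), atol=1e-6)  # True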
def reverse_tensor(x):
return x[torch.arange(x.size(0) - 1, -1, -1)]
def sample_chain(args, device, flow, n_tries, dataset_info, prop_dist=None):
n_samples = 1
if args.dataset == 'qm9' or args.dataset == 'qm9_second_half' or args.dataset == 'qm9_first_half':
n_nodes = 19
elif args.dataset == 'geom':
n_nodes = 44
else:
raise ValueError()
# TODO FIX: This conditioning just zeros.
if args.context_node_nf > 0:
context = prop_dist.sample(n_nodes).unsqueeze(1).unsqueeze(0)
context = context.repeat(1, n_nodes, 1).to(device)
#context = torch.zeros(n_samples, n_nodes, args.context_node_nf).to(device)
else:
context = None
node_mask = torch.ones(n_samples, n_nodes, 1).to(device)
edge_mask = (1 - torch.eye(n_nodes)).unsqueeze(0)
edge_mask = edge_mask.repeat(n_samples, 1, 1).view(-1, 1).to(device)
if args.probabilistic_model == 'diffusion':
one_hot, charges, x = None, None, None
for i in range(n_tries):
chain = flow.sample_chain(n_samples, n_nodes, node_mask, edge_mask, context, keep_frames=100)
chain = reverse_tensor(chain)
# Repeat last frame to see final sample better.
chain = torch.cat([chain, chain[-1:].repeat(10, 1, 1)], dim=0)
x = chain[-1:, :, 0:3]
one_hot = chain[-1:, :, 3:-1]
one_hot = torch.argmax(one_hot, dim=2)
atom_type = one_hot.squeeze(0).cpu().detach().numpy()
x_squeeze = x.squeeze(0).cpu().detach().numpy()
mol_stable = check_stability(x_squeeze, atom_type, dataset_info)[0]
# Prepare entire chain.
x = chain[:, :, 0:3]
one_hot = chain[:, :, 3:-1]
one_hot = F.one_hot(torch.argmax(one_hot, dim=2), num_classes=len(dataset_info['atom_decoder']))
charges = torch.round(chain[:, :, -1:]).long()
if mol_stable:
print('Found stable molecule to visualize :)')
break
elif i == n_tries - 1:
print('Did not find stable molecule, showing last sample.')
else:
raise ValueError
return one_hot, charges, x
def sample(args, device, generative_model, dataset_info,
prop_dist=None, nodesxsample=torch.tensor([10]), context=None,
fix_noise=False):
max_n_nodes = dataset_info['max_n_nodes'] # this is the maximum node_size in QM9
assert int(torch.max(nodesxsample)) <= max_n_nodes
batch_size = len(nodesxsample)
node_mask = torch.zeros(batch_size, max_n_nodes)
for i in range(batch_size):
node_mask[i, 0:nodesxsample[i]] = 1
# Compute edge_mask
edge_mask = node_mask.unsqueeze(1) * node_mask.unsqueeze(2)
diag_mask = ~torch.eye(edge_mask.size(1), dtype=torch.bool).unsqueeze(0)
edge_mask *= diag_mask
edge_mask = edge_mask.view(batch_size * max_n_nodes * max_n_nodes, 1).to(device)
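    # Hedged illustration of the edge-mask construction above: for one sample
    # with two of three nodes active, the outer product marks all active
    # pairs and the diagonal mask removes self-edges:
    #
    #   m = torch.tensor([[1., 1., 0.]])
    #   e = m.unsqueeze(1) * m.unsqueeze(2)          # (1, 3, 3) pair mask
    #   e = e * ~torch.eye(3, dtype=torch.bool)      # zero the diagonal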
node_mask = node_mask.unsqueeze(2).to(device)
# TODO FIX: This conditioning just zeros.
if args.context_node_nf > 0:
if context is None:
context = prop_dist.sample_batch(nodesxsample)
context = context.unsqueeze(1).repeat(1, max_n_nodes, 1).to(device) * node_mask
else:
context = None
if args.probabilistic_model == 'diffusion':
x, h = generative_model.sample(batch_size, max_n_nodes, node_mask, edge_mask, context, fix_noise=fix_noise)
assert_correctly_masked(x, node_mask)
assert_mean_zero_with_mask(x, node_mask)
one_hot = h['categorical']
charges = h['integer']
assert_correctly_masked(one_hot.float(), node_mask)
if args.include_charges:
assert_correctly_masked(charges.float(), node_mask)
else:
raise ValueError(args.probabilistic_model)
return one_hot, charges, x, node_mask
def sample_sweep_conditional(args, device, generative_model, dataset_info, prop_dist, n_nodes=19, n_frames=100):
nodesxsample = torch.tensor([n_nodes] * n_frames)
context = []
for key in prop_dist.distributions:
min_val, max_val = prop_dist.distributions[key][n_nodes]['params']
mean, mad = prop_dist.normalizer[key]['mean'], prop_dist.normalizer[key]['mad']
min_val = (min_val - mean) / (mad)
max_val = (max_val - mean) / (mad)
context_row = torch.tensor(np.linspace(min_val, max_val, n_frames)).unsqueeze(1)
context.append(context_row)
context = torch.cat(context, dim=1).float().to(device)
one_hot, charges, x, node_mask = sample(args, device, generative_model, dataset_info, prop_dist, nodesxsample=nodesxsample, context=context, fix_noise=True)
return one_hot, charges, x, node_mask | 6,002 | 34.105263 | 160 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/visualizer.py | import torch
import numpy as np
import os
import glob
import random
import matplotlib
import imageio
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from qm9 import bond_analyze
##############
### Files ####
###########-->
def save_xyz_file(path, one_hot, charges, positions, dataset_info, id_from=0, name='molecule', node_mask=None):
try:
os.makedirs(path)
except OSError:
pass
if node_mask is not None:
atomsxmol = torch.sum(node_mask, dim=1)
else:
atomsxmol = [one_hot.size(1)] * one_hot.size(0)
for batch_i in range(one_hot.size(0)):
f = open(path + name + '_' + "%03d.txt" % (batch_i + id_from), "w")
f.write("%d\n\n" % atomsxmol[batch_i])
atoms = torch.argmax(one_hot[batch_i], dim=1)
n_atoms = int(atomsxmol[batch_i])
for atom_i in range(n_atoms):
atom = atoms[atom_i]
atom = dataset_info['atom_decoder'][atom]
f.write("%s %.9f %.9f %.9f\n" % (atom, positions[batch_i, atom_i, 0], positions[batch_i, atom_i, 1], positions[batch_i, atom_i, 2]))
f.close()
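# Hedged example of the file produced by save_xyz_file above (XYZ-style: atom
# count, blank line, then one "element x y z" row per atom):
#
#   3
#
#   O 0.000000000 0.000000000 0.000000000
#   H 0.960000000 0.000000000 0.000000000
#   H -0.240000000 0.930000000 0.000000000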
def load_molecule_xyz(file, dataset_info):
with open(file, encoding='utf8') as f:
n_atoms = int(f.readline())
one_hot = torch.zeros(n_atoms, len(dataset_info['atom_decoder']))
charges = torch.zeros(n_atoms, 1)
positions = torch.zeros(n_atoms, 3)
f.readline()
atoms = f.readlines()
for i in range(n_atoms):
atom = atoms[i].split(' ')
atom_type = atom[0]
one_hot[i, dataset_info['atom_encoder'][atom_type]] = 1
position = torch.Tensor([float(e) for e in atom[1:]])
positions[i, :] = position
return positions, one_hot, charges
def load_xyz_files(path, shuffle=True):
files = glob.glob(path + "/*.txt")
if shuffle:
random.shuffle(files)
return files
#<----########
### Files ####
##############
def draw_sphere(ax, x, y, z, size, color, alpha):
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
xs = size * np.outer(np.cos(u), np.sin(v))
ys = size * np.outer(np.sin(u), np.sin(v)) * 0.8 # Correct for matplotlib.
zs = size * np.outer(np.ones(np.size(u)), np.cos(v))
# for i in range(2):
# ax.plot_surface(x+random.randint(-5,5), y+random.randint(-5,5), z+random.randint(-5,5), rstride=4, cstride=4, color='b', linewidth=0, alpha=0.5)
ax.plot_surface(x + xs, y + ys, z + zs, rstride=2, cstride=2, color=color, linewidth=0,
alpha=alpha)
# # calculate vectors for "vertical" circle
# a = np.array([-np.sin(elev / 180 * np.pi), 0, np.cos(elev / 180 * np.pi)])
# b = np.array([0, 1, 0])
# b = b * np.cos(rot) + np.cross(a, b) * np.sin(rot) + a * np.dot(a, b) * (
# 1 - np.cos(rot))
# ax.plot(np.sin(u), np.cos(u), 0, color='k', linestyle='dashed')
# horiz_front = np.linspace(0, np.pi, 100)
# ax.plot(np.sin(horiz_front), np.cos(horiz_front), 0, color='k')
# vert_front = np.linspace(np.pi / 2, 3 * np.pi / 2, 100)
# ax.plot(a[0] * np.sin(u) + b[0] * np.cos(u), b[1] * np.cos(u),
# a[2] * np.sin(u) + b[2] * np.cos(u), color='k', linestyle='dashed')
# ax.plot(a[0] * np.sin(vert_front) + b[0] * np.cos(vert_front),
# b[1] * np.cos(vert_front),
# a[2] * np.sin(vert_front) + b[2] * np.cos(vert_front), color='k')
#
# ax.view_init(elev=elev, azim=0)
def plot_molecule(ax, positions, atom_type, alpha, spheres_3d, hex_bg_color,
dataset_info):
# draw_sphere(ax, 0, 0, 0, 1)
# draw_sphere(ax, 1, 1, 1, 1)
x = positions[:, 0]
y = positions[:, 1]
z = positions[:, 2]
# Hydrogen, Carbon, Nitrogen, Oxygen, Flourine
# ax.set_facecolor((1.0, 0.47, 0.42))
colors_dic = np.array(dataset_info['colors_dic'])
radius_dic = np.array(dataset_info['radius_dic'])
area_dic = 1500 * radius_dic ** 2
# areas_dic = sizes_dic * sizes_dic * 3.1416
areas = area_dic[atom_type]
radii = radius_dic[atom_type]
colors = colors_dic[atom_type]
if spheres_3d:
for i, j, k, s, c in zip(x, y, z, radii, colors):
draw_sphere(ax, i.item(), j.item(), k.item(), 0.7 * s, c, alpha)
else:
ax.scatter(x, y, z, s=areas, alpha=0.9 * alpha,
c=colors) # , linewidths=2, edgecolors='#FFFFFF')
for i in range(len(x)):
for j in range(i + 1, len(x)):
p1 = np.array([x[i], y[i], z[i]])
p2 = np.array([x[j], y[j], z[j]])
dist = np.sqrt(np.sum((p1 - p2) ** 2))
atom1, atom2 = dataset_info['atom_decoder'][atom_type[i]], \
dataset_info['atom_decoder'][atom_type[j]]
s = sorted((atom_type[i], atom_type[j]))
pair = (dataset_info['atom_decoder'][s[0]],
dataset_info['atom_decoder'][s[1]])
if 'qm9' in dataset_info['name']:
draw_edge_int = bond_analyze.get_bond_order(atom1, atom2, dist)
line_width = (3 - 2) * 2 * 2
elif dataset_info['name'] == 'geom':
draw_edge_int = bond_analyze.geom_predictor(pair, dist)
# Draw edge outputs 1 / -1 value, convert to True / False.
line_width = 2
else:
raise Exception('Wrong dataset_info name')
draw_edge = draw_edge_int > 0
if draw_edge:
if draw_edge_int == 4:
linewidth_factor = 1.5
else:
# linewidth_factor = draw_edge_int # Prop to number of
# edges.
linewidth_factor = 1
ax.plot([x[i], x[j]], [y[i], y[j]], [z[i], z[j]],
linewidth=line_width * linewidth_factor,
c=hex_bg_color, alpha=alpha)
def plot_data3d(positions, atom_type, dataset_info, camera_elev=0, camera_azim=0, save_path=None, spheres_3d=False,
bg='black', alpha=1.):
black = (0, 0, 0)
white = (1, 1, 1)
hex_bg_color = '#FFFFFF' if bg == 'black' else '#666666'
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_aspect('auto')
ax.view_init(elev=camera_elev, azim=camera_azim)
if bg == 'black':
ax.set_facecolor(black)
else:
ax.set_facecolor(white)
# ax.xaxis.pane.set_edgecolor('#D0D0D0')
ax.xaxis.pane.set_alpha(0)
ax.yaxis.pane.set_alpha(0)
ax.zaxis.pane.set_alpha(0)
ax._axis3don = False
if bg == 'black':
ax.w_xaxis.line.set_color("black")
else:
ax.w_xaxis.line.set_color("white")
plot_molecule(ax, positions, atom_type, alpha, spheres_3d,
hex_bg_color, dataset_info)
if 'qm9' in dataset_info['name']:
max_value = positions.abs().max().item()
# axis_lim = 3.2
axis_lim = min(40, max(max_value / 1.5 + 0.3, 3.2))
ax.set_xlim(-axis_lim, axis_lim)
ax.set_ylim(-axis_lim, axis_lim)
ax.set_zlim(-axis_lim, axis_lim)
elif dataset_info['name'] == 'geom':
max_value = positions.abs().max().item()
# axis_lim = 3.2
axis_lim = min(40, max(max_value / 1.5 + 0.3, 3.2))
ax.set_xlim(-axis_lim, axis_lim)
ax.set_ylim(-axis_lim, axis_lim)
ax.set_zlim(-axis_lim, axis_lim)
else:
raise ValueError(dataset_info['name'])
dpi = 120 if spheres_3d else 50
if save_path is not None:
plt.savefig(save_path, bbox_inches='tight', pad_inches=0.0, dpi=dpi)
if spheres_3d:
img = imageio.imread(save_path)
img_brighter = np.clip(img * 1.4, 0, 255).astype('uint8')
imageio.imsave(save_path, img_brighter)
else:
plt.show()
plt.close()
def plot_data3d_uncertainty(
all_positions, all_atom_types, dataset_info, camera_elev=0, camera_azim=0,
save_path=None, spheres_3d=False, bg='black', alpha=1.):
black = (0, 0, 0)
white = (1, 1, 1)
hex_bg_color = '#FFFFFF' if bg == 'black' else '#666666'
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_aspect('auto')
ax.view_init(elev=camera_elev, azim=camera_azim)
if bg == 'black':
ax.set_facecolor(black)
else:
ax.set_facecolor(white)
# ax.xaxis.pane.set_edgecolor('#D0D0D0')
ax.xaxis.pane.set_alpha(0)
ax.yaxis.pane.set_alpha(0)
ax.zaxis.pane.set_alpha(0)
ax._axis3don = False
if bg == 'black':
ax.w_xaxis.line.set_color("black")
else:
ax.w_xaxis.line.set_color("white")
for i in range(len(all_positions)):
positions = all_positions[i]
atom_type = all_atom_types[i]
plot_molecule(ax, positions, atom_type, alpha, spheres_3d,
hex_bg_color, dataset_info)
if 'qm9' in dataset_info['name']:
max_value = all_positions[0].abs().max().item()
# axis_lim = 3.2
axis_lim = min(40, max(max_value + 0.3, 3.2))
ax.set_xlim(-axis_lim, axis_lim)
ax.set_ylim(-axis_lim, axis_lim)
ax.set_zlim(-axis_lim, axis_lim)
elif dataset_info['name'] == 'geom':
max_value = all_positions[0].abs().max().item()
# axis_lim = 3.2
axis_lim = min(40, max(max_value / 2 + 0.3, 3.2))
ax.set_xlim(-axis_lim, axis_lim)
ax.set_ylim(-axis_lim, axis_lim)
ax.set_zlim(-axis_lim, axis_lim)
else:
raise ValueError(dataset_info['name'])
dpi = 120 if spheres_3d else 50
if save_path is not None:
plt.savefig(save_path, bbox_inches='tight', pad_inches=0.0, dpi=dpi)
if spheres_3d:
img = imageio.imread(save_path)
img_brighter = np.clip(img * 1.4, 0, 255).astype('uint8')
imageio.imsave(save_path, img_brighter)
else:
plt.show()
plt.close()
def plot_grid():
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
im1 = np.arange(100).reshape((10, 10))
im2 = im1.T
im3 = np.flipud(im1)
im4 = np.fliplr(im2)
fig = plt.figure(figsize=(10., 10.))
grid = ImageGrid(fig, 111, # similar to subplot(111)
nrows_ncols=(6, 6), # creates 2x2 grid of axes
axes_pad=0.1, # pad between axes in inch.
)
for ax, im in zip(grid, [im1, im2, im3, im4]):
# Iterating over the grid returns the Axes.
ax.imshow(im)
plt.show()
def visualize(path, dataset_info, max_num=25, wandb=None, spheres_3d=False):
files = load_xyz_files(path)[0:max_num]
for file in files:
positions, one_hot, charges = load_molecule_xyz(file, dataset_info)
atom_type = torch.argmax(one_hot, dim=1).numpy()
dists = torch.cdist(positions.unsqueeze(0), positions.unsqueeze(0)).squeeze(0)
dists = dists[dists > 0]
print("Average distance between atoms", dists.mean().item())
plot_data3d(positions, atom_type, dataset_info=dataset_info, save_path=file[:-4] + '.png',
spheres_3d=spheres_3d)
if wandb is not None:
path = file[:-4] + '.png'
# Log image(s)
im = plt.imread(path)
wandb.log({'molecule': [wandb.Image(im, caption=path)]})
def visualize_chain(path, dataset_info, wandb=None, spheres_3d=False,
mode="chain"):
files = load_xyz_files(path)
files = sorted(files)
save_paths = []
for i in range(len(files)):
file = files[i]
positions, one_hot, charges = load_molecule_xyz(file, dataset_info=dataset_info)
atom_type = torch.argmax(one_hot, dim=1).numpy()
fn = file[:-4] + '.png'
plot_data3d(positions, atom_type, dataset_info=dataset_info,
save_path=fn, spheres_3d=spheres_3d, alpha=1.0)
save_paths.append(fn)
imgs = [imageio.imread(fn) for fn in save_paths]
dirname = os.path.dirname(save_paths[0])
gif_path = dirname + '/output.gif'
print(f'Creating gif with {len(imgs)} images')
# Add the last frame 10 times so that the final result remains temporally.
# imgs.extend([imgs[-1]] * 10)
imageio.mimsave(gif_path, imgs, subrectangles=True)
if wandb is not None:
wandb.log({mode: [wandb.Video(gif_path, caption=gif_path)]})
def visualize_chain_uncertainty(
path, dataset_info, wandb=None, spheres_3d=False, mode="chain"):
files = load_xyz_files(path)
files = sorted(files)
save_paths = []
for i in range(len(files)):
if i + 2 == len(files):
break
file = files[i]
file2 = files[i+1]
file3 = files[i+2]
positions, one_hot, _ = load_molecule_xyz(file, dataset_info=dataset_info)
positions2, one_hot2, _ = load_molecule_xyz(
file2, dataset_info=dataset_info)
positions3, one_hot3, _ = load_molecule_xyz(
file3, dataset_info=dataset_info)
all_positions = torch.stack([positions, positions2, positions3], dim=0)
one_hot = torch.stack([one_hot, one_hot2, one_hot3], dim=0)
all_atom_type = torch.argmax(one_hot, dim=2).numpy()
fn = file[:-4] + '.png'
plot_data3d_uncertainty(
all_positions, all_atom_type, dataset_info=dataset_info,
save_path=fn, spheres_3d=spheres_3d, alpha=0.5)
save_paths.append(fn)
imgs = [imageio.imread(fn) for fn in save_paths]
dirname = os.path.dirname(save_paths[0])
gif_path = dirname + '/output.gif'
print(f'Creating gif with {len(imgs)} images')
# Add the last frame 10 times so that the final result remains temporally.
# imgs.extend([imgs[-1]] * 10)
imageio.mimsave(gif_path, imgs, subrectangles=True)
if wandb is not None:
wandb.log({mode: [wandb.Video(gif_path, caption=gif_path)]})
if __name__ == '__main__':
#plot_grid()
import qm9.dataset as dataset
from configs.datasets_config import qm9_with_h, geom_with_h
matplotlib.use('macosx')
task = "visualize_molecules"
task_dataset = 'geom'
if task_dataset == 'qm9':
dataset_info = qm9_with_h
class Args:
batch_size = 1
num_workers = 0
filter_n_atoms = None
datadir = 'qm9/temp'
dataset = 'qm9'
remove_h = False
include_charges = True
cfg = Args()
dataloaders, charge_scale = dataset.retrieve_dataloaders(cfg)
for i, data in enumerate(dataloaders['train']):
positions = data['positions'].view(-1, 3)
positions_centered = positions - positions.mean(dim=0, keepdim=True)
one_hot = data['one_hot'].view(-1, 5).type(torch.float32)
atom_type = torch.argmax(one_hot, dim=1).numpy()
plot_data3d(
positions_centered, atom_type, dataset_info=dataset_info,
spheres_3d=True)
elif task_dataset == 'geom':
files = load_xyz_files('outputs/data')
matplotlib.use('macosx')
for file in files:
x, one_hot, _ = load_molecule_xyz(file, dataset_info=geom_with_h)
positions = x.view(-1, 3)
positions_centered = positions - positions.mean(dim=0, keepdim=True)
one_hot = one_hot.view(-1, 16).type(torch.float32)
atom_type = torch.argmax(one_hot, dim=1).numpy()
mask = (x == 0).sum(1) != 3
positions_centered = positions_centered[mask]
atom_type = atom_type[mask]
plot_data3d(
positions_centered, atom_type, dataset_info=geom_with_h,
spheres_3d=False)
else:
raise ValueError(dataset)
| 15,855 | 34.002208 | 154 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/models.py | import torch
from torch.distributions.categorical import Categorical
import numpy as np
from egnn.models import EGNN_dynamics_QM9
from equivariant_diffusion.en_diffusion import EnVariationalDiffusion
def get_model(args, device, dataset_info, dataloader_train):
histogram = dataset_info['n_nodes']
in_node_nf = len(dataset_info['atom_decoder']) + int(args.include_charges)
nodes_dist = DistributionNodes(histogram)
prop_dist = None
if len(args.conditioning) > 0:
prop_dist = DistributionProperty(dataloader_train, args.conditioning)
if args.condition_time:
dynamics_in_node_nf = in_node_nf + 1
else:
print('Warning: dynamics model is _not_ conditioned on time.')
dynamics_in_node_nf = in_node_nf
net_dynamics = EGNN_dynamics_QM9(
in_node_nf=dynamics_in_node_nf, context_node_nf=args.context_node_nf,
n_dims=3, device=device, hidden_nf=args.nf,
act_fn=torch.nn.SiLU(), n_layers=args.n_layers,
attention=args.attention, tanh=args.tanh, mode=args.model, norm_constant=args.norm_constant,
inv_sublayers=args.inv_sublayers, sin_embedding=args.sin_embedding,
normalization_factor=args.normalization_factor, aggregation_method=args.aggregation_method)
if args.probabilistic_model == 'diffusion':
vdm = EnVariationalDiffusion(
dynamics=net_dynamics,
in_node_nf=in_node_nf,
n_dims=3,
timesteps=args.diffusion_steps,
noise_schedule=args.diffusion_noise_schedule,
noise_precision=args.diffusion_noise_precision,
loss_type=args.diffusion_loss_type,
norm_values=args.normalize_factors,
include_charges=args.include_charges
)
return vdm, nodes_dist, prop_dist
else:
raise ValueError(args.probabilistic_model)
def get_optim(args, generative_model):
optim = torch.optim.AdamW(
generative_model.parameters(),
lr=args.lr, amsgrad=True,
weight_decay=1e-12)
return optim
class DistributionNodes:
def __init__(self, histogram):
self.n_nodes = []
prob = []
self.keys = {}
for i, nodes in enumerate(histogram):
self.n_nodes.append(nodes)
self.keys[nodes] = i
prob.append(histogram[nodes])
self.n_nodes = torch.tensor(self.n_nodes)
prob = np.array(prob)
prob = prob/np.sum(prob)
self.prob = torch.from_numpy(prob).float()
        entropy = -torch.sum(self.prob * torch.log(self.prob + 1e-30))  # H[N] = -sum p log p
print("Entropy of n_nodes: H[N]", entropy.item())
self.m = Categorical(torch.tensor(prob))
def sample(self, n_samples=1):
idx = self.m.sample((n_samples,))
return self.n_nodes[idx]
def log_prob(self, batch_n_nodes):
assert len(batch_n_nodes.size()) == 1
idcs = [self.keys[i.item()] for i in batch_n_nodes]
idcs = torch.tensor(idcs).to(batch_n_nodes.device)
log_p = torch.log(self.prob + 1e-30)
log_p = log_p.to(batch_n_nodes.device)
log_probs = log_p[idcs]
return log_probs
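    # Hedged usage sketch for this class (toy histogram: 100 molecules with
    # 19 atoms, 50 with 20):
    #
    #   dist = DistributionNodes({19: 100, 20: 50})
    #   sizes = dist.sample(4)    # e.g. tensor([19, 20, 19, 19])
    #   dist.log_prob(sizes)      # log-probabilities of each drawn size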
class DistributionProperty:
def __init__(self, dataloader, properties, num_bins=1000, normalizer=None):
self.num_bins = num_bins
self.distributions = {}
self.properties = properties
for prop in properties:
self.distributions[prop] = {}
self._create_prob_dist(dataloader.dataset.data['num_atoms'],
dataloader.dataset.data[prop],
self.distributions[prop])
self.normalizer = normalizer
def set_normalizer(self, normalizer):
self.normalizer = normalizer
def _create_prob_dist(self, nodes_arr, values, distribution):
min_nodes, max_nodes = torch.min(nodes_arr), torch.max(nodes_arr)
for n_nodes in range(int(min_nodes), int(max_nodes) + 1):
idxs = nodes_arr == n_nodes
values_filtered = values[idxs]
if len(values_filtered) > 0:
probs, params = self._create_prob_given_nodes(values_filtered)
distribution[n_nodes] = {'probs': probs, 'params': params}
def _create_prob_given_nodes(self, values):
n_bins = self.num_bins #min(self.num_bins, len(values))
prop_min, prop_max = torch.min(values), torch.max(values)
prop_range = prop_max - prop_min + 1e-12
histogram = torch.zeros(n_bins)
for val in values:
i = int((val - prop_min)/prop_range * n_bins)
            # Because of numerical precision, one sample can fall in bin n_bins
            # instead of n_bins - 1; move it to bin n_bins - 1 if that happens.
if i == n_bins:
i = n_bins - 1
histogram[i] += 1
probs = histogram / torch.sum(histogram)
        probs = Categorical(probs)  # probs is already a tensor; no need to re-wrap
params = [prop_min, prop_max]
return probs, params
def normalize_tensor(self, tensor, prop):
assert self.normalizer is not None
mean = self.normalizer[prop]['mean']
mad = self.normalizer[prop]['mad']
return (tensor - mean) / mad
def sample(self, n_nodes=19):
vals = []
for prop in self.properties:
dist = self.distributions[prop][n_nodes]
idx = dist['probs'].sample((1,))
val = self._idx2value(idx, dist['params'], len(dist['probs'].probs))
val = self.normalize_tensor(val, prop)
vals.append(val)
vals = torch.cat(vals)
return vals
def sample_batch(self, nodesxsample):
vals = []
for n_nodes in nodesxsample:
vals.append(self.sample(int(n_nodes)).unsqueeze(0))
vals = torch.cat(vals, dim=0)
return vals
def _idx2value(self, idx, params, n_bins):
prop_range = params[1] - params[0]
left = float(idx) / n_bins * prop_range + params[0]
right = float(idx + 1) / n_bins * prop_range + params[0]
val = torch.rand(1) * (right - left) + left
return val
if __name__ == '__main__':
    # DistributionNodes requires a histogram argument; a toy one is used here.
    dist_nodes = DistributionNodes({19: 100, 20: 50})
print(dist_nodes.n_nodes)
print(dist_nodes.prob)
for i in range(10):
print(dist_nodes.sample())
| 6,334 | 34 | 109 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/analyze.py | try:
from rdkit import Chem
from qm9.rdkit_functions import BasicMolecularMetrics
use_rdkit = True
except ModuleNotFoundError:
use_rdkit = False
import qm9.dataset as dataset
import torch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sp_stats
from qm9 import bond_analyze
# 'atom_decoder': ['H', 'B', 'C', 'N', 'O', 'F', 'Al', 'Si', 'P', 'S', 'Cl', 'As', 'Br', 'I', 'Hg', 'Bi'],
analyzed_19 ={'atom_types': {1: 93818, 3: 21212, 0: 139496, 2: 8251, 4: 26},
'distances': [0, 0, 0, 0, 0, 0, 0, 22566, 258690, 16534, 50256, 181302, 19676, 122590, 23874, 54834, 309290, 205426, 172004, 229940, 193180, 193058, 161294, 178292, 152184, 157242, 189186, 150298, 125750, 147020, 127574, 133654, 142696, 125906, 98168, 95340, 88632, 80694, 71750, 64466, 55740, 44570, 42850, 36084, 29310, 27268, 23696, 20254, 17112, 14130, 12220, 10660, 9112, 7640, 6378, 5350, 4384, 3650, 2840, 2362, 2050, 1662, 1414, 1216, 966, 856, 492, 516, 420, 326, 388, 326, 236, 140, 130, 92, 62, 52, 78, 56, 24, 8, 10, 12, 18, 2, 10, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
}
class Histogram_discrete:
def __init__(self, name='histogram'):
self.name = name
self.bins = {}
def add(self, elements):
for e in elements:
if e in self.bins:
self.bins[e] += 1
else:
self.bins[e] = 1
def normalize(self):
total = 0.
for key in self.bins:
total += self.bins[key]
for key in self.bins:
self.bins[key] = self.bins[key] / total
def plot(self, save_path=None):
width = 1 # the width of the bars
fig, ax = plt.subplots()
x, y = [], []
for key in self.bins:
x.append(key)
y.append(self.bins[key])
ax.bar(x, y, width)
plt.title(self.name)
if save_path is not None:
plt.savefig(save_path)
else:
plt.show()
plt.close()
class Histogram_cont:
def __init__(self, num_bins=100, range=(0., 13.), name='histogram', ignore_zeros=False):
self.name = name
self.bins = [0] * num_bins
self.range = range
self.ignore_zeros = ignore_zeros
def add(self, elements):
for e in elements:
if not self.ignore_zeros or e > 1e-8:
i = int(float(e) / self.range[1] * len(self.bins))
i = min(i, len(self.bins) - 1)
self.bins[i] += 1
def plot(self, save_path=None):
width = (self.range[1] - self.range[0])/len(self.bins) # the width of the bars
fig, ax = plt.subplots()
x = np.linspace(self.range[0], self.range[1], num=len(self.bins) + 1)[:-1] + width / 2
ax.bar(x, self.bins, width)
plt.title(self.name)
if save_path is not None:
plt.savefig(save_path)
else:
plt.show()
plt.close()
def plot_both(self, hist_b, save_path=None, wandb=None):
## TO DO: Check if the relation of bins and linspace is correct
hist_a = normalize_histogram(self.bins)
hist_b = normalize_histogram(hist_b)
#width = (self.range[1] - self.range[0]) / len(self.bins) # the width of the bars
fig, ax = plt.subplots()
x = np.linspace(self.range[0], self.range[1], num=len(self.bins) + 1)[:-1]
ax.step(x, hist_b)
ax.step(x, hist_a)
ax.legend(['True', 'Learned'])
plt.title(self.name)
if save_path is not None:
plt.savefig(save_path)
            if wandb is not None:
                # Log image(s)
                im = plt.imread(save_path)
                wandb.log({save_path: [wandb.Image(im, caption=save_path)]})
else:
plt.show()
plt.close()
def normalize_histogram(hist):
hist = np.array(hist)
prob = hist / np.sum(hist)
return prob
def coord2distances(x):
x = x.unsqueeze(2)
x_t = x.transpose(1, 2)
dist = (x - x_t) ** 2
dist = torch.sqrt(torch.sum(dist, 3))
dist = dist.flatten()
return dist
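# Hedged example for coord2distances above: the flattened output includes the
# zero self-distances, so two points at distance 5 give four entries:
#
#   x = torch.tensor([[[0., 0., 0.], [3., 4., 0.]]])
#   coord2distances(x)  # tensor([0., 5., 5., 0.])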
def earth_mover_distance(h1, h2):
p1 = normalize_histogram(h1)
p2 = normalize_histogram(h2)
distance = sp_stats.wasserstein_distance(p1, p2)
return distance
def kl_divergence(p1, p2):
return np.sum(p1*np.log(p1 / p2))
def kl_divergence_sym(h1, h2):
p1 = normalize_histogram(h1) + 1e-10
p2 = normalize_histogram(h2) + 1e-10
kl = kl_divergence(p1, p2)
kl_flipped = kl_divergence(p2, p1)
return (kl + kl_flipped) / 2.
def js_divergence(h1, h2):
p1 = normalize_histogram(h1) + 1e-10
p2 = normalize_histogram(h2) + 1e-10
M = (p1 + p2)/2
js = (kl_divergence(p1, M) + kl_divergence(p2, M)) / 2
return js
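# Hedged sanity checks for the divergences above: for identical histograms
# both should vanish (up to the 1e-10 smoothing terms):
#
#   h = [1, 2, 3]
#   kl_divergence_sym(h, h)  # ~0.0
#   js_divergence(h, h)      # ~0.0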
def main_analyze_qm9(remove_h: bool, dataset_name='qm9', n_atoms=None):
class DataLoaderConfig(object):
def __init__(self):
self.batch_size = 128
self.remove_h = remove_h
self.filter_n_atoms = n_atoms
self.num_workers = 0
self.include_charges = True
self.dataset = dataset_name #could be qm9, qm9_first_half or qm9_second_half
self.datadir = 'qm9/temp'
cfg = DataLoaderConfig()
dataloaders, charge_scale = dataset.retrieve_dataloaders(cfg)
hist_nodes = Histogram_discrete('Histogram # nodes')
hist_atom_type = Histogram_discrete('Histogram of atom types')
hist_dist = Histogram_cont(name='Histogram relative distances', ignore_zeros=True)
for i, data in enumerate(dataloaders['train']):
print(i * cfg.batch_size)
# Histogram num_nodes
num_nodes = torch.sum(data['atom_mask'], dim=1)
num_nodes = list(num_nodes.numpy())
hist_nodes.add(num_nodes)
#Histogram edge distances
x = data['positions'] * data['atom_mask'].unsqueeze(2)
dist = coord2distances(x)
hist_dist.add(list(dist.numpy()))
# Histogram of atom types
one_hot = data['one_hot'].double()
atom = torch.argmax(one_hot, 2)
atom = atom.flatten()
mask = data['atom_mask'].flatten()
masked_atoms = list(atom[mask].numpy())
hist_atom_type.add(masked_atoms)
hist_dist.plot()
hist_dist.plot_both(hist_dist.bins[::-1])
print("KL divergence A %.4f" % kl_divergence_sym(hist_dist.bins, hist_dist.bins[::-1]))
print("KL divergence B %.4f" % kl_divergence_sym(hist_dist.bins, hist_dist.bins))
print(hist_dist.bins)
hist_nodes.plot()
print("Histogram of the number of nodes", hist_nodes.bins)
hist_atom_type.plot()
print(" Histogram of the atom types (H (optional), C, N, O, F)", hist_atom_type.bins)
############################
# Validity and bond analysis
def check_stability(positions, atom_type, dataset_info, debug=False):
assert len(positions.shape) == 2
assert positions.shape[1] == 3
atom_decoder = dataset_info['atom_decoder']
x = positions[:, 0]
y = positions[:, 1]
z = positions[:, 2]
nr_bonds = np.zeros(len(x), dtype='int')
for i in range(len(x)):
for j in range(i + 1, len(x)):
p1 = np.array([x[i], y[i], z[i]])
p2 = np.array([x[j], y[j], z[j]])
dist = np.sqrt(np.sum((p1 - p2) ** 2))
atom1, atom2 = atom_decoder[atom_type[i]], atom_decoder[atom_type[j]]
pair = sorted([atom_type[i], atom_type[j]])
if dataset_info['name'] == 'qm9' or dataset_info['name'] == 'qm9_second_half' or dataset_info['name'] == 'qm9_first_half':
order = bond_analyze.get_bond_order(atom1, atom2, dist)
elif dataset_info['name'] == 'geom':
order = bond_analyze.geom_predictor(
(atom_decoder[pair[0]], atom_decoder[pair[1]]), dist)
nr_bonds[i] += order
nr_bonds[j] += order
nr_stable_bonds = 0
for atom_type_i, nr_bonds_i in zip(atom_type, nr_bonds):
possible_bonds = bond_analyze.allowed_bonds[atom_decoder[atom_type_i]]
if type(possible_bonds) == int:
is_stable = possible_bonds == nr_bonds_i
else:
is_stable = nr_bonds_i in possible_bonds
if not is_stable and debug:
print("Invalid bonds for molecule %s with %d bonds" % (atom_decoder[atom_type_i], nr_bonds_i))
nr_stable_bonds += int(is_stable)
molecule_stable = nr_stable_bonds == len(x)
return molecule_stable, nr_stable_bonds, len(x)
def process_loader(dataloader):
""" Mask atoms, return positions and atom types"""
out = []
for data in dataloader:
for i in range(data['positions'].size(0)):
positions = data['positions'][i].view(-1, 3)
one_hot = data['one_hot'][i].view(-1, 5).type(torch.float32)
mask = data['atom_mask'][i].flatten()
positions, one_hot = positions[mask], one_hot[mask]
atom_type = torch.argmax(one_hot, dim=1)
out.append((positions, atom_type))
return out
def main_check_stability(remove_h: bool, batch_size=32):
from configs import datasets_config
import qm9.dataset as dataset
class Config:
def __init__(self):
self.batch_size = batch_size
self.num_workers = 0
self.remove_h = remove_h
self.filter_n_atoms = None
self.datadir = 'qm9/temp'
self.dataset = 'qm9'
self.include_charges = True
self.filter_molecule_size = None
self.sequential = False
cfg = Config()
dataset_info = datasets_config.qm9_with_h
dataloaders, charge_scale = dataset.retrieve_dataloaders(cfg)
if use_rdkit:
from qm9.rdkit_functions import BasicMolecularMetrics
metrics = BasicMolecularMetrics(dataset_info)
atom_decoder = dataset_info['atom_decoder']
def test_validity_for(dataloader):
count_mol_stable = 0
count_atm_stable = 0
count_mol_total = 0
count_atm_total = 0
for [positions, atom_types] in dataloader:
is_stable, nr_stable, total = check_stability(
positions, atom_types, dataset_info)
count_atm_stable += nr_stable
count_atm_total += total
count_mol_stable += int(is_stable)
count_mol_total += 1
print(f"Stable molecules "
f"{100. * count_mol_stable/count_mol_total:.2f} \t"
f"Stable atoms: "
f"{100. * count_atm_stable/count_atm_total:.2f} \t"
f"Counted molecules {count_mol_total}/{len(dataloader)*batch_size}")
train_loader = process_loader(dataloaders['train'])
test_loader = process_loader(dataloaders['test'])
if use_rdkit:
print('For test')
metrics.evaluate(test_loader)
print('For train')
metrics.evaluate(train_loader)
else:
print('For train')
test_validity_for(train_loader)
print('For test')
test_validity_for(test_loader)
def analyze_stability_for_molecules(molecule_list, dataset_info):
one_hot = molecule_list['one_hot']
x = molecule_list['x']
node_mask = molecule_list['node_mask']
if isinstance(node_mask, torch.Tensor):
atomsxmol = torch.sum(node_mask, dim=1)
else:
atomsxmol = [torch.sum(m) for m in node_mask]
n_samples = len(x)
molecule_stable = 0
nr_stable_bonds = 0
n_atoms = 0
processed_list = []
for i in range(n_samples):
atom_type = one_hot[i].argmax(1).cpu().detach()
pos = x[i].cpu().detach()
atom_type = atom_type[0:int(atomsxmol[i])]
pos = pos[0:int(atomsxmol[i])]
processed_list.append((pos, atom_type))
for mol in processed_list:
pos, atom_type = mol
validity_results = check_stability(pos, atom_type, dataset_info)
molecule_stable += int(validity_results[0])
nr_stable_bonds += int(validity_results[1])
n_atoms += int(validity_results[2])
# Validity
fraction_mol_stable = molecule_stable / float(n_samples)
fraction_atm_stable = nr_stable_bonds / float(n_atoms)
validity_dict = {
'mol_stable': fraction_mol_stable,
'atm_stable': fraction_atm_stable,
}
if use_rdkit:
metrics = BasicMolecularMetrics(dataset_info)
rdkit_metrics = metrics.evaluate(processed_list)
#print("Unique molecules:", rdkit_metrics[1])
return validity_dict, rdkit_metrics
else:
return validity_dict, None
def analyze_node_distribution(mol_list, save_path):
hist_nodes = Histogram_discrete('Histogram # nodes (stable molecules)')
hist_atom_type = Histogram_discrete('Histogram of atom types')
for molecule in mol_list:
positions, atom_type = molecule
hist_nodes.add([positions.shape[0]])
hist_atom_type.add(atom_type)
print("Histogram of #nodes")
print(hist_nodes.bins)
print("Histogram of # atom types")
print(hist_atom_type.bins)
hist_nodes.normalize()
if __name__ == '__main__':
# main_analyze_qm9(remove_h=False, dataset_name='qm9')
main_check_stability(remove_h=False)
| 13,305 | 32.686076 | 594 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/property_prediction/main_qm9_prop.py | import sys, os
sys.path.append(os.path.abspath(os.path.join('../../')))
from qm9.property_prediction.models_property import EGNN, Naive, NumNodes
import torch
from torch import nn, optim
import argparse
from qm9.property_prediction import prop_utils
import json
from qm9 import dataset, utils
import pickle
loss_l1 = nn.L1Loss()
def train(model, epoch, loader, mean, mad, property, device, partition='train', optimizer=None, lr_scheduler=None, log_interval=20, debug_break=False):
    if partition == 'train':
        lr_scheduler.step()  # train() is called once per epoch, so this is one scheduler step per epoch
res = {'loss': 0, 'counter': 0, 'loss_arr':[]}
for i, data in enumerate(loader):
if partition == 'train':
model.train()
optimizer.zero_grad()
else:
model.eval()
batch_size, n_nodes, _ = data['positions'].size()
atom_positions = data['positions'].view(batch_size * n_nodes, -1).to(device, torch.float32)
atom_mask = data['atom_mask'].view(batch_size * n_nodes, -1).to(device, torch.float32)
edge_mask = data['edge_mask'].to(device, torch.float32)
nodes = data['one_hot'].to(device, torch.float32)
#charges = data['charges'].to(device, dtype).squeeze(2)
#nodes = prop_utils.preprocess_input(one_hot, charges, args.charge_power, charge_scale, device)
nodes = nodes.view(batch_size * n_nodes, -1)
# nodes = torch.cat([one_hot, charges], dim=1)
edges = prop_utils.get_adj_matrix(n_nodes, batch_size, device)
label = data[property].to(device, torch.float32)
'''
print("Positions mean")
print(torch.mean(torch.abs(atom_positions)))
print("Positions max")
print(torch.max(atom_positions))
print("Positions min")
print(torch.min(atom_positions))
print("\nOne hot mean")
print(torch.mean(torch.abs(nodes)))
print("one_hot max")
print(torch.max(nodes))
print("one_hot min")
print(torch.min(nodes))
print("\nLabel mean")
print(torch.mean(torch.abs(label)))
print("label max")
print(torch.max(label))
print("label min")
print(torch.min(label))
'''
pred = model(h0=nodes, x=atom_positions, edges=edges, edge_attr=None, node_mask=atom_mask, edge_mask=edge_mask,
n_nodes=n_nodes)
# print("\nPred mean")
# print(torch.mean(torch.abs(pred)))
# print("Pred max")
# print(torch.max(pred))
# print("Pred min")
# print(torch.min(pred))
if partition == 'train':
loss = loss_l1(pred, (label - mean) / mad)
loss.backward()
optimizer.step()
else:
loss = loss_l1(mad * pred + mean, label)
res['loss'] += loss.item() * batch_size
res['counter'] += batch_size
res['loss_arr'].append(loss.item())
prefix = ""
if partition != 'train':
prefix = ">> %s \t" % partition
if i % log_interval == 0:
print(prefix + "Epoch %d \t Iteration %d \t loss %.4f" % (epoch, i, sum(res['loss_arr'][-10:])/len(res['loss_arr'][-10:])))
if debug_break:
break
return res['loss'] / res['counter']
def test(model, epoch, loader, mean, mad, property, device, log_interval, debug_break=False):
return train(model, epoch, loader, mean, mad, property, device, partition='test', log_interval=log_interval, debug_break=debug_break)
def get_model(args):
if args.model_name == 'egnn':
model = EGNN(in_node_nf=5, in_edge_nf=0, hidden_nf=args.nf, device=args.device, n_layers=args.n_layers,
coords_weight=1.0,
attention=args.attention, node_attr=args.node_attr)
elif args.model_name == 'naive':
model = Naive(device=args.device)
elif args.model_name == 'numnodes':
model = NumNodes(device=args.device)
else:
raise Exception("Wrong model name %s" % args.model_name)
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='QM9 Example')
parser.add_argument('--exp_name', type=str, default='debug', metavar='N',
help='experiment_name')
    parser.add_argument('--batch_size', type=int, default=96, metavar='N',
                        help='input batch size for training (default: 96)')
    parser.add_argument('--epochs', type=int, default=1000, metavar='N',
                        help='number of epochs to train (default: 1000)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log_interval', type=int, default=20, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--test_interval', type=int, default=1, metavar='N',
help='how many epochs to wait before logging test')
    parser.add_argument('--outf', type=str, default='outputs', metavar='N',
                        help='folder for the output files')
parser.add_argument('--lr', type=float, default=1e-3, metavar='N',
help='learning rate')
    parser.add_argument('--nf', type=int, default=128, metavar='N',
                        help='number of hidden features')
    parser.add_argument('--attention', type=int, default=1, metavar='N',
                        help='use attention in the EGNN')
    parser.add_argument('--n_layers', type=int, default=7, metavar='N',
                        help='number of layers for the EGNN')
parser.add_argument('--property', type=str, default='alpha', metavar='N',
help='label to predict: alpha | gap | homo | lumo | mu | Cv | G | H | r2 | U | U0 | zpve')
parser.add_argument('--num_workers', type=int, default=0, metavar='N',
help='number of workers for the dataloader')
parser.add_argument('--filter_n_atoms', type=int, default=None,
help='When set to an integer value, QM9 will only contain molecules of that amount of atoms')
parser.add_argument('--charge_power', type=int, default=2, metavar='N',
help='maximum power to take into one-hot features')
    parser.add_argument('--dataset', type=str, default="qm9_first_half", metavar='N',
                        help='dataset to train on (qm9_first_half)')
    parser.add_argument('--datadir', type=str, default="../../qm9/temp", metavar='N',
                        help='directory where the QM9 data is stored')
parser.add_argument('--remove_h', action='store_true')
parser.add_argument('--include_charges', type=eval, default=True, help='include atom charge or not')
parser.add_argument('--node_attr', type=int, default=0, metavar='N',
help='node_attr or not')
parser.add_argument('--weight_decay', type=float, default=1e-16, metavar='N',
help='weight decay')
    parser.add_argument('--save_path', type=str, default=None, metavar='N',
                        help='path to save the model (currently unused)')
parser.add_argument('--model_name', type=str, default='numnodes', metavar='N',
help='egnn | naive | numnodes')
parser.add_argument('--save_model', type=eval, default=True)
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
dtype = torch.float32
args.device = device
print(args)
    res = {'epochs': [], 'losses': [], 'best_val': 1e10, 'best_test': 1e10, 'best_epoch': 0}
prop_utils.makedir(args.outf)
prop_utils.makedir(args.outf + "/" + args.exp_name)
dataloaders, charge_scale = dataset.retrieve_dataloaders(args)
args.dataset = "qm9_second_half"
dataloaders_aux, _ = dataset.retrieve_dataloaders(args)
dataloaders["test"] = dataloaders_aux["train"]
# compute mean and mean absolute deviation
property_norms = utils.compute_mean_mad_from_dataloader(dataloaders['valid'], [args.property])
mean, mad = property_norms[args.property]['mean'], property_norms[args.property]['mad']
model = get_model(args)
print(model)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.epochs)
for epoch in range(0, args.epochs):
train(model, epoch, dataloaders['train'], mean, mad, args.property, device, partition='train', optimizer=optimizer, lr_scheduler=lr_scheduler, log_interval=args.log_interval)
if epoch % args.test_interval == 0:
val_loss = train(model, epoch, dataloaders['valid'], mean, mad, args.property, device, partition='valid', optimizer=optimizer, lr_scheduler=lr_scheduler, log_interval=args.log_interval)
test_loss = test(model, epoch, dataloaders['test'], mean, mad, args.property, device, log_interval=args.log_interval)
            res['epochs'].append(epoch)
            res['losses'].append(test_loss)
if val_loss < res['best_val']:
res['best_val'] = val_loss
res['best_test'] = test_loss
res['best_epoch'] = epoch
if args.save_model:
torch.save(model.state_dict(), args.outf + "/" + args.exp_name + "/best_checkpoint.npy")
with open(args.outf + "/" + args.exp_name + "/args.pickle", 'wb') as f:
pickle.dump(args, f)
print("Val loss: %.4f \t test loss: %.4f \t epoch %d" % (val_loss, test_loss, epoch))
print("Best: val loss: %.4f \t test loss: %.4f \t epoch %d" % (res['best_val'], res['best_test'], res['best_epoch']))
        json_object = json.dumps(res, indent=4)
        with open(args.outf + "/" + args.exp_name + "/losses.json", "w") as outfile:
            outfile.write(json_object)
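    # Example invocation (editor's note, not in the original script; flags as defined above):
    #   python main_qm9_prop.py --exp_name exp_alpha --property alpha --model_name egnn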
| 10,043 | 44.654545 | 197 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/property_prediction/models_property.py | from .models.gcl import E_GCL, unsorted_segment_sum
import torch
from torch import nn
class E_GCL_mask(E_GCL):
"""Graph Neural Net with global state and fixed number of nodes per graph.
Args:
hidden_dim: Number of hidden units.
num_nodes: Maximum number of nodes (for self-attentive pooling).
global_agg: Global aggregation function ('attn' or 'sum').
temp: Softmax temperature.
"""
def __init__(self, input_nf, output_nf, hidden_nf, edges_in_d=0, nodes_attr_dim=0, act_fn=nn.ReLU(), recurrent=True, coords_weight=1.0, attention=False):
E_GCL.__init__(self, input_nf, output_nf, hidden_nf, edges_in_d=edges_in_d, nodes_att_dim=nodes_attr_dim, act_fn=act_fn, recurrent=recurrent, coords_weight=coords_weight, attention=attention)
del self.coord_mlp
self.act_fn = act_fn
    def coord_model(self, coord, edge_index, coord_diff, edge_feat, edge_mask):
        # NOTE: unused -- self.coord_mlp is deleted in __init__, so calling this
        # method would raise an AttributeError; forward() keeps coordinates fixed.
        row, col = edge_index
        trans = coord_diff * self.coord_mlp(edge_feat) * edge_mask
        agg = unsorted_segment_sum(trans, row, num_segments=coord.size(0))
        coord += agg*self.coords_weight
        return coord
def forward(self, h, edge_index, coord, node_mask, edge_mask, edge_attr=None, node_attr=None, n_nodes=None):
row, col = edge_index
radial, coord_diff = self.coord2radial(edge_index, coord)
edge_feat = self.edge_model(h[row], h[col], radial, edge_attr)
        edge_feat = edge_feat * edge_mask
#coord = self.coord_model(coord, edge_index, coord_diff, edge_feat, edge_mask)
h, agg = self.node_model(h, edge_index, edge_feat, node_attr)
return h, coord, edge_attr
class EGNN(nn.Module):
def __init__(self, in_node_nf, in_edge_nf, hidden_nf, device='cpu', act_fn=nn.SiLU(), n_layers=4, coords_weight=1.0, attention=False, node_attr=1):
super(EGNN, self).__init__()
self.hidden_nf = hidden_nf
self.device = device
self.n_layers = n_layers
### Encoder
self.embedding = nn.Linear(in_node_nf, hidden_nf)
self.node_attr = node_attr
if node_attr:
n_node_attr = in_node_nf
else:
n_node_attr = 0
for i in range(0, n_layers):
self.add_module("gcl_%d" % i, E_GCL_mask(self.hidden_nf, self.hidden_nf, self.hidden_nf, edges_in_d=in_edge_nf, nodes_attr_dim=n_node_attr, act_fn=act_fn, recurrent=True, coords_weight=coords_weight, attention=attention))
self.node_dec = nn.Sequential(nn.Linear(self.hidden_nf, self.hidden_nf),
act_fn,
nn.Linear(self.hidden_nf, self.hidden_nf))
self.graph_dec = nn.Sequential(nn.Linear(self.hidden_nf, self.hidden_nf),
act_fn,
nn.Linear(self.hidden_nf, 1))
self.to(self.device)
def forward(self, h0, x, edges, edge_attr, node_mask, edge_mask, n_nodes):
h = self.embedding(h0)
for i in range(0, self.n_layers):
if self.node_attr:
h, _, _ = self._modules["gcl_%d" % i](h, edges, x, node_mask, edge_mask, edge_attr=edge_attr, node_attr=h0, n_nodes=n_nodes)
else:
h, _, _ = self._modules["gcl_%d" % i](h, edges, x, node_mask, edge_mask, edge_attr=edge_attr,
node_attr=None, n_nodes=n_nodes)
h = self.node_dec(h)
h = h * node_mask
h = h.view(-1, n_nodes, self.hidden_nf)
h = torch.sum(h, dim=1)
pred = self.graph_dec(h)
return pred.squeeze(1)
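# Editor's sketch (not part of the original file): exercising EGNN on a random, fully
# connected toy batch. Shapes follow the flattened (batch_size * n_nodes, ...) convention
# used by the training script; all numbers here are arbitrary.
def _demo_egnn_forward(batch_size=2, n_nodes=3, in_node_nf=5):
    rows, cols = [], []
    for b in range(batch_size):  # fully connected edges, including self-loops
        for i in range(n_nodes):
            for j in range(n_nodes):
                rows.append(b * n_nodes + i)
                cols.append(b * n_nodes + j)
    edges = [torch.LongTensor(rows), torch.LongTensor(cols)]
    h0 = torch.randn(batch_size * n_nodes, in_node_nf)
    x = torch.randn(batch_size * n_nodes, 3)
    node_mask = torch.ones(batch_size * n_nodes, 1)
    edge_mask = torch.ones(batch_size * n_nodes * n_nodes, 1)
    model = EGNN(in_node_nf=in_node_nf, in_edge_nf=0, hidden_nf=32, n_layers=2)
    pred = model(h0=h0, x=x, edges=edges, edge_attr=None, node_mask=node_mask,
                 edge_mask=edge_mask, n_nodes=n_nodes)
    print(pred.shape)  # torch.Size([2]) -- one scalar prediction per graph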
class Naive(nn.Module):
def __init__(self, device):
super(Naive, self).__init__()
self.device = device
self.linear = nn.Linear(1, 1)
self.to(self.device)
def forward(self, h0, x, edges, edge_attr, node_mask, edge_mask, n_nodes):
node_mask = node_mask.view(-1, n_nodes)
bs, n_nodes = node_mask.size()
x = torch.zeros(bs, 1).to(self.device)
return self.linear(x).squeeze(1)
class NumNodes(nn.Module):
def __init__(self, device, nf=128):
super(NumNodes, self).__init__()
self.device = device
self.linear1 = nn.Linear(1, nf)
self.linear2 = nn.Linear(nf, 1)
self.act_fn = nn.SiLU()
self.to(self.device)
def forward(self, h0, x, edges, edge_attr, node_mask, edge_mask, n_nodes):
reshaped_mask = node_mask.view(-1, n_nodes)
        nodesxmol = torch.sum(reshaped_mask, dim=1).unsqueeze(1)/29  # 29 = max number of atoms in QM9
x = self.act_fn(self.linear1(nodesxmol))
return self.linear2(x).squeeze(1) | 6,706 | 40.91875 | 233 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/property_prediction/prop_utils.py | import os
import matplotlib
matplotlib.use('Agg')
import torch
import matplotlib.pyplot as plt
def create_folders(args):
    os.makedirs(args.outf, exist_ok=True)
    os.makedirs(args.outf + '/' + args.exp_name, exist_ok=True)
    os.makedirs(args.outf + '/' + args.exp_name + '/images_recon', exist_ok=True)
    os.makedirs(args.outf + '/' + args.exp_name + '/images_gen', exist_ok=True)
def makedir(path):
    os.makedirs(path, exist_ok=True)
def normalize_res(res, keys=[]):
for key in keys:
if key != 'counter':
res[key] = res[key] / res['counter']
del res['counter']
return res
def plot_coords(coords_mu, path, coords_logvar=None):
if coords_mu is None:
return 0
if coords_logvar is not None:
coords_std = torch.sqrt(torch.exp(coords_logvar))
else:
coords_std = torch.zeros(coords_mu.size())
coords_size = (coords_std ** 2) * 1
plt.scatter(coords_mu[:, 0], coords_mu[:, 1], alpha=0.6, s=100)
#plt.errorbar(coords_mu[:, 0], coords_mu[:, 1], xerr=coords_size[:, 0], yerr=coords_size[:, 1], linestyle="None", alpha=0.5)
plt.savefig(path)
plt.clf()
def filter_nodes(dataset, n_nodes):
new_graphs = []
for i in range(len(dataset.graphs)):
if len(dataset.graphs[i].nodes) == n_nodes:
new_graphs.append(dataset.graphs[i])
dataset.graphs = new_graphs
dataset.n_nodes = n_nodes
return dataset
def adjust_learning_rate(optimizer, epoch, lr_0, factor=0.5, epochs_decay=100):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = lr_0 * (factor ** (epoch // epochs_decay))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
edges_dic = {}
def get_adj_matrix(n_nodes, batch_size, device):
    # Cache the fully connected edge index (including self-loops) per (n_nodes, batch_size),
    # so the triple loop only runs once per shape.
    if n_nodes not in edges_dic:
        edges_dic[n_nodes] = {}
    edges_dic_b = edges_dic[n_nodes]
    if batch_size not in edges_dic_b:
        rows, cols = [], []
        for batch_idx in range(batch_size):
            for i in range(n_nodes):
                for j in range(n_nodes):
                    rows.append(i + batch_idx * n_nodes)
                    cols.append(j + batch_idx * n_nodes)
        edges_dic_b[batch_size] = [torch.LongTensor(rows), torch.LongTensor(cols)]
    rows, cols = edges_dic_b[batch_size]
    return [rows.to(device), cols.to(device)]
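# Editor's sketch (not part of the original file): for n_nodes=2 and batch_size=2 the helper
# returns row/col indices of a fully connected graph *including* self-loops; the diagonal is
# removed later through edge_mask in the training loop.
def _demo_get_adj_matrix():
    rows, cols = get_adj_matrix(n_nodes=2, batch_size=2, device='cpu')
    print(rows.tolist())  # [0, 0, 1, 1, 2, 2, 3, 3]
    print(cols.tolist())  # [0, 1, 0, 1, 2, 3, 2, 3]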
def preprocess_input(one_hot, charges, charge_power, charge_scale, device):
charge_tensor = (charges.unsqueeze(-1) / charge_scale).pow(
torch.arange(charge_power + 1., device=device, dtype=torch.float32))
charge_tensor = charge_tensor.view(charges.shape + (1, charge_power + 1))
atom_scalars = (one_hot.unsqueeze(-1) * charge_tensor).view(charges.shape[:2] + (-1,))
return atom_scalars | 3,051 | 28.346154 | 128 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/property_prediction/models/gcl.py | from torch import nn
import torch
class MLP(nn.Module):
""" a simple 4-layer MLP """
def __init__(self, nin, nout, nh):
super().__init__()
self.net = nn.Sequential(
nn.Linear(nin, nh),
nn.LeakyReLU(0.2),
nn.Linear(nh, nh),
nn.LeakyReLU(0.2),
nn.Linear(nh, nh),
nn.LeakyReLU(0.2),
nn.Linear(nh, nout),
)
def forward(self, x):
return self.net(x)
class GCL_basic(nn.Module):
"""Graph Neural Net with global state and fixed number of nodes per graph.
Args:
hidden_dim: Number of hidden units.
num_nodes: Maximum number of nodes (for self-attentive pooling).
global_agg: Global aggregation function ('attn' or 'sum').
temp: Softmax temperature.
"""
def __init__(self):
super(GCL_basic, self).__init__()
def edge_model(self, source, target, edge_attr):
pass
def node_model(self, h, edge_index, edge_attr):
pass
def forward(self, x, edge_index, edge_attr=None):
row, col = edge_index
edge_feat = self.edge_model(x[row], x[col], edge_attr)
x = self.node_model(x, edge_index, edge_feat)
return x, edge_feat
class GCL(GCL_basic):
"""Graph Neural Net with global state and fixed number of nodes per graph.
Args:
hidden_dim: Number of hidden units.
num_nodes: Maximum number of nodes (for self-attentive pooling).
global_agg: Global aggregation function ('attn' or 'sum').
temp: Softmax temperature.
"""
def __init__(self, input_nf, output_nf, hidden_nf, edges_in_nf=0, act_fn=nn.ReLU(), bias=True, attention=False, t_eq=False, recurrent=True):
super(GCL, self).__init__()
self.attention = attention
self.t_eq=t_eq
self.recurrent = recurrent
input_edge_nf = input_nf * 2
self.edge_mlp = nn.Sequential(
nn.Linear(input_edge_nf + edges_in_nf, hidden_nf, bias=bias),
act_fn,
nn.Linear(hidden_nf, hidden_nf, bias=bias),
act_fn)
if self.attention:
self.att_mlp = nn.Sequential(
nn.Linear(input_nf, hidden_nf, bias=bias),
act_fn,
nn.Linear(hidden_nf, 1, bias=bias),
nn.Sigmoid())
self.node_mlp = nn.Sequential(
nn.Linear(hidden_nf + input_nf, hidden_nf, bias=bias),
act_fn,
nn.Linear(hidden_nf, output_nf, bias=bias))
#if recurrent:
#self.gru = nn.GRUCell(hidden_nf, hidden_nf)
def edge_model(self, source, target, edge_attr):
edge_in = torch.cat([source, target], dim=1)
if edge_attr is not None:
edge_in = torch.cat([edge_in, edge_attr], dim=1)
out = self.edge_mlp(edge_in)
if self.attention:
att = self.att_mlp(torch.abs(source - target))
out = out * att
return out
def node_model(self, h, edge_index, edge_attr):
row, col = edge_index
agg = unsorted_segment_sum(edge_attr, row, num_segments=h.size(0))
out = torch.cat([h, agg], dim=1)
out = self.node_mlp(out)
if self.recurrent:
out = out + h
#out = self.gru(out, h)
return out
class GCL_rf(GCL_basic):
"""Graph Neural Net with global state and fixed number of nodes per graph.
Args:
hidden_dim: Number of hidden units.
num_nodes: Maximum number of nodes (for self-attentive pooling).
global_agg: Global aggregation function ('attn' or 'sum').
temp: Softmax temperature.
"""
def __init__(self, nf=64, edge_attr_nf=0, reg=0, act_fn=nn.LeakyReLU(0.2), clamp=False):
super(GCL_rf, self).__init__()
self.clamp = clamp
layer = nn.Linear(nf, 1, bias=False)
torch.nn.init.xavier_uniform_(layer.weight, gain=0.001)
self.phi = nn.Sequential(nn.Linear(edge_attr_nf + 1, nf),
act_fn,
layer)
self.reg = reg
def edge_model(self, source, target, edge_attr):
x_diff = source - target
radial = torch.sqrt(torch.sum(x_diff ** 2, dim=1)).unsqueeze(1)
e_input = torch.cat([radial, edge_attr], dim=1)
e_out = self.phi(e_input)
m_ij = x_diff * e_out
if self.clamp:
m_ij = torch.clamp(m_ij, min=-100, max=100)
return m_ij
def node_model(self, x, edge_index, edge_attr):
row, col = edge_index
agg = unsorted_segment_mean(edge_attr, row, num_segments=x.size(0))
x_out = x + agg - x*self.reg
return x_out
class E_GCL(nn.Module):
"""Graph Neural Net with global state and fixed number of nodes per graph.
Args:
hidden_dim: Number of hidden units.
num_nodes: Maximum number of nodes (for self-attentive pooling).
global_agg: Global aggregation function ('attn' or 'sum').
temp: Softmax temperature.
"""
def __init__(self, input_nf, output_nf, hidden_nf, edges_in_d=0, nodes_att_dim=0, act_fn=nn.ReLU(), recurrent=True, coords_weight=1.0, attention=False, clamp=False, norm_diff=False, tanh=False):
super(E_GCL, self).__init__()
input_edge = input_nf * 2
self.coords_weight = coords_weight
self.recurrent = recurrent
self.attention = attention
self.norm_diff = norm_diff
self.tanh = tanh
edge_coords_nf = 1
self.edge_mlp = nn.Sequential(
nn.Linear(input_edge + edge_coords_nf + edges_in_d, hidden_nf),
act_fn,
nn.Linear(hidden_nf, hidden_nf),
act_fn)
self.node_mlp = nn.Sequential(
nn.Linear(hidden_nf + input_nf + nodes_att_dim, hidden_nf),
act_fn,
nn.Linear(hidden_nf, output_nf))
layer = nn.Linear(hidden_nf, 1, bias=False)
torch.nn.init.xavier_uniform_(layer.weight, gain=0.001)
self.clamp = clamp
coord_mlp = []
coord_mlp.append(nn.Linear(hidden_nf, hidden_nf))
coord_mlp.append(act_fn)
coord_mlp.append(layer)
if self.tanh:
coord_mlp.append(nn.Tanh())
self.coords_range = nn.Parameter(torch.ones(1))*3
self.coord_mlp = nn.Sequential(*coord_mlp)
if self.attention:
self.att_mlp = nn.Sequential(
nn.Linear(hidden_nf, 1),
nn.Sigmoid())
#if recurrent:
# self.gru = nn.GRUCell(hidden_nf, hidden_nf)
def edge_model(self, source, target, radial, edge_attr):
if edge_attr is None: # Unused.
out = torch.cat([source, target, radial], dim=1)
else:
out = torch.cat([source, target, radial, edge_attr], dim=1)
out = self.edge_mlp(out)
if self.attention:
att_val = self.att_mlp(out)
out = out * att_val
return out
def node_model(self, x, edge_index, edge_attr, node_attr):
row, col = edge_index
agg = unsorted_segment_sum(edge_attr, row, num_segments=x.size(0))
if node_attr is not None:
agg = torch.cat([x, agg, node_attr], dim=1)
else:
agg = torch.cat([x, agg], dim=1)
out = self.node_mlp(agg)
if self.recurrent:
out = x + out
return out, agg
def coord_model(self, coord, edge_index, coord_diff, edge_feat):
row, col = edge_index
trans = coord_diff * self.coord_mlp(edge_feat)
        trans = torch.clamp(trans, min=-100, max=100)  # Normally inactive, but guards against exploding coordinate updates.
agg = unsorted_segment_mean(trans, row, num_segments=coord.size(0))
coord += agg*self.coords_weight
return coord
def coord2radial(self, edge_index, coord):
row, col = edge_index
coord_diff = coord[row] - coord[col]
radial = torch.sum((coord_diff)**2, 1).unsqueeze(1)
if self.norm_diff:
norm = torch.sqrt(radial) + 1
coord_diff = coord_diff/(norm)
return radial, coord_diff
def forward(self, h, edge_index, coord, edge_attr=None, node_attr=None):
row, col = edge_index
radial, coord_diff = self.coord2radial(edge_index, coord)
edge_feat = self.edge_model(h[row], h[col], radial, edge_attr)
coord = self.coord_model(coord, edge_index, coord_diff, edge_feat)
h, agg = self.node_model(h, edge_index, edge_feat, node_attr)
# coord = self.node_coord_model(h, coord)
# x = self.node_model(x, edge_index, x[col], u, batch) # GCN
return h, coord, edge_attr
class E_GCL_vel(E_GCL):
"""Graph Neural Net with global state and fixed number of nodes per graph.
Args:
hidden_dim: Number of hidden units.
num_nodes: Maximum number of nodes (for self-attentive pooling).
global_agg: Global aggregation function ('attn' or 'sum').
temp: Softmax temperature.
"""
def __init__(self, input_nf, output_nf, hidden_nf, edges_in_d=0, nodes_att_dim=0, act_fn=nn.ReLU(), recurrent=True, coords_weight=1.0, attention=False, norm_diff=False, tanh=False):
E_GCL.__init__(self, input_nf, output_nf, hidden_nf, edges_in_d=edges_in_d, nodes_att_dim=nodes_att_dim, act_fn=act_fn, recurrent=recurrent, coords_weight=coords_weight, attention=attention, norm_diff=norm_diff, tanh=tanh)
self.norm_diff = norm_diff
self.coord_mlp_vel = nn.Sequential(
nn.Linear(input_nf, hidden_nf),
act_fn,
nn.Linear(hidden_nf, 1))
def forward(self, h, edge_index, coord, vel, edge_attr=None, node_attr=None):
row, col = edge_index
radial, coord_diff = self.coord2radial(edge_index, coord)
edge_feat = self.edge_model(h[row], h[col], radial, edge_attr)
coord = self.coord_model(coord, edge_index, coord_diff, edge_feat)
coord += self.coord_mlp_vel(h) * vel
h, agg = self.node_model(h, edge_index, edge_feat, node_attr)
# coord = self.node_coord_model(h, coord)
# x = self.node_model(x, edge_index, x[col], u, batch) # GCN
return h, coord, edge_attr
class GCL_rf_vel(nn.Module):
"""Graph Neural Net with global state and fixed number of nodes per graph.
Args:
hidden_dim: Number of hidden units.
num_nodes: Maximum number of nodes (for self-attentive pooling).
global_agg: Global aggregation function ('attn' or 'sum').
temp: Softmax temperature.
"""
def __init__(self, nf=64, edge_attr_nf=0, act_fn=nn.LeakyReLU(0.2), coords_weight=1.0):
super(GCL_rf_vel, self).__init__()
self.coords_weight = coords_weight
self.coord_mlp_vel = nn.Sequential(
nn.Linear(1, nf),
act_fn,
nn.Linear(nf, 1))
layer = nn.Linear(nf, 1, bias=False)
torch.nn.init.xavier_uniform_(layer.weight, gain=0.001)
#layer.weight.uniform_(-0.1, 0.1)
self.phi = nn.Sequential(nn.Linear(1 + edge_attr_nf, nf),
act_fn,
layer,
nn.Tanh()) #we had to add the tanh to keep this method stable
def forward(self, x, vel_norm, vel, edge_index, edge_attr=None):
row, col = edge_index
edge_m = self.edge_model(x[row], x[col], edge_attr)
x = self.node_model(x, edge_index, edge_m)
x += vel * self.coord_mlp_vel(vel_norm)
return x, edge_attr
def edge_model(self, source, target, edge_attr):
x_diff = source - target
radial = torch.sqrt(torch.sum(x_diff ** 2, dim=1)).unsqueeze(1)
e_input = torch.cat([radial, edge_attr], dim=1)
e_out = self.phi(e_input)
m_ij = x_diff * e_out
return m_ij
def node_model(self, x, edge_index, edge_m):
row, col = edge_index
agg = unsorted_segment_mean(edge_m, row, num_segments=x.size(0))
x_out = x + agg * self.coords_weight
return x_out
def unsorted_segment_sum(data, segment_ids, num_segments):
"""Custom PyTorch op to replicate TensorFlow's `unsorted_segment_sum`."""
result_shape = (num_segments, data.size(1))
result = data.new_full(result_shape, 0) # Init empty result tensor.
segment_ids = segment_ids.unsqueeze(-1).expand(-1, data.size(1))
result.scatter_add_(0, segment_ids, data)
return result
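# Editor's sketch (not part of the original file): scatter-add three rows into two segments.
def _demo_unsorted_segment_sum():
    data = torch.tensor([[1.0], [2.0], [3.0]])
    segment_ids = torch.tensor([0, 0, 1])
    print(unsorted_segment_sum(data, segment_ids, num_segments=2))  # tensor([[3.], [3.]])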
def unsorted_segment_mean(data, segment_ids, num_segments):
result_shape = (num_segments, data.size(1))
segment_ids = segment_ids.unsqueeze(-1).expand(-1, data.size(1))
result = data.new_full(result_shape, 0) # Init empty result tensor.
count = data.new_full(result_shape, 0)
result.scatter_add_(0, segment_ids, data)
count.scatter_add_(0, segment_ids, torch.ones_like(data))
return result / count.clamp(min=1) | 12,996 | 36.02849 | 230 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/data/utils.py | import torch
import numpy as np
import logging
import os
from torch.utils.data import DataLoader
from qm9.data.dataset_class import ProcessedDataset
from qm9.data.prepare import prepare_dataset
def initialize_datasets(args, datadir, dataset, subset=None, splits=None,
force_download=False, subtract_thermo=False,
remove_h=False):
"""
Initialize datasets.
Parameters
----------
args : dict
Dictionary of input arguments detailing the cormorant calculation.
datadir : str
        Path to the directory where the data and calculations are, or will be, stored.
dataset : str
        String specification of the dataset. If it is not already downloaded, must currently be "qm9" or "md17".
subset : str, optional
Which subset of a dataset to use. Action is dependent on the dataset given.
Must be specified if the dataset has subsets (i.e. MD17). Otherwise ignored (i.e. GDB9).
splits : str, optional
TODO: DELETE THIS ENTRY
force_download : bool, optional
If true, forces a fresh download of the dataset.
subtract_thermo : bool, optional
If True, subtracts the thermochemical energy of the atoms from each molecule in GDB9.
Does nothing for other datasets.
remove_h: bool, optional
If True, remove hydrogens from the dataset
Returns
-------
args : dict
Dictionary of input arguments detailing the cormorant calculation.
    datasets : dict
        Dictionary of processed dataset objects (see ProcessedDataset for more information).
        Valid keys are "train", "test", and "valid"[ate]. Each associated value is a ProcessedDataset.
num_species : int
Number of unique atomic species in the dataset.
max_charge : pytorch.Tensor
Largest atomic number for the dataset.
Notes
-----
TODO: Delete the splits argument.
"""
# Set the number of points based upon the arguments
num_pts = {'train': args.num_train,
'test': args.num_test, 'valid': args.num_valid}
    # Download and process dataset. Returns datafiles. The qm9_first_half/second_half
    # variants share the underlying QM9 files, hence the hard-coded 'qm9'.
    datafiles = prepare_dataset(
        datadir, 'qm9', subset, splits, force_download=force_download)
# Load downloaded/processed datasets
datasets = {}
for split, datafile in datafiles.items():
with np.load(datafile) as f:
datasets[split] = {key: torch.from_numpy(
val) for key, val in f.items()}
if dataset != 'qm9':
np.random.seed(42)
fixed_perm = np.random.permutation(len(datasets['train']['num_atoms']))
if dataset == 'qm9_second_half':
sliced_perm = fixed_perm[len(datasets['train']['num_atoms'])//2:]
elif dataset == 'qm9_first_half':
sliced_perm = fixed_perm[0:len(datasets['train']['num_atoms']) // 2]
else:
raise Exception('Wrong dataset name')
for key in datasets['train']:
datasets['train'][key] = datasets['train'][key][sliced_perm]
# Basic error checking: Check the training/test/validation splits have the same set of keys.
keys = [list(data.keys()) for data in datasets.values()]
assert all([key == keys[0] for key in keys]
), 'Datasets must have same set of keys!'
    # Optionally remove hydrogens from the dataset
    if remove_h:
        for key, split_data in datasets.items():
            pos = split_data['positions']
            charges = split_data['charges']
            num_atoms = split_data['num_atoms']
            # Check that charges corresponds to real atoms
            assert torch.sum(num_atoms != torch.sum(charges > 0, dim=1)) == 0
            mask = split_data['charges'] > 1  # drop H (charge 1) and padding (charge 0)
            new_positions = torch.zeros_like(pos)
            new_charges = torch.zeros_like(charges)
            for i in range(new_positions.shape[0]):
                m = mask[i]
                p = pos[i][m]  # positions to keep
                p = p - torch.mean(p, dim=0)  # Center the new positions
                c = charges[i][m]  # Charges to keep
                n = torch.sum(m)
                new_positions[i, :n, :] = p
                new_charges[i, :n] = c
            split_data['positions'] = new_positions
            split_data['charges'] = new_charges
            split_data['num_atoms'] = torch.sum(split_data['charges'] > 0, dim=1)
# Get a list of all species across the entire dataset
all_species = _get_species(datasets, ignore_check=False)
# Now initialize MolecularDataset based upon loaded data
datasets = {split: ProcessedDataset(data, num_pts=num_pts.get(
split, -1), included_species=all_species, subtract_thermo=subtract_thermo) for split, data in datasets.items()}
# Now initialize MolecularDataset based upon loaded data
# Check that all datasets have the same included species:
assert(len(set(tuple(data.included_species.tolist()) for data in datasets.values())) ==
1), 'All datasets must have same included_species! {}'.format({key: data.included_species for key, data in datasets.items()})
# These parameters are necessary to initialize the network
num_species = datasets['train'].num_species
max_charge = datasets['train'].max_charge
# Now, update the number of training/test/validation sets in args
args.num_train = datasets['train'].num_pts
args.num_valid = datasets['valid'].num_pts
args.num_test = datasets['test'].num_pts
return args, datasets, num_species, max_charge
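# Editor's note (not part of the original file): typical usage. Calling this downloads and
# processes QM9 into `datadir` on first use, so it is shown only as a comment:
#
#   import argparse
#   args = argparse.Namespace(num_train=-1, num_valid=-1, num_test=-1)
#   args, datasets, num_species, max_charge = initialize_datasets(
#       args, datadir='qm9/temp', dataset='qm9')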
def _get_species(datasets, ignore_check=False):
"""
Generate a list of all species.
Includes a check that each split contains examples of every species in the
entire dataset.
Parameters
----------
datasets : dict
Dictionary of datasets. Each dataset is a dict of arrays containing molecular properties.
ignore_check : bool
Ignores/overrides checks to make sure every split includes every species included in the entire dataset
Returns
-------
all_species : Pytorch tensor
        List of all species present in the data. Species labels should be integers.
"""
# Get a list of all species in the dataset across all splits
all_species = torch.cat([dataset['charges'].unique()
for dataset in datasets.values()]).unique(sorted=True)
# Find the unique list of species in each dataset.
split_species = {split: species['charges'].unique(
sorted=True) for split, species in datasets.items()}
# If zero charges (padded, non-existent atoms) are included, remove them
if all_species[0] == 0:
all_species = all_species[1:]
    # Remove zeros if zero-padded charges exist for each split
split_species = {split: species[1:] if species[0] ==
0 else species for split, species in split_species.items()}
    # Now check that each split has at least one example of every atomic species from the entire dataset.
if not all([split.tolist() == all_species.tolist() for split in split_species.values()]):
# Allows one to override this check if they really want to. Not recommended as the answers become non-sensical.
if ignore_check:
logging.error(
'The number of species is not the same in all datasets!')
else:
raise ValueError(
'Not all datasets have the same number of species!')
# Finally, return a list of all species
return all_species
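# Editor's sketch (not part of the original file): both toy splits contain the same species,
# so the consistency check passes and the zero padding "charge" is dropped.
def _demo_get_species():
    toy = {'train': {'charges': torch.tensor([[1, 6, 0], [1, 8, 0]])},
           'test': {'charges': torch.tensor([[1, 6, 8]])}}
    print(_get_species(toy))  # tensor([1, 6, 8])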
| 7,481 | 39.443243 | 136 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/data/collate.py | import torch
def batch_stack(props):
"""
Stack a list of torch.tensors so they are padded to the size of the
largest tensor along each axis.
Parameters
----------
props : list of Pytorch Tensors
Pytorch tensors to stack
Returns
-------
props : Pytorch tensor
Stacked pytorch tensor.
Notes
-----
TODO : Review whether the behavior when elements are not tensors is safe.
"""
if not torch.is_tensor(props[0]):
return torch.tensor(props)
elif props[0].dim() == 0:
return torch.stack(props)
else:
return torch.nn.utils.rnn.pad_sequence(props, batch_first=True, padding_value=0)
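# Editor's sketch (not part of the original file): tensors of unequal length are zero-padded
# up to the longest one along the first axis.
def _demo_batch_stack():
    out = batch_stack([torch.tensor([1, 2, 3]), torch.tensor([4, 5])])
    print(out)  # tensor([[1, 2, 3], [4, 5, 0]])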
def drop_zeros(props, to_keep):
"""
Function to drop zeros from batches when the entire dataset is padded to the largest molecule size.
Parameters
----------
    props : Pytorch tensor
        Full Dataset
    to_keep : Pytorch tensor
        Boolean mask over node slots; slots marked False are dropped.
Returns
-------
props : Pytorch tensor
The dataset with only the retained information.
Notes
-----
TODO : Review whether the behavior when elements are not tensors is safe.
"""
if not torch.is_tensor(props[0]):
return props
elif props[0].dim() == 0:
return props
else:
return props[:, to_keep, ...]
class PreprocessQM9:
    def __init__(self, load_charges=True):
        self.load_charges = load_charges
        self.tricks = []
def add_trick(self, trick):
self.tricks.append(trick)
def collate_fn(self, batch):
"""
Collation function that collates datapoints into the batch format for cormorant
Parameters
----------
batch : list of datapoints
The data to be collated.
Returns
-------
batch : dict of Pytorch tensors
The collated data.
"""
batch = {prop: batch_stack([mol[prop] for mol in batch]) for prop in batch[0].keys()}
to_keep = (batch['charges'].sum(0) > 0)
batch = {key: drop_zeros(prop, to_keep) for key, prop in batch.items()}
atom_mask = batch['charges'] > 0
batch['atom_mask'] = atom_mask
#Obtain edges
batch_size, n_nodes = atom_mask.size()
edge_mask = atom_mask.unsqueeze(1) * atom_mask.unsqueeze(2)
#mask diagonal
diag_mask = ~torch.eye(edge_mask.size(1), dtype=torch.bool).unsqueeze(0)
edge_mask *= diag_mask
#edge_mask = atom_mask.unsqueeze(1) * atom_mask.unsqueeze(2)
batch['edge_mask'] = edge_mask.view(batch_size * n_nodes * n_nodes, 1)
if self.load_charges:
batch['charges'] = batch['charges'].unsqueeze(2)
else:
batch['charges'] = torch.zeros(0)
return batch
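# Editor's sketch (not part of the original file): wiring the collate function into a
# DataLoader. The toy "dataset" is a plain list of molecule dicts; only the 'charges' and
# 'positions' keys are assumed here, mirroring the keys used above.
def _demo_collate():
    from torch.utils.data import DataLoader
    data = [{'charges': torch.tensor([6, 1, 1, 0]), 'positions': torch.randn(4, 3)},
            {'charges': torch.tensor([8, 1, 0, 0]), 'positions': torch.randn(4, 3)}]
    loader = DataLoader(data, batch_size=2, collate_fn=PreprocessQM9().collate_fn)
    batch = next(iter(loader))
    # Padded to the largest molecule (3 real atom slots survive drop_zeros):
    print(batch['charges'].shape, batch['atom_mask'].shape, batch['edge_mask'].shape)
    # torch.Size([2, 3, 1]) torch.Size([2, 3]) torch.Size([18, 1])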
| 2,718 | 25.144231 | 103 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/data/dataset_class.py | import torch
from torch.utils.data import Dataset
import os
from itertools import islice
from math import inf
import logging
class ProcessedDataset(Dataset):
"""
Data structure for a pre-processed cormorant dataset. Extends PyTorch Dataset.
Parameters
----------
data : dict
Dictionary of arrays containing molecular properties.
    included_species : tensor of scalars, optional
        Atomic species (nuclear charges) to include in the one-hot encoding. If None, uses all species.
num_pts : int, optional
Desired number of points to include in the dataset.
Default value, -1, uses all of the datapoints.
    normalize : bool, optional
        Currently unused.
shuffle : bool, optional
If true, shuffle the points in the dataset.
subtract_thermo : bool, optional
If True, subtracts the thermochemical energy of the atoms from each molecule in GDB9.
Does nothing for other datasets.
"""
def __init__(self, data, included_species=None, num_pts=-1, normalize=True, shuffle=True, subtract_thermo=True):
self.data = data
if num_pts < 0:
self.num_pts = len(data['charges'])
else:
if num_pts > len(data['charges']):
logging.warning('Desired number of points ({}) is greater than the number of data points ({}) available in the dataset!'.format(num_pts, len(data['charges'])))
self.num_pts = len(data['charges'])
else:
self.num_pts = num_pts
# If included species is not specified
if included_species is None:
included_species = torch.unique(self.data['charges'], sorted=True)
if included_species[0] == 0:
included_species = included_species[1:]
if subtract_thermo:
thermo_targets = [key.split('_')[0] for key in data.keys() if key.endswith('_thermo')]
if len(thermo_targets) == 0:
logging.warning('No thermochemical targets included! Try reprocessing dataset with --force-download!')
else:
logging.info('Removing thermochemical energy from targets {}'.format(' '.join(thermo_targets)))
for key in thermo_targets:
data[key] -= data[key + '_thermo'].to(data[key].dtype)
self.included_species = included_species
self.data['one_hot'] = self.data['charges'].unsqueeze(-1) == included_species.unsqueeze(0).unsqueeze(0)
self.num_species = len(included_species)
self.max_charge = max(included_species)
self.parameters = {'num_species': self.num_species, 'max_charge': self.max_charge}
# Get a dictionary of statistics for all properties that are one-dimensional tensors.
self.calc_stats()
if shuffle:
self.perm = torch.randperm(len(data['charges']))[:self.num_pts]
else:
self.perm = None
def calc_stats(self):
self.stats = {key: (val.mean(), val.std()) for key, val in self.data.items() if type(val) is torch.Tensor and val.dim() == 1 and val.is_floating_point()}
def convert_units(self, units_dict):
for key in self.data.keys():
if key in units_dict:
self.data[key] *= units_dict[key]
self.calc_stats()
def __len__(self):
return self.num_pts
def __getitem__(self, idx):
if self.perm is not None:
idx = self.perm[idx]
return {key: val[idx] for key, val in self.data.items()}
| 3,510 | 36.351064 | 175 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/data/prepare/md17.py | from os.path import join as join
import urllib.request
import numpy as np
import torch
import logging, os, urllib
from qm9.data.prepare.utils import download_data, is_int, cleanup_file
md17_base_url = 'http://quantum-machine.org/gdml/data/npz/'
md17_subsets = {'benzene': 'benzene_old_dft',
'uracil': 'uracil_dft',
'naphthalene': 'naphthalene_dft',
'aspirin': 'aspirin_dft',
'salicylic_acid': 'salicylic_dft',
'malonaldehyde': 'malonaldehyde_dft',
'ethanol': 'ethanol_dft',
'toluene': 'toluene_dft',
'paracetamol': 'paracetamol_dft',
'azobenzene': 'azobenzene_dft'
}
def download_dataset_md17(datadir, dataname, subset, splits=None, cleanup=True):
"""
Downloads the MD17 dataset.
"""
if subset not in md17_subsets:
logging.info('Molecule {} not included in list of downloadable MD17 datasets! Attempting to download based directly upon input key.'.format(subset))
md17_molecule = subset
else:
md17_molecule = md17_subsets[subset]
# Define directory for which data will be output.
md17dir = join(*[datadir, dataname, subset])
# Important to avoid a race condition
os.makedirs(md17dir, exist_ok=True)
logging.info('Downloading and processing molecule {} from MD17 dataset. Output will be in directory: {}.'.format(subset, md17dir))
md17_data_url = md17_base_url + md17_molecule + '.npz'
md17_data_npz = join(md17dir, md17_molecule + '.npz')
download_data(md17_data_url, outfile=md17_data_npz, binary=True)
# Convert raw MD17 data to torch tensors.
md17_raw_data = np.load(md17_data_npz)
# Number of molecules in dataset:
num_tot_mols = len(md17_raw_data['E'])
# Dictionary to convert keys in MD17 database to those used in this code.
md17_keys = {'E': 'energies', 'R': 'positions', 'F': 'forces'}
# Convert numpy arrays to torch.Tensors
md17_data = {new_key: md17_raw_data[old_key] for old_key, new_key in md17_keys.items()}
# Reshape energies to remove final singleton dimension
md17_data['energies'] = md17_data['energies'].squeeze(1)
# Add charges to md17_data
md17_data['charges'] = np.tile(md17_raw_data['z'], (num_tot_mols, 1))
# If splits are not specified, automatically generate them.
if splits is None:
splits = gen_splits_md17(num_tot_mols)
    # Process MD17 dataset, and return dictionary of splits
md17_data_split = {}
for split, split_idx in splits.items():
md17_data_split[split] = {key: val[split_idx] if type(val) is np.ndarray else val for key, val in md17_data.items()}
    # Save processed MD17 data into train/validation/test splits
logging.info('Saving processed data:')
for split, data_split in md17_data_split.items():
savefile = join(md17dir, split + '.npz')
np.savez_compressed(savefile, **data_split)
cleanup_file(md17_data_npz, cleanup)
def gen_splits_md17(num_pts):
"""
Generate the splits used to train/evaluate the network in the original Cormorant paper.
"""
# deterministically generate random split based upon random permutation
np.random.seed(0)
data_perm = np.random.permutation(num_pts)
# Create masks for which splits to invoke
    mask_train = np.zeros(num_pts, dtype=bool)
    mask_valid = np.zeros(num_pts, dtype=bool)
    mask_test = np.zeros(num_pts, dtype=bool)
# For historical reasons, this is the indexing on the
# 50k/10k/10k train/valid/test splits used in the paper.
mask_train[:10000] = True
mask_valid[10000:20000] = True
mask_test[20000:30000] = True
mask_train[30000:70000] = True
    # Convert masks to splits
splits = {}
splits['train'] = torch.tensor(data_perm[mask_train])
splits['valid'] = torch.tensor(data_perm[mask_valid])
splits['test'] = torch.tensor(data_perm[mask_test])
return splits
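# Editor's sketch (not part of the original file): the masks above assume at least 70k points.
def _demo_gen_splits_md17():
    splits = gen_splits_md17(num_pts=100000)
    print(len(splits['train']), len(splits['valid']), len(splits['test']))  # 50000 10000 10000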
| 3,992 | 34.972973 | 156 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/data/prepare/qm9.py | import numpy as np
import torch
import logging
import os
import urllib
from os.path import join as join
import urllib.request
from qm9.data.prepare.process import process_xyz_files, process_xyz_gdb9
from qm9.data.prepare.utils import download_data, is_int, cleanup_file
def download_dataset_qm9(datadir, dataname, splits=None, calculate_thermo=True, exclude=True, cleanup=True):
"""
Download and prepare the QM9 (GDB9) dataset.
"""
# Define directory for which data will be output.
gdb9dir = join(*[datadir, dataname])
# Important to avoid a race condition
os.makedirs(gdb9dir, exist_ok=True)
logging.info(
'Downloading and processing GDB9 dataset. Output will be in directory: {}.'.format(gdb9dir))
logging.info('Beginning download of GDB9 dataset!')
gdb9_url_data = 'https://springernature.figshare.com/ndownloader/files/3195389'
gdb9_tar_data = join(gdb9dir, 'dsgdb9nsd.xyz.tar.bz2')
urllib.request.urlretrieve(gdb9_url_data, filename=gdb9_tar_data)
logging.info('GDB9 dataset downloaded successfully!')
# If splits are not specified, automatically generate them.
if splits is None:
splits = gen_splits_gdb9(gdb9dir, cleanup)
# Process GDB9 dataset, and return dictionary of splits
gdb9_data = {}
for split, split_idx in splits.items():
gdb9_data[split] = process_xyz_files(
gdb9_tar_data, process_xyz_gdb9, file_idx_list=split_idx, stack=True)
# Subtract thermochemical energy if desired.
if calculate_thermo:
# Download thermochemical energy from GDB9 dataset, and then process it into a dictionary
therm_energy = get_thermo_dict(gdb9dir, cleanup)
# For each of train/validation/test split, add the thermochemical energy
for split_idx, split_data in gdb9_data.items():
gdb9_data[split_idx] = add_thermo_targets(split_data, therm_energy)
# Save processed GDB9 data into train/validation/test splits
logging.info('Saving processed data:')
for split, data in gdb9_data.items():
savedir = join(gdb9dir, split+'.npz')
np.savez_compressed(savedir, **data)
logging.info('Processing/saving complete!')
def gen_splits_gdb9(gdb9dir, cleanup=True):
"""
Generate GDB9 training/validation/test splits used.
First, use the file 'uncharacterized.txt' in the GDB9 figshare to find a
list of excluded molecules.
Second, create a list of molecule ids, and remove the excluded molecule
indices.
Third, assign 100k molecules to the training set, 10% to the test set,
and the remaining to the validation set.
Finally, generate torch.tensors which give the molecule ids for each
set.
"""
logging.info('Splits were not specified! Automatically generating.')
gdb9_url_excluded = 'https://springernature.figshare.com/ndownloader/files/3195404'
gdb9_txt_excluded = join(gdb9dir, 'uncharacterized.txt')
urllib.request.urlretrieve(gdb9_url_excluded, filename=gdb9_txt_excluded)
# First get list of excluded indices
excluded_strings = []
with open(gdb9_txt_excluded) as f:
lines = f.readlines()
excluded_strings = [line.split()[0]
for line in lines if len(line.split()) > 0]
excluded_idxs = [int(idx) - 1 for idx in excluded_strings if is_int(idx)]
assert len(excluded_idxs) == 3054, 'There should be exactly 3054 excluded atoms. Found {}'.format(
len(excluded_idxs))
# Now, create a list of indices
Ngdb9 = 133885
Nexcluded = 3054
included_idxs = np.array(
sorted(list(set(range(Ngdb9)) - set(excluded_idxs))))
# Now generate random permutations to assign molecules to training/validation/test sets.
Nmols = Ngdb9 - Nexcluded
Ntrain = 100000
Ntest = int(0.1*Nmols)
Nvalid = Nmols - (Ntrain + Ntest)
# Generate random permutation
np.random.seed(0)
data_perm = np.random.permutation(Nmols)
# Now use the permutations to generate the indices of the dataset splits.
# train, valid, test, extra = np.split(included_idxs[data_perm], [Ntrain, Ntrain+Nvalid, Ntrain+Nvalid+Ntest])
train, valid, test, extra = np.split(
data_perm, [Ntrain, Ntrain+Nvalid, Ntrain+Nvalid+Ntest])
assert(len(extra) == 0), 'Split was inexact {} {} {} {}'.format(
len(train), len(valid), len(test), len(extra))
train = included_idxs[train]
valid = included_idxs[valid]
test = included_idxs[test]
splits = {'train': train, 'valid': valid, 'test': test}
# Cleanup
cleanup_file(gdb9_txt_excluded, cleanup)
return splits
def get_thermo_dict(gdb9dir, cleanup=True):
"""
Get dictionary of thermochemical energy to subtract off from
properties of molecules.
Probably would be easier just to just precompute this and enter it explicitly.
"""
# Download thermochemical energy
logging.info('Downloading thermochemical energy.')
gdb9_url_thermo = 'https://springernature.figshare.com/ndownloader/files/3195395'
gdb9_txt_thermo = join(gdb9dir, 'atomref.txt')
urllib.request.urlretrieve(gdb9_url_thermo, filename=gdb9_txt_thermo)
    # Thermochemical targets stored in the atomref file
    therm_targets = ['zpve', 'U0', 'U', 'H', 'G', 'Cv']
    # Dictionary that maps atom symbols to nuclear charges
    id2charge = {'H': 1, 'C': 6, 'N': 7, 'O': 8, 'F': 9}
    # Loop over file of thermochemical energies
therm_energy = {target: {} for target in therm_targets}
with open(gdb9_txt_thermo) as f:
for line in f:
# If line starts with an element, convert the rest to a list of energies.
split = line.split()
# Check charge corresponds to an atom
if len(split) == 0 or split[0] not in id2charge.keys():
continue
# Loop over learning targets with defined thermochemical energy
for therm_target, split_therm in zip(therm_targets, split[1:]):
therm_energy[therm_target][id2charge[split[0]]
] = float(split_therm)
# Cleanup file when finished.
cleanup_file(gdb9_txt_thermo, cleanup)
return therm_energy
def add_thermo_targets(data, therm_energy_dict):
"""
Adds a new molecular property, which is the thermochemical energy.
Parameters
----------
data : ?????
QM9 dataset split.
therm_energy : dict
Dictionary of thermochemical energies for relevant properties found using :get_thermo_dict:
"""
# Get the charge and number of charges
charge_counts = get_unique_charges(data['charges'])
# Now, loop over the targets with defined thermochemical energy
for target, target_therm in therm_energy_dict.items():
thermo = np.zeros(len(data[target]))
# Loop over each charge, and multiplicity of the charge
for z, num_z in charge_counts.items():
if z == 0:
continue
# Now add the thermochemical energy per atomic charge * the number of atoms of that type
thermo += target_therm[z] * num_z
# Now add the thermochemical energy as a property
data[target + '_thermo'] = thermo
return data
def get_unique_charges(charges):
"""
Get count of each charge for each molecule.
"""
# Create a dictionary of charges
    charge_counts = {z: np.zeros(len(charges), dtype=int)
                     for z in np.unique(charges)}
# Loop over molecules, for each molecule get the unique charges
for idx, mol_charges in enumerate(charges):
# For each molecule, get the unique charge and multiplicity
for z, num_z in zip(*np.unique(mol_charges, return_counts=True)):
# Store the multiplicity of each charge in charge_counts
charge_counts[z][idx] = num_z
return charge_counts
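# Editor's sketch (not part of the original file): per-molecule multiplicity of each charge,
# including the 0 padding "charge".
def _demo_get_unique_charges():
    counts = get_unique_charges(np.array([[1, 1, 6, 0], [8, 1, 0, 0]]))
    print(counts[1])  # [2 1] -- two H in the first molecule, one in the second
    print(counts[0])  # [1 2] -- padding slots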
| 8,060 | 34.355263 | 114 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/qm9/data/prepare/process.py | import logging
import os
import torch
import tarfile
from torch.nn.utils.rnn import pad_sequence
charge_dict = {'H': 1, 'C': 6, 'N': 7, 'O': 8, 'F': 9}
def split_dataset(data, split_idxs):
"""
Splits a dataset according to the indices given.
Parameters
----------
data : dict
Dictionary to split.
split_idxs : dict
Dictionary defining the split. Keys are the name of the split, and
values are the keys for the items in data that go into the split.
Returns
-------
split_dataset : dict
The split dataset.
"""
split_data = {}
for set, split in split_idxs.items():
split_data[set] = {key: val[split] for key, val in data.items()}
return split_data
def process_xyz_files(data, process_file_fn, file_ext=None, file_idx_list=None, stack=True):
"""
Take a set of datafiles and apply a predefined data processing script to each
    one. Data can be stored in a directory or tarball archive. An optional
file extension can be added.
Parameters
----------
data : str
        Complete path to datafiles. Files must be in a directory or tarball archive.
process_file_fn : callable
Function to process files. Can be defined externally.
Must input a file, and output a dictionary of properties, each of which
is a torch.tensor. Dictionary must contain at least three properties:
{'num_elements', 'charges', 'positions'}
file_ext : str, optional
Optionally add a file extension if multiple types of files exist.
    file_idx_list : list of int, optional
        Optionally add a file filter to check a file index is in a
        predefined list, for example, when constructing a train/valid/test split.
    stack : bool, optional
        If True, pad the molecules to a common size and stack their properties into batched tensors.
"""
logging.info('Processing data file: {}'.format(data))
if tarfile.is_tarfile(data):
tardata = tarfile.open(data, 'r')
files = tardata.getmembers()
readfile = lambda data_pt: tardata.extractfile(data_pt)
    elif os.path.isdir(data):
files = os.listdir(data)
files = [os.path.join(data, file) for file in files]
readfile = lambda data_pt: open(data_pt, 'r')
else:
raise ValueError('Can only read from directory or tarball archive!')
# Use only files that end with specified extension.
if file_ext is not None:
files = [file for file in files if file.endswith(file_ext)]
# Use only files that match desired filter.
if file_idx_list is not None:
files = [file for idx, file in enumerate(files) if idx in file_idx_list]
# Now loop over files using readfile function defined above
# Process each file accordingly using process_file_fn
molecules = []
for file in files:
with readfile(file) as openfile:
molecules.append(process_file_fn(openfile))
# Check that all molecules have the same set of items in their dictionary:
props = molecules[0].keys()
assert all(props == mol.keys() for mol in molecules), 'All molecules must have same set of properties/keys!'
# Convert list-of-dicts to dict-of-lists
molecules = {prop: [mol[prop] for mol in molecules] for prop in props}
    # If stacking is desirable, pad and then stack.
if stack:
molecules = {key: pad_sequence(val, batch_first=True) if val[0].dim() > 0 else torch.stack(val) for key, val in molecules.items()}
return molecules
def process_xyz_md17(datafile):
"""
Read xyz file and return a molecular dict with number of atoms, energy, forces, coordinates and atom-type for the MD-17 dataset.
Parameters
----------
datafile : python file object
File object containing the molecular data in the MD17 dataset.
Returns
-------
molecule : dict
Dictionary containing the molecular properties of the associated file object.
"""
xyz_lines = [line.decode('UTF-8') for line in datafile.readlines()]
line_counter = 0
atom_positions = []
atom_types = []
for line in xyz_lines:
if line[0] is '#':
continue
if line_counter is 0:
num_atoms = int(line)
elif line_counter is 1:
split = line.split(';')
assert (len(split) == 1 or len(split) == 2), 'Improperly formatted energy/force line.'
if (len(split) == 1):
e = split[0]
f = None
elif (len(split) == 2):
e, f = split
f = f.split('],[')
atom_energy = float(e)
atom_forces = [[float(x.strip('[]\n')) for x in force.split(',')] for force in f]
else:
split = line.split()
            if len(split) == 4:
                atom_type, x, y, z = split
                atom_types.append(atom_type)
                atom_positions.append([float(x), float(y), float(z)])
else:
logging.debug(line)
line_counter += 1
    atom_charges = [charge_dict[atom_type] for atom_type in atom_types]
molecule = {'num_atoms': num_atoms, 'energy': atom_energy, 'charges': atom_charges,
'forces': atom_forces, 'positions': atom_positions}
molecule = {key: torch.tensor(val) for key, val in molecule.items()}
return molecule
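# Sketch of the file layout the parser above assumes (values are made up):
#
#   21                                   <- line 0: number of atoms
#   -406757.59;[[-1.2,0.3,0.1],[...]]    <- line 1: energy;forces, with the
#                                           ';'-separated forces block optional
#   C 1.8934 0.1023 -0.3419              <- remaining lines: type x y z
#   ...
# Comment lines starting with '#' are skipped.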
def process_xyz_gdb9(datafile):
"""
Read xyz file and return a molecular dict with number of atoms, energy, forces, coordinates and atom-type for the gdb9 dataset.
Parameters
----------
datafile : python file object
        File object containing the molecular data in the GDB9 dataset.
Returns
-------
molecule : dict
Dictionary containing the molecular properties of the associated file object.
Notes
-----
TODO : Replace breakpoint with a more informative failure?
"""
xyz_lines = [line.decode('UTF-8') for line in datafile.readlines()]
num_atoms = int(xyz_lines[0])
mol_props = xyz_lines[1].split()
mol_xyz = xyz_lines[2:num_atoms+2]
mol_freq = xyz_lines[num_atoms+2]
atom_charges, atom_positions = [], []
for line in mol_xyz:
atom, posx, posy, posz, _ = line.replace('*^', 'e').split()
atom_charges.append(charge_dict[atom])
atom_positions.append([float(posx), float(posy), float(posz)])
prop_strings = ['tag', 'index', 'A', 'B', 'C', 'mu', 'alpha', 'homo', 'lumo', 'gap', 'r2', 'zpve', 'U0', 'U', 'H', 'G', 'Cv']
prop_strings = prop_strings[1:]
mol_props = [int(mol_props[1])] + [float(x) for x in mol_props[2:]]
mol_props = dict(zip(prop_strings, mol_props))
mol_props['omega1'] = max(float(omega) for omega in mol_freq.split())
molecule = {'num_atoms': num_atoms, 'charges': atom_charges, 'positions': atom_positions}
molecule.update(mol_props)
molecule = {key: torch.tensor(val) for key, val in molecule.items()}
return molecule
| 6,929 | 33.137931 | 138 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/generated_samples/gschnet/analyze_gschnet.py | # Rdkit import should be first, do not move it
try:
from rdkit import Chem
except ModuleNotFoundError:
pass
import pickle
import torch.nn.functional as F
from qm9.analyze import analyze_stability_for_molecules
import numpy as np
import torch
def flatten_sample_dictionary(samples):
results = {'one_hot': [], 'x': [], 'node_mask': []}
for number_of_atoms in samples:
positions = samples[number_of_atoms]['_positions']
atom_types = samples[number_of_atoms]['_atomic_numbers']
for positions_single_molecule, atom_types_single_molecule in zip(positions, atom_types):
mask = np.ones(positions.shape[1])
one_hot = F.one_hot(
torch.from_numpy(atom_types_single_molecule),
num_classes=10).numpy()
results['x'].append(torch.from_numpy(positions_single_molecule))
results['one_hot'].append(torch.from_numpy(one_hot))
results['node_mask'].append(torch.from_numpy(mask))
return results
def main():
with open('generated_samples/gschnet/gschnet_samples.pickle', 'rb') as f:
samples = pickle.load(f)
from configs import datasets_config
dataset_info = {'atom_decoder': [None, 'H', None, None, None,
None, 'C', 'N', 'O', 'F'],
'name': 'qm9'}
results = flatten_sample_dictionary(samples)
print(f'Analyzing {len(results["x"])} molecules...')
validity_dict, rdkit_metrics = analyze_stability_for_molecules(results, dataset_info)
print(validity_dict, rdkit_metrics[0])
if __name__ == '__main__':
main()
| 1,647 | 29.518519 | 96 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/egnn/egnn_new.py | from torch import nn
import torch
import math
class GCL(nn.Module):
def __init__(self, input_nf, output_nf, hidden_nf, normalization_factor, aggregation_method,
edges_in_d=0, nodes_att_dim=0, act_fn=nn.SiLU(), attention=False):
super(GCL, self).__init__()
input_edge = input_nf * 2
self.normalization_factor = normalization_factor
self.aggregation_method = aggregation_method
self.attention = attention
self.edge_mlp = nn.Sequential(
nn.Linear(input_edge + edges_in_d, hidden_nf),
act_fn,
nn.Linear(hidden_nf, hidden_nf),
act_fn)
self.node_mlp = nn.Sequential(
nn.Linear(hidden_nf + input_nf + nodes_att_dim, hidden_nf),
act_fn,
nn.Linear(hidden_nf, output_nf))
if self.attention:
self.att_mlp = nn.Sequential(
nn.Linear(hidden_nf, 1),
nn.Sigmoid())
def edge_model(self, source, target, edge_attr, edge_mask):
if edge_attr is None: # Unused.
out = torch.cat([source, target], dim=1)
else:
out = torch.cat([source, target, edge_attr], dim=1)
mij = self.edge_mlp(out)
if self.attention:
att_val = self.att_mlp(mij)
out = mij * att_val
else:
out = mij
if edge_mask is not None:
out = out * edge_mask
return out, mij
def node_model(self, x, edge_index, edge_attr, node_attr):
row, col = edge_index
agg = unsorted_segment_sum(edge_attr, row, num_segments=x.size(0),
normalization_factor=self.normalization_factor,
aggregation_method=self.aggregation_method)
if node_attr is not None:
agg = torch.cat([x, agg, node_attr], dim=1)
else:
agg = torch.cat([x, agg], dim=1)
out = x + self.node_mlp(agg)
return out, agg
def forward(self, h, edge_index, edge_attr=None, node_attr=None, node_mask=None, edge_mask=None):
row, col = edge_index
edge_feat, mij = self.edge_model(h[row], h[col], edge_attr, edge_mask)
h, agg = self.node_model(h, edge_index, edge_feat, node_attr)
if node_mask is not None:
h = h * node_mask
return h, mij
class EquivariantUpdate(nn.Module):
def __init__(self, hidden_nf, normalization_factor, aggregation_method,
edges_in_d=1, act_fn=nn.SiLU(), tanh=False, coords_range=10.0):
super(EquivariantUpdate, self).__init__()
self.tanh = tanh
self.coords_range = coords_range
input_edge = hidden_nf * 2 + edges_in_d
layer = nn.Linear(hidden_nf, 1, bias=False)
torch.nn.init.xavier_uniform_(layer.weight, gain=0.001)
self.coord_mlp = nn.Sequential(
nn.Linear(input_edge, hidden_nf),
act_fn,
nn.Linear(hidden_nf, hidden_nf),
act_fn,
layer)
self.normalization_factor = normalization_factor
self.aggregation_method = aggregation_method
def coord_model(self, h, coord, edge_index, coord_diff, edge_attr, edge_mask):
row, col = edge_index
input_tensor = torch.cat([h[row], h[col], edge_attr], dim=1)
if self.tanh:
trans = coord_diff * torch.tanh(self.coord_mlp(input_tensor)) * self.coords_range
else:
trans = coord_diff * self.coord_mlp(input_tensor)
if edge_mask is not None:
trans = trans * edge_mask
agg = unsorted_segment_sum(trans, row, num_segments=coord.size(0),
normalization_factor=self.normalization_factor,
aggregation_method=self.aggregation_method)
coord = coord + agg
return coord
def forward(self, h, coord, edge_index, coord_diff, edge_attr=None, node_mask=None, edge_mask=None):
coord = self.coord_model(h, coord, edge_index, coord_diff, edge_attr, edge_mask)
if node_mask is not None:
coord = coord * node_mask
return coord
class EquivariantBlock(nn.Module):
def __init__(self, hidden_nf, edge_feat_nf=2, device='cpu', act_fn=nn.SiLU(), n_layers=2, attention=True,
norm_diff=True, tanh=False, coords_range=15, norm_constant=1, sin_embedding=None,
normalization_factor=100, aggregation_method='sum'):
super(EquivariantBlock, self).__init__()
self.hidden_nf = hidden_nf
self.device = device
self.n_layers = n_layers
self.coords_range_layer = float(coords_range)
self.norm_diff = norm_diff
self.norm_constant = norm_constant
self.sin_embedding = sin_embedding
self.normalization_factor = normalization_factor
self.aggregation_method = aggregation_method
for i in range(0, n_layers):
self.add_module("gcl_%d" % i, GCL(self.hidden_nf, self.hidden_nf, self.hidden_nf, edges_in_d=edge_feat_nf,
act_fn=act_fn, attention=attention,
normalization_factor=self.normalization_factor,
aggregation_method=self.aggregation_method))
self.add_module("gcl_equiv", EquivariantUpdate(hidden_nf, edges_in_d=edge_feat_nf, act_fn=nn.SiLU(), tanh=tanh,
coords_range=self.coords_range_layer,
normalization_factor=self.normalization_factor,
aggregation_method=self.aggregation_method))
self.to(self.device)
def forward(self, h, x, edge_index, node_mask=None, edge_mask=None, edge_attr=None):
# Edit Emiel: Remove velocity as input
distances, coord_diff = coord2diff(x, edge_index, self.norm_constant)
if self.sin_embedding is not None:
distances = self.sin_embedding(distances)
edge_attr = torch.cat([distances, edge_attr], dim=1)
for i in range(0, self.n_layers):
h, _ = self._modules["gcl_%d" % i](h, edge_index, edge_attr=edge_attr, node_mask=node_mask, edge_mask=edge_mask)
x = self._modules["gcl_equiv"](h, x, edge_index, coord_diff, edge_attr, node_mask, edge_mask)
# Important, the bias of the last linear might be non-zero
if node_mask is not None:
h = h * node_mask
return h, x
class EGNN(nn.Module):
def __init__(self, in_node_nf, in_edge_nf, hidden_nf, device='cpu', act_fn=nn.SiLU(), n_layers=3, attention=False,
norm_diff=True, out_node_nf=None, tanh=False, coords_range=15, norm_constant=1, inv_sublayers=2,
sin_embedding=False, normalization_factor=100, aggregation_method='sum'):
super(EGNN, self).__init__()
if out_node_nf is None:
out_node_nf = in_node_nf
self.hidden_nf = hidden_nf
self.device = device
self.n_layers = n_layers
self.coords_range_layer = float(coords_range/n_layers)
self.norm_diff = norm_diff
self.normalization_factor = normalization_factor
self.aggregation_method = aggregation_method
if sin_embedding:
self.sin_embedding = SinusoidsEmbeddingNew()
edge_feat_nf = self.sin_embedding.dim * 2
else:
self.sin_embedding = None
edge_feat_nf = 2
self.embedding = nn.Linear(in_node_nf, self.hidden_nf)
self.embedding_out = nn.Linear(self.hidden_nf, out_node_nf)
for i in range(0, n_layers):
self.add_module("e_block_%d" % i, EquivariantBlock(hidden_nf, edge_feat_nf=edge_feat_nf, device=device,
act_fn=act_fn, n_layers=inv_sublayers,
attention=attention, norm_diff=norm_diff, tanh=tanh,
coords_range=coords_range, norm_constant=norm_constant,
sin_embedding=self.sin_embedding,
normalization_factor=self.normalization_factor,
aggregation_method=self.aggregation_method))
self.to(self.device)
def forward(self, h, x, edge_index, node_mask=None, edge_mask=None):
# Edit Emiel: Remove velocity as input
distances, _ = coord2diff(x, edge_index)
if self.sin_embedding is not None:
distances = self.sin_embedding(distances)
h = self.embedding(h)
for i in range(0, self.n_layers):
h, x = self._modules["e_block_%d" % i](h, x, edge_index, node_mask=node_mask, edge_mask=edge_mask, edge_attr=distances)
# Important, the bias of the last linear might be non-zero
h = self.embedding_out(h)
if node_mask is not None:
h = h * node_mask
        return h, x

    # Shape sketch for EGNN.forward (illustrative, matching how the dynamics
    # models in this repo call it): with B graphs of N nodes flattened,
    #   h:          (B*N, in_node_nf)  node features
    #   x:          (B*N, 3)           coordinates
    #   edge_index: [rows, cols], each of length B*N*N (fully connected)
    #   node_mask:  (B*N, 1), edge_mask: (B*N*N, 1)
    # The returned tensors have shapes (B*N, out_node_nf) and (B*N, 3).
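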
class GNN(nn.Module):
def __init__(self, in_node_nf, in_edge_nf, hidden_nf, aggregation_method='sum', device='cpu',
act_fn=nn.SiLU(), n_layers=4, attention=False,
normalization_factor=1, out_node_nf=None):
super(GNN, self).__init__()
if out_node_nf is None:
out_node_nf = in_node_nf
self.hidden_nf = hidden_nf
self.device = device
self.n_layers = n_layers
### Encoder
self.embedding = nn.Linear(in_node_nf, self.hidden_nf)
self.embedding_out = nn.Linear(self.hidden_nf, out_node_nf)
for i in range(0, n_layers):
self.add_module("gcl_%d" % i, GCL(
self.hidden_nf, self.hidden_nf, self.hidden_nf,
normalization_factor=normalization_factor,
aggregation_method=aggregation_method,
edges_in_d=in_edge_nf, act_fn=act_fn,
attention=attention))
self.to(self.device)
def forward(self, h, edges, edge_attr=None, node_mask=None, edge_mask=None):
# Edit Emiel: Remove velocity as input
h = self.embedding(h)
for i in range(0, self.n_layers):
h, _ = self._modules["gcl_%d" % i](h, edges, edge_attr=edge_attr, node_mask=node_mask, edge_mask=edge_mask)
h = self.embedding_out(h)
# Important, the bias of the last linear might be non-zero
if node_mask is not None:
h = h * node_mask
return h
class SinusoidsEmbeddingNew(nn.Module):
def __init__(self, max_res=15., min_res=15. / 2000., div_factor=4):
super().__init__()
self.n_frequencies = int(math.log(max_res / min_res, div_factor)) + 1
self.frequencies = 2 * math.pi * div_factor ** torch.arange(self.n_frequencies)/max_res
self.dim = len(self.frequencies) * 2
def forward(self, x):
x = torch.sqrt(x + 1e-8)
emb = x * self.frequencies[None, :].to(x.device)
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb.detach()
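# With the defaults (max_res=15, min_res=15/2000, div_factor=4) the embedding
# uses int(log_4(2000)) + 1 = 6 frequencies, i.e. dim = 12 (a sin and a cos
# per frequency). Note that forward() expects *squared* distances, as produced
# by coord2diff below, and takes the square root internally.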
def coord2diff(x, edge_index, norm_constant=1):
row, col = edge_index
coord_diff = x[row] - x[col]
radial = torch.sum((coord_diff) ** 2, 1).unsqueeze(1)
norm = torch.sqrt(radial + 1e-8)
coord_diff = coord_diff/(norm + norm_constant)
return radial, coord_diff
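# Worked example for coord2diff (illustrative): for a single edge 0 -> 1,
#
#   x = torch.tensor([[0., 0., 0.], [3., 4., 0.]])
#   edge_index = [torch.tensor([0]), torch.tensor([1])]
#   radial, diff = coord2diff(x, edge_index)
#   # radial ~ [[25.]] (squared edge length), and with norm_constant=1
#   # diff ~ [[-0.5, -0.6667, 0.]] = (x[0] - x[1]) / (5 + 1)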
def unsorted_segment_sum(data, segment_ids, num_segments, normalization_factor, aggregation_method: str):
"""Custom PyTorch op to replicate TensorFlow's `unsorted_segment_sum`.
Normalization: 'sum' or 'mean'.
"""
result_shape = (num_segments, data.size(1))
result = data.new_full(result_shape, 0) # Init empty result tensor.
segment_ids = segment_ids.unsqueeze(-1).expand(-1, data.size(1))
result.scatter_add_(0, segment_ids, data)
if aggregation_method == 'sum':
result = result / normalization_factor
if aggregation_method == 'mean':
norm = data.new_zeros(result.shape)
norm.scatter_add_(0, segment_ids, data.new_ones(data.shape))
norm[norm == 0] = 1
result = result / norm
return result
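# Minimal sanity check (illustrative): with 'sum' aggregation and
# normalization_factor=1 this reduces to a plain scatter-add.
#
#   data = torch.tensor([[1., 2.], [3., 4.], [5., 6.]])
#   segment_ids = torch.tensor([0, 0, 1])
#   unsorted_segment_sum(data, segment_ids, num_segments=2,
#                        normalization_factor=1, aggregation_method='sum')
#   # -> tensor([[4., 6.], [5., 6.]])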
| 12,294 | 43.709091 | 131 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/egnn/egnn.py | import torch
from torch import Tensor
from torch import nn
import torch.nn.functional as F
class E_GCL(nn.Module):
"""Graph Neural Net with global state and fixed number of nodes per graph.
Args:
hidden_dim: Number of hidden units.
num_nodes: Maximum number of nodes (for self-attentive pooling).
global_agg: Global aggregation function ('attn' or 'sum').
temp: Softmax temperature.
"""
def __init__(self, input_nf, output_nf, hidden_nf, edges_in_d=0, nodes_att_dim=0, act_fn=nn.SiLU(), attention=False, norm_diff=True, tanh=False, coords_range=1, norm_constant=0):
super(E_GCL, self).__init__()
input_edge = input_nf * 2
self.attention = attention
self.norm_diff = norm_diff
self.tanh = tanh
self.norm_constant = norm_constant
edge_coords_nf = 1
self.edge_mlp = nn.Sequential(
nn.Linear(input_edge + edge_coords_nf + edges_in_d, hidden_nf),
act_fn,
nn.Linear(hidden_nf, hidden_nf),
act_fn)
self.node_mlp = nn.Sequential(
nn.Linear(hidden_nf + input_nf + nodes_att_dim, hidden_nf),
act_fn,
nn.Linear(hidden_nf, output_nf))
layer = nn.Linear(hidden_nf, 1, bias=False)
torch.nn.init.xavier_uniform_(layer.weight, gain=0.001)
coord_mlp = []
coord_mlp.append(nn.Linear(hidden_nf, hidden_nf))
coord_mlp.append(act_fn)
coord_mlp.append(layer)
if self.tanh:
coord_mlp.append(nn.Tanh())
self.coords_range = coords_range
self.coord_mlp = nn.Sequential(*coord_mlp)
if self.attention:
self.att_mlp = nn.Sequential(
nn.Linear(hidden_nf, 1),
nn.Sigmoid())
def edge_model(self, source, target, radial, edge_attr, edge_mask):
if edge_attr is None: # Unused.
out = torch.cat([source, target, radial], dim=1)
else:
out = torch.cat([source, target, radial, edge_attr], dim=1)
out = self.edge_mlp(out)
if self.attention:
att_val = self.att_mlp(out)
out = out * att_val
if edge_mask is not None:
out = out * edge_mask
return out
def node_model(self, x, edge_index, edge_attr, node_attr):
row, col = edge_index
agg = unsorted_segment_sum(edge_attr, row, num_segments=x.size(0))
if node_attr is not None:
agg = torch.cat([x, agg, node_attr], dim=1)
else:
agg = torch.cat([x, agg], dim=1)
out = x + self.node_mlp(agg)
return out, agg
def coord_model(self, coord, edge_index, coord_diff, radial, edge_feat, node_mask, edge_mask):
row, col = edge_index
if self.tanh:
trans = coord_diff * self.coord_mlp(edge_feat) * self.coords_range
else:
trans = coord_diff * self.coord_mlp(edge_feat)
if edge_mask is not None:
trans = trans * edge_mask
agg = unsorted_segment_sum(trans, row, num_segments=coord.size(0))
coord = coord + agg
return coord
def forward(self, h, edge_index, coord, edge_attr=None, node_attr=None, node_mask=None, edge_mask=None):
row, col = edge_index
radial, coord_diff = self.coord2radial(edge_index, coord)
edge_feat = self.edge_model(h[row], h[col], radial, edge_attr, edge_mask)
coord = self.coord_model(coord, edge_index, coord_diff, radial, edge_feat, node_mask, edge_mask)
h, agg = self.node_model(h, edge_index, edge_feat, node_attr)
# coord = self.node_coord_model(h, coord)
# x = self.node_model(x, edge_index, x[col], u, batch) # GCN
if node_mask is not None:
h = h * node_mask
coord = coord * node_mask
return h, coord, edge_attr
def coord2radial(self, edge_index, coord):
row, col = edge_index
coord_diff = coord[row] - coord[col]
radial = torch.sum((coord_diff)**2, 1).unsqueeze(1)
norm = torch.sqrt(radial + 1e-8)
coord_diff = coord_diff/(norm + self.norm_constant)
return radial, coord_diff
class EGNN(nn.Module):
def __init__(self, in_node_nf, in_edge_nf, hidden_nf, device='cpu', act_fn=nn.SiLU(), n_layers=4, recurrent=True, attention=False, norm_diff=True, out_node_nf=None, tanh=False, coords_range=15, agg='sum', norm_constant=0, inv_sublayers=1, sin_embedding=False):
super(EGNN, self).__init__()
if out_node_nf is None:
out_node_nf = in_node_nf
self.hidden_nf = hidden_nf
self.device = device
self.n_layers = n_layers
self.coords_range_layer = float(coords_range)/self.n_layers
if agg == 'mean':
self.coords_range_layer = self.coords_range_layer * 19
#self.reg = reg
### Encoder
#self.add_module("gcl_0", E_GCL(in_node_nf, self.hidden_nf, self.hidden_nf, edges_in_d=in_edge_nf, act_fn=act_fn, recurrent=False, coords_weight=coords_weight))
self.embedding = nn.Linear(in_node_nf, self.hidden_nf)
self.embedding_out = nn.Linear(self.hidden_nf, out_node_nf)
for i in range(0, n_layers):
self.add_module("gcl_%d" % i, E_GCL(self.hidden_nf, self.hidden_nf, self.hidden_nf, edges_in_d=in_edge_nf, act_fn=act_fn, attention=attention, norm_diff=norm_diff, tanh=tanh, coords_range=self.coords_range_layer, norm_constant=norm_constant))
self.to(self.device)
def forward(self, h, x, edges, edge_attr=None, node_mask=None, edge_mask=None):
# Edit Emiel: Remove velocity as input
edge_attr = torch.sum((x[edges[0]] - x[edges[1]]) ** 2, dim=1, keepdim=True)
h = self.embedding(h)
for i in range(0, self.n_layers):
h, x, _ = self._modules["gcl_%d" % i](h, edges, x, edge_attr=edge_attr, node_mask=node_mask, edge_mask=edge_mask)
h = self.embedding_out(h)
# Important, the bias of the last linear might be non-zero
if node_mask is not None:
h = h * node_mask
return h, x
def unsorted_segment_sum(data, segment_ids, num_segments):
"""Custom PyTorch op to replicate TensorFlow's `unsorted_segment_sum`."""
result_shape = (num_segments, data.size(1))
result = data.new_full(result_shape, 0) # Init empty result tensor.
segment_ids = segment_ids.unsqueeze(-1).expand(-1, data.size(1))
result.scatter_add_(0, segment_ids, data)
return result
class EGNN_old(nn.Module):
def __init__(self, in_node_nf, in_edge_nf, hidden_nf, device='cpu', act_fn=nn.SiLU(), n_layers=4, recurrent=True, attention=False, norm_diff=True, out_node_nf=None, tanh=False, coords_range=15, agg='sum'):
super(EGNN_old, self).__init__()
if out_node_nf is None:
out_node_nf = in_node_nf
self.hidden_nf = hidden_nf
self.device = device
self.n_layers = n_layers
self.coords_range_layer = float(coords_range)/self.n_layers
if agg == 'mean':
self.coords_range_layer = self.coords_range_layer * 19
#self.reg = reg
### Encoder
#self.add_module("gcl_0", E_GCL(in_node_nf, self.hidden_nf, self.hidden_nf, edges_in_d=in_edge_nf, act_fn=act_fn, recurrent=False, coords_weight=coords_weight))
self.embedding = nn.Linear(in_node_nf, self.hidden_nf)
self.embedding_out = nn.Linear(self.hidden_nf, out_node_nf)
for i in range(0, n_layers):
self.add_module("gcl_%d" % i, E_GCL(self.hidden_nf, self.hidden_nf, self.hidden_nf, edges_in_d=in_edge_nf, act_fn=act_fn, attention=attention, norm_diff=norm_diff, tanh=tanh, coords_range=self.coords_range_layer))
self.to(self.device)
def forward(self, h, x, edges, edge_attr=None, node_mask=None, edge_mask=None):
# Edit Emiel: Remove velocity as input
edge_attr = torch.sum((x[edges[0]] - x[edges[1]]) ** 2, dim=1, keepdim=True)
h = self.embedding(h)
for i in range(0, self.n_layers):
h, x, _ = self._modules["gcl_%d" % i](h, edges, x, edge_attr=edge_attr, node_mask=node_mask, edge_mask=edge_mask)
h = self.embedding_out(h)
# Important, the bias of the last linear might be non-zero
if node_mask is not None:
h = h * node_mask
return h, x
class GNN(nn.Module):
def __init__(self, in_node_nf, in_edge_nf, hidden_nf, device='cpu', act_fn=nn.SiLU(), n_layers=4,
attention=False, out_node_nf=None):
super(GNN, self).__init__()
if out_node_nf is None:
out_node_nf = in_node_nf
self.hidden_nf = hidden_nf
self.device = device
self.n_layers = n_layers
### Encoder
self.embedding = nn.Linear(in_node_nf, self.hidden_nf)
self.embedding_out = nn.Linear(self.hidden_nf, out_node_nf)
for i in range(0, n_layers):
self.add_module("gcl_%d" % i, GCL(self.hidden_nf, self.hidden_nf, self.hidden_nf, edges_in_d=in_edge_nf,
act_fn=act_fn, attention=attention))
self.to(self.device)
def forward(self, h, edges, edge_attr=None, node_mask=None, edge_mask=None):
# Edit Emiel: Remove velocity as input
h = self.embedding(h)
for i in range(0, self.n_layers):
h, _ = self._modules["gcl_%d" % i](h, edges, edge_attr=edge_attr, node_mask=node_mask,
edge_mask=edge_mask)
h = self.embedding_out(h)
# Important, the bias of the last linear might be non-zero
if node_mask is not None:
h = h * node_mask
return h
class TransformerNN(nn.Module):
def __init__(self, in_node_nf, in_edge_nf, hidden_nf, device='cpu', act_fn=nn.SiLU(), n_layers=4, recurrent=True, attention=False, norm_diff=True, out_node_nf=None, tanh=False, coords_range=15, agg='sum', norm_constant=0):
        super(TransformerNN, self).__init__()
hidden_initial = 128
initial_mlp_layers = 1
hidden_final = 128
final_mlp_layers = 2
n_heads = 8
dim_feedforward = 512
if out_node_nf is None:
out_node_nf = in_node_nf
self.hidden_nf = hidden_nf
self.device = device
self.n_layers = n_layers
self.initial_mlp = MLP(in_node_nf, hidden_nf, hidden_initial, initial_mlp_layers, skip=1, bias=True)
        self.decoder_layers = nn.ModuleList()
        # use_bn and res were referenced below but never set; default them so
        # the module is at least constructible (no batch norm, residuals on).
        self.use_bn = False
        self.res = True
        if self.use_bn:
            self.bn_layers = nn.ModuleList()
        for _ in range(n_layers):
            self.decoder_layers.append(nn.TransformerEncoderLayer(hidden_nf, n_heads, dim_feedforward, dropout=0))
self.final_mlp = MLP(hidden_nf, out_node_nf, hidden_final, final_mlp_layers, skip=1, bias=True)
self.to(self.device)
def forward(self, h, x, edges, edge_attr=None, node_mask=None, edge_mask=None):
""" x: batch_size, n, channels
latent: batch_size, channels2. """
x = F.relu(self.initial_mlp(x))
for i in range(len(self.decoder_layers)):
out = F.relu(self.decoder_layers[i](x)) # [bs, n, d]
if self.use_bn and type(x).__name__ != 'TransformerEncoderLayer':
out = self.bn_layers[i](out.transpose(1, 2)).transpose(1, 2) # bs, n, hidden
x = out + x if self.res and type(x).__name__ != 'TransformerEncoderLayer' else out
return x
class SetDecoder(nn.Module):
def __init__(self, cfg):
super().__init__()
hidden, hidden_final = cfg.hidden_decoder, cfg.hidden_last_decoder
self.use_bn = cfg.use_batch_norm
self.res = cfg.use_residual
self.cosine_channels = cfg.cosine_channels
self.initial_mlp = MLP(cfg.set_channels,
cfg.hidden_decoder,
cfg.hidden_initial_decoder,
cfg.initial_mlp_layers_decoder,
skip=1, bias=True)
self.decoder_layers = nn.ModuleList()
if self.use_bn:
self.bn_layers = nn.ModuleList()
for layer in cfg.decoder_layers:
self.decoder_layers.append(create_layer(layer, hidden, hidden, cfg))
if self.use_bn:
self.bn_layers.append(nn.BatchNorm1d(hidden))
def forward(self, x, latent):
""" x: batch_size, n, channels
latent: batch_size, channels2. """
x = F.relu(self.initial_mlp(x, latent[:, self.cosine_channels:].unsqueeze(1)))
for i in range(len(self.decoder_layers)):
out = F.relu(self.decoder_layers[i](x)) # [bs, n, d]
if self.use_bn and type(x).__name__ != 'TransformerEncoderLayer':
out = self.bn_layers[i](out.transpose(1, 2)).transpose(1, 2) # bs, n, hidden
x = out + x if self.res and type(x).__name__ != 'TransformerEncoderLayer' else out
return x
class MLP(nn.Module):
def __init__(self, dim_in: int, dim_out: int, width: int, nb_layers: int, skip=1, bias=True):
"""
Args:
dim_in: input dimension
dim_out: output dimension
width: hidden width
nb_layers: number of layers
skip: jump from residual connections
bias: indicates presence of bias
"""
super(MLP, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.width = width
self.nb_layers = nb_layers
self.hidden = nn.ModuleList()
self.lin1 = nn.Linear(self.dim_in, width, bias)
self.skip = skip
self.residual_start = dim_in == width
self.residual_end = dim_out == width
for i in range(nb_layers-2):
self.hidden.append(nn.Linear(width, width, bias))
self.lin_final = nn.Linear(width, dim_out, bias)
def forward(self, x: Tensor):
out = self.lin1(x)
out = F.relu(out) + (x if self.residual_start else 0)
for layer in self.hidden:
out = out + layer(F.relu(out))
out = self.lin_final(F.relu(out)) + (out if self.residual_end else 0)
return out | 14,826 | 41.976812 | 264 | py |
e3_diffusion_for_molecules | e3_diffusion_for_molecules-main/egnn/models.py | import torch
import torch.nn as nn
from egnn.egnn_new import EGNN, GNN
from equivariant_diffusion.utils import remove_mean, remove_mean_with_mask
import numpy as np
class EGNN_dynamics_QM9(nn.Module):
def __init__(self, in_node_nf, context_node_nf,
n_dims, hidden_nf=64, device='cpu',
act_fn=torch.nn.SiLU(), n_layers=4, attention=False,
condition_time=True, tanh=False, mode='egnn_dynamics', norm_constant=0,
inv_sublayers=2, sin_embedding=False, normalization_factor=100, aggregation_method='sum'):
super().__init__()
self.mode = mode
if mode == 'egnn_dynamics':
self.egnn = EGNN(
in_node_nf=in_node_nf + context_node_nf, in_edge_nf=1,
hidden_nf=hidden_nf, device=device, act_fn=act_fn,
n_layers=n_layers, attention=attention, tanh=tanh, norm_constant=norm_constant,
inv_sublayers=inv_sublayers, sin_embedding=sin_embedding,
normalization_factor=normalization_factor,
aggregation_method=aggregation_method)
self.in_node_nf = in_node_nf
elif mode == 'gnn_dynamics':
self.gnn = GNN(
in_node_nf=in_node_nf + context_node_nf + 3, in_edge_nf=0,
hidden_nf=hidden_nf, out_node_nf=3 + in_node_nf, device=device,
act_fn=act_fn, n_layers=n_layers, attention=attention,
normalization_factor=normalization_factor, aggregation_method=aggregation_method)
self.context_node_nf = context_node_nf
self.device = device
self.n_dims = n_dims
self._edges_dict = {}
self.condition_time = condition_time
def forward(self, t, xh, node_mask, edge_mask, context=None):
raise NotImplementedError
def wrap_forward(self, node_mask, edge_mask, context):
def fwd(time, state):
return self._forward(time, state, node_mask, edge_mask, context)
return fwd
def unwrap_forward(self):
return self._forward
def _forward(self, t, xh, node_mask, edge_mask, context):
bs, n_nodes, dims = xh.shape
h_dims = dims - self.n_dims
edges = self.get_adj_matrix(n_nodes, bs, self.device)
edges = [x.to(self.device) for x in edges]
node_mask = node_mask.view(bs*n_nodes, 1)
edge_mask = edge_mask.view(bs*n_nodes*n_nodes, 1)
xh = xh.view(bs*n_nodes, -1).clone() * node_mask
x = xh[:, 0:self.n_dims].clone()
if h_dims == 0:
h = torch.ones(bs*n_nodes, 1).to(self.device)
else:
h = xh[:, self.n_dims:].clone()
if self.condition_time:
if np.prod(t.size()) == 1:
# t is the same for all elements in batch.
h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())
else:
# t is different over the batch dimension.
h_time = t.view(bs, 1).repeat(1, n_nodes)
h_time = h_time.view(bs * n_nodes, 1)
h = torch.cat([h, h_time], dim=1)
if context is not None:
# We're conditioning, awesome!
context = context.view(bs*n_nodes, self.context_node_nf)
h = torch.cat([h, context], dim=1)
if self.mode == 'egnn_dynamics':
h_final, x_final = self.egnn(h, x, edges, node_mask=node_mask, edge_mask=edge_mask)
vel = (x_final - x) * node_mask # This masking operation is redundant but just in case
elif self.mode == 'gnn_dynamics':
xh = torch.cat([x, h], dim=1)
output = self.gnn(xh, edges, node_mask=node_mask)
vel = output[:, 0:3] * node_mask
h_final = output[:, 3:]
else:
raise Exception("Wrong mode %s" % self.mode)
if context is not None:
# Slice off context size:
h_final = h_final[:, :-self.context_node_nf]
if self.condition_time:
# Slice off last dimension which represented time.
h_final = h_final[:, :-1]
vel = vel.view(bs, n_nodes, -1)
if torch.any(torch.isnan(vel)):
print('Warning: detected nan, resetting EGNN output to zero.')
vel = torch.zeros_like(vel)
if node_mask is None:
vel = remove_mean(vel)
else:
vel = remove_mean_with_mask(vel, node_mask.view(bs, n_nodes, 1))
if h_dims == 0:
return vel
else:
h_final = h_final.view(bs, n_nodes, -1)
return torch.cat([vel, h_final], dim=2)
def get_adj_matrix(self, n_nodes, batch_size, device):
if n_nodes in self._edges_dict:
edges_dic_b = self._edges_dict[n_nodes]
if batch_size in edges_dic_b:
return edges_dic_b[batch_size]
else:
# get edges for a single sample
rows, cols = [], []
for batch_idx in range(batch_size):
for i in range(n_nodes):
for j in range(n_nodes):
rows.append(i + batch_idx * n_nodes)
cols.append(j + batch_idx * n_nodes)
edges = [torch.LongTensor(rows).to(device),
torch.LongTensor(cols).to(device)]
edges_dic_b[batch_size] = edges
return edges
else:
self._edges_dict[n_nodes] = {}
return self.get_adj_matrix(n_nodes, batch_size, device)
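    # Illustrative: get_adj_matrix(n_nodes=2, batch_size=2, ...) returns a
    # fully connected graph (self-loops included) per sample, with node
    # indices offset by batch:
    #   rows = [0, 0, 1, 1, 2, 2, 3, 3]
    #   cols = [0, 1, 0, 1, 2, 3, 2, 3]
    # Self-loop edges are typically zeroed out downstream via edge_mask.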
| 5,555 | 40.155556 | 107 | py |
deep_bingham | deep_bingham-master/evaluate.py | import argparse
import os
import torch
import torchvision.transforms as transforms
import yaml
import data_loaders
import modules.network
from modules import angular_loss, BinghamFixedDispersionLoss, \
BinghamHybridLoss, BinghamLoss, BinghamMixtureLoss, \
CosineLoss, MSELoss, VonMisesLoss, VonMisesFixedKappaLoss
from utils.evaluation import run_evaluation
DEFAULT_CONFIG = os.path.dirname(__file__) + "configs/upna_train.yaml"
LOSS_FUNCTIONS = {'mse': MSELoss,
'bingham': BinghamLoss,
'bingham_mdn': BinghamMixtureLoss,
'von_mises': VonMisesLoss,
'cosine': CosineLoss}
def get_dataset(config):
"""Returns the test data using the provided configuration"""
data_loader = config["data_loader"]
size = data_loader["input_size"]
data_transforms = transforms.Compose([transforms.CenterCrop(600),
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
data_transforms_idiap = transforms.Compose([
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
if data_loader["name"] == "UPNAHeadPose":
dataset = data_loaders.UpnaHeadPoseTrainTest(
data_loader["config"], data_transforms)
test_dataset = dataset.test
elif data_loader["name"] == "T_Less":
dataset = data_loaders.TLessTrainTest(data_loader["config"],
data_transforms_idiap)
test_dataset = dataset.test
else:
dataset = data_loaders.IDIAPTrainTest(
data_loader["config"], data_transforms_idiap)
test_dataset = dataset.test
return test_dataset
def get_data_loader(config, batch_size):
    """Returns a test data loader built from the given configuration."""
    dataset = get_dataset(config)
test_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=False)
return test_loader
def main():
"""Loads arguments and starts testing."""
parser = argparse.ArgumentParser(
description="Deep Orientation Estimation")
parser.add_argument('-c', '--config', default=DEFAULT_CONFIG, type=str)
args = parser.parse_args()
config_file = args.config
# Load config
assert os.path.exists(args.config), "Config file {} does not exist".format(
args.config)
with open(config_file) as fp:
        config = yaml.safe_load(fp)
if "loss_parameters" in config["test"]:
loss_parameters = config["test"]["loss_parameters"]
else:
loss_parameters = None
device = torch.device(config["test"][
"device"] if torch.cuda.is_available() else "cpu")
print("Using device: {}".format(device))
num_classes = config["test"]["num_outputs"]
# Build model architecture
num_channels = config["test"]["num_channels"]
model_name = config["test"]["model"]
model = modules.network.get_model(name=model_name,
pretrained=True,
num_channels=num_channels,
num_classes=num_classes)
model.to(device)
print("Model name: {}".format(model_name))
model_path = config["test"]["model_path"]
if os.path.isfile(model_path):
print("Loading model {}".format(model_path))
checkpoint = torch.load(model_path)
model.load_state_dict(checkpoint["state_dict"])
    else:
        raise FileNotFoundError("No model found at {}".format(model_path))
# Get data loader
batch_size = 32
test_loader = get_data_loader(config, batch_size)
loss_function_name = config["test"]["loss_function"]
dataset_name = config["data_loader"]["name"]
if loss_parameters:
criterion = LOSS_FUNCTIONS[loss_function_name](**loss_parameters)
else:
criterion = LOSS_FUNCTIONS[loss_function_name]()
if "floating_point_type" in config["test"]:
floating_point_type = config["test"]["floating_point_type"]
else:
floating_point_type = "float"
if floating_point_type == "double":
model.double()
run_evaluation(
model, test_loader, criterion,
device, floating_point_type
)
if __name__ == '__main__':
main()
| 4,542 | 30.769231 | 79 | py |
deep_bingham | deep_bingham-master/train.py | """
Deep Orientation Estimation Training
"""
import argparse
import os
import sys
import torch
import torch.optim as optim
import torchvision.transforms as transforms
import yaml
from tensorboardX import SummaryWriter
import data_loaders
import modules.network
from modules import BinghamLoss, BinghamMixtureLoss, \
VonMisesLoss, MSELoss, CosineLoss
from training import Trainer
torch.manual_seed(0)
DEFAULT_CONFIG = os.path.dirname(__file__) + "configs/upna_train.yaml"
LOSS_FUNCTIONS = {'mse': MSELoss,
'bingham': BinghamLoss,
'bingham_mdn': BinghamMixtureLoss,
'von_mises': VonMisesLoss,
'cosine': CosineLoss}
def get_dataset(config):
""" Returns the training data using the provided configuration."""
data_loader = config["data_loader"]
size = data_loader["input_size"]
data_transforms = transforms.Compose([
transforms.CenterCrop(600),
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
data_transforms_idiap = transforms.Compose([
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
data_transforms_depth = transforms.Compose([
transforms.Resize((size, size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485], std=[0.229])
])
if data_loader["name"] == "UPNAHeadPose":
dataset = data_loaders.UpnaHeadPoseTrainTest(
data_loader["config"], data_transforms)
train_dataset = dataset.train
elif data_loader["name"] == "IDIAP":
dataset = data_loaders.IDIAPTrainTest(data_loader["config"],
data_transforms_idiap)
train_dataset = dataset.train
elif data_loader["name"] == "T_Less":
dataset = data_loaders.TLessTrainTest(
data_loader["config"], data_transforms_idiap)
train_dataset = dataset.train
else:
sys.exit("Unknown data loader " + config['data_loader']["name"] + ".")
training_size = int(len(train_dataset) * 0.90)
val_size = len(train_dataset) - training_size
train_dataset, val_dataset = torch.utils.data.random_split(train_dataset, [training_size, val_size])
return train_dataset, val_dataset
def main():
""" Loads arguments and starts training."""
parser = argparse.ArgumentParser(description="Deep Orientation Estimation")
parser.add_argument('-c', '--config', default=DEFAULT_CONFIG, type=str)
args = parser.parse_args()
config_file = args.config
# Load config
assert os.path.exists(args.config), "Config file {} does not exist".format(
args.config)
with open(config_file) as fp:
        config = yaml.safe_load(fp)
if not os.path.exists(config["train"]["save_dir"]):
os.makedirs(config["train"]["save_dir"])
device = torch.device(
config["train"]["device"] if torch.cuda.is_available() else "cpu")
print("Using device: {}".format(device))
# Build model architecture
num_channels = config["train"]["num_channels"] or 3
model_name = config["train"]["model"] or 'vgg11'
num_classes = config["train"].get("num_outputs", None)
model = modules.network.get_model(name=model_name,
pretrained=True,
num_channels=num_channels,
num_classes=num_classes)
model.to(device)
print("Model name: {}".format(model_name))
# optionally resume from checkpoint
resume = config["train"]["resume"]
if resume:
if os.path.isfile(resume):
print("Loading checkpoint {}".format(resume))
checkpoint = torch.load(resume)
start_epoch = checkpoint["epoch"]
model.load_state_dict(checkpoint["state_dict"])
else:
start_epoch = 0
print("No checkpoint found at {}".format(resume))
else:
start_epoch = 0
# Get dataset
train_dataset, test_dataset = get_dataset(config)
b_size = config["train"]["batch_size"] or 4
# This should not be necessary but it surprisingly is. In the presence of a
# GPU, PyTorch tries to allocate GPU memory when pin_memory is set to true
# in the data loader. This happens even if training is to happen on CPU and
# all objects are on CPU.
if config["train"]["device"] != "cpu":
use_memory_pinning = True
else:
use_memory_pinning = False
validationloader = torch.utils.data.DataLoader(
test_dataset, batch_size=b_size, shuffle=True, num_workers=1,
pin_memory=use_memory_pinning)
trainloader = torch.utils.data.DataLoader(
train_dataset, batch_size=b_size, shuffle=True, num_workers=1,
pin_memory=use_memory_pinning)
print("batch size: {}".format(b_size))
# Define loss function (criterion) and optimizer
learning_rate = config["train"]["learning_rate"] or 0.0001
loss_function_name = config["train"]["loss_function"]
if "loss_parameters" in config["train"]:
loss_parameters = config["train"]["loss_parameters"]
else:
loss_parameters = None
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
print(optimizer)
# Set up tensorboard writer
writer_train = SummaryWriter(
"runs/{}/training".format(config["train"]["save_as"]))
writer_val = SummaryWriter(
"runs/{}/validation".format(config["train"]["save_as"]))
# Train the network
num_epochs = config["train"]["num_epochs"] or 2
print("Number of epochs: {}".format(num_epochs))
if loss_parameters is not None:
loss_function = LOSS_FUNCTIONS[loss_function_name](**loss_parameters)
else:
loss_function = LOSS_FUNCTIONS[loss_function_name]()
if "floating_point_type" in config["train"]:
floating_point_type = config["train"]["floating_point_type"]
else:
floating_point_type = "float"
trainer = Trainer(device, floating_point_type)
for epoch in range(start_epoch, num_epochs):
trainer.train_epoch(
trainloader, model, loss_function, optimizer,
epoch, writer_train, writer_val, validationloader)
save_checkpoint(
{'epoch': epoch + 1, 'state_dict': model.state_dict()},
filename=os.path.join(config["train"]["save_dir"],
'checkpoint_{}_{}.tar'.format(
model_name, epoch))
)
print('Finished training')
def save_checkpoint(state, filename='checkpoint.pth.tar'):
"""
Save the training model
"""
torch.save(state, filename)
if __name__ == '__main__':
main()
| 6,909 | 32.543689 | 105 | py |
deep_bingham | deep_bingham-master/modules/maad.py | import torch
from modules.gram_schmidt import gram_schmidt, gram_schmidt_batched
from modules.quaternion_matrix import quaternion_matrix
from utils.utils import \
convert_euler_to_quaternion
from modules.vm_operations import *
import math
def angular_loss_single_sample(target, predicted):
""" Returns the angle between two quaternions.
Note that for a quaternion q, -q = q so the
angle of rotation must be less than 180 degrees.
Inputs:
target = target quaternion
predicted = predicted quaternion
"""
quat_ang = torch.clamp(torch.abs(torch.dot(target, predicted)), min=0,
max=1)
acos_val = torch.acos(quat_ang)
diff_ang = acos_val * 2
return diff_ang
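# Worked example: the identity rotation q1 = [1, 0, 0, 0] versus a 90-degree
# rotation about z, q2 = [cos(pi/4), 0, 0, sin(pi/4)]. Then
# |<q1, q2>| = cos(pi/4), so the returned angle is 2 * acos(cos(pi/4)) = pi/2:
#
#   q1 = torch.tensor([1., 0., 0., 0.])
#   q2 = torch.tensor([math.cos(math.pi / 4), 0., 0., math.sin(math.pi / 4)])
#   angular_loss_single_sample(q1, q2)  # -> tensor(1.5708)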
def maad_mse(target, predicted):
"""
Computes the MAAD over a batch of
target, predicted quaternion pairs
Inputs
target = batch of target quaternions
predicted = batch of predicted quaternions
"""
angular_loss = 0
for i in range(target.shape[0]):
angular_loss += angular_loss_single_sample(target[i], predicted[i])
return angular_loss / target.shape[0]
def maad_cosine(target, predicted):
    """MAAD between Euler-angle targets (degrees) and (cos, sin) predictions."""
angular_dev = 0
for i in range(target.shape[0]):
angles = output_to_angles(predicted[i])
pan = torch.atan2(angles[1], angles[0])
tilt = torch.atan2(angles[3], angles[2])
roll = torch.atan2(angles[5], angles[4])
pan_target = target[i][0]
tilt_target = target[i][1]
roll_target = target[i][2]
target_quat = convert_euler_to_quaternion(pan_target, tilt_target,
roll_target)
predicted_quat = convert_euler_to_quaternion(math.degrees(pan),
math.degrees(tilt),
math.degrees(roll))
angular_dev += angular_loss_single_sample(torch.from_numpy(target_quat),
torch.from_numpy(
predicted_quat))
return angular_dev / target.shape[0]
def maad_biternion(target, predicted):
    """MAAD for outputs carrying kappas plus (cos, sin) angle predictions."""
angular_dev = 0
for i in range(target.shape[0]):
angles, kappas = output_to_angles_and_kappas(predicted[i])
pan = torch.atan2(angles[1], angles[0])
tilt = torch.atan2(angles[3], angles[2])
roll = torch.atan2(angles[5], angles[4])
pan_target = target[i][0]
tilt_target = target[i][1]
roll_target = target[i][2]
target_quat = convert_euler_to_quaternion(pan_target, tilt_target,
roll_target)
predicted_quat = convert_euler_to_quaternion(math.degrees(pan),
math.degrees(tilt),
math.degrees(roll))
angular_dev += angular_loss_single_sample(torch.from_numpy(target_quat),
torch.from_numpy(
predicted_quat))
return angular_dev / target.shape[0]
def maad_bingham(target, predicted, orthogonalization="gram_schmidt"):
""" Computes mean absolute angular deviation between a pair of quaternions
Parameters:
predicted (torch.Tensor): Output from network of shape (N, 16) if
orthogonalization is "gram_schmidt" and (N, 4) if it is
"quaternion_matrix".
target (torch.Tensor): Ground truth of shape N x 4
orthogonalization (str): Orthogonalization method to use. Can be
"gram_schmidt" for usage of the classical gram-schmidt method.
"modified_gram_schmidt" for a more robust variant, or
"quaternion_matrix" for usage of a orthogonal matrix representation
of an output quaternion.
"""
angular_dev = 0
if orthogonalization == "gram_schmidt":
batch_size = target.shape[0]
reshaped_output = predicted.reshape(batch_size, 4, 4)
param_m = gram_schmidt_batched(reshaped_output)
for i in range(batch_size):
angular_dev += angular_loss_single_sample(
target[i], param_m[i, :, 3])
else:
for i in range(target.shape[0]):
if orthogonalization == "modified_gram_schmidt":
reshaped_output = predicted[i][-16:].reshape(4, 4)
param_m = gram_schmidt(reshaped_output, modified=True)
elif orthogonalization == "quaternion_matrix":
param_m = quaternion_matrix(predicted[i])
else:
raise ValueError("Invalid orthogonalization method.")
angular_dev += angular_loss_single_sample(target[i], param_m[:, 3])
return angular_dev / target.shape[0]
| 4,931 | 37.834646 | 80 | py |
deep_bingham | deep_bingham-master/modules/vm_operations.py | import torch
def output_to_kappas(output):
    zero_vec = torch.zeros(len(output), 3, device=output.device)
kappas = torch.where(output[:, :3] > 0, output[:, :3], zero_vec)
return kappas
def output_to_angles(output):
pan = normalize_cosine_sine(output[:2])
tilt = normalize_cosine_sine(output[2:4])
roll = normalize_cosine_sine(output[4:])
angles = torch.cat((pan, tilt, roll), 0)
return angles
def output_to_angles_and_kappas(output):
pan = normalize_cosine_sine(output[3:5])
tilt = normalize_cosine_sine(output[5:7])
roll = normalize_cosine_sine(output[7:])
angles = torch.cat((pan, tilt, roll), 0)
    zero_vec = torch.zeros(3, device=output.device)
kappas = torch.where(output[:3] > 0, output[:3], zero_vec)
return angles, kappas
def normalize_cosine_sine(angle_tensor):
return angle_tensor / torch.sqrt(torch.sum(torch.pow(angle_tensor, 2)))
| 1,093 | 27.051282 | 75 | py |
deep_bingham | deep_bingham-master/modules/bingham_mixture_loss.py | """Implementation of the Bingham Mixture Loss"""
import torch
from .maad import angular_loss_single_sample
from .bingham_fixed_dispersion import BinghamFixedDispersionLoss
from .bingham_loss import BinghamLoss
from .gram_schmidt import gram_schmidt_batched
from utils import vec_to_bingham_z_many
class BinghamMixtureLoss(object):
""" Bingham Mixture Loss
Computes the log likelihood bingham mixture loss on a batch. Can be
configured such that for a predefined number of epochs
Arguments:
lookup_table_file (str): Path to the location of the lookup table.
mixture_component_count (int): Number of Bingham mixture components.
interpolation_kernel (str): The kernel to use for rbf interpolaition
(can be "multiquadric" or "gaussian").
fixed_dispersion_stage (int): Number of epochs in which the network is
trained using a fixed dispersion parameter z.
fixed_param_z (list): The fixed dispersion parameter Z used for all
mixture components during the fixed dispersion stage.
Inputs:
target (torch.Tensor): Target values at which the likelihood is
evaluated of shape (N, 4)
output (torch.Tensor): Output values from which M and Z are extracted of
shape (N, MIXTURE_COMPONENT_COUNT * 20). The first of the 20 values
per mixture component is for computing the weight of that component.
The remaining 19 are passed on to the BinghamLoss class.
"""
def __init__(self, lookup_table_file, mixture_component_count,
interpolation_kernel="multiquadric", fixed_dispersion_stage=25,
fixed_param_z=[-1, -1, -1, 0]):
self._num_components = mixture_component_count
self._fixed_dispersion_stage = fixed_dispersion_stage
self._softmax = torch.nn.Softmax(dim=1)
self._bingham_fixed_dispersion_loss = BinghamFixedDispersionLoss(
fixed_param_z, orthogonalization="gram_schmidt")
self._bingham_loss = BinghamLoss(
lookup_table_file, interpolation_kernel,
orthogonalization="gram_schmidt")
def __call__(self, target, output, epoch):
batch_size = output.shape[0]
weights = self._softmax(output[:, 0:-1:20])
log_likelihood = torch.tensor(0., device=output.device, dtype=output.dtype)
for i in range(batch_size):
current_likelihood = torch.tensor(
0., device=output.device, dtype=output.dtype)
for j in range(self._num_components):
if epoch < self._fixed_dispersion_stage:
bd_log_likelihood = self._bingham_fixed_dispersion_loss(
target[i].unsqueeze(0),
output[i, (j*20+4):((j+1)*20)].unsqueeze(0))[1]
else:
bd_log_likelihood = self._bingham_loss(
target[i].unsqueeze(0),
output[i, (j*20+1):((j+1)*20)].unsqueeze(0))[1]
current_likelihood += weights[i, j] * \
torch.exp(bd_log_likelihood).squeeze()
log_likelihood += torch.log(current_likelihood)
loss = -log_likelihood
log_likelihood /= batch_size
return loss, log_likelihood
def statistics(self, target, output, epoch):
""" Reports some additional loss statistics.
Arguments:
target (torch.Tensor): Ground-truth shaped as loss input.
output (torch.Tensor): NN output shaped as loss output parameter.
epoch (int): Current epoch. Currently unused.
Returns:
stats (dict): Bingham parameters and angular deviation.
"""
batch_size = output.shape[0]
weights = self._softmax(output[:, 0:-1:20])
maad = torch.zeros(
batch_size, device=output.device, dtype=output.dtype)
mode_stats = dict()
for j in range(self._num_components):
bd_z = torch.mean(vec_to_bingham_z_many(
output[:, (j*20+1):(j*20+4)]
).squeeze(0), 0)
mode_stats["mode_" + str(j) + "_weight"] \
= float(torch.mean(weights[:, j]))
if epoch >= self._fixed_dispersion_stage:
mode_stats["mode_" + str(j) + "_z_0"] = float(bd_z[0])
mode_stats["mode_" + str(j) + "_z_1"] = float(bd_z[1])
mode_stats["mode_" + str(j) + "_z_2"] = float(bd_z[2])
param_m = torch.zeros((batch_size, self._num_components, 4, 4),
device=output.device, dtype=output.dtype)
for j in range(self._num_components):
param_m[:, j, :, :] = gram_schmidt_batched(
output[:, (j * 20 + 4):((j + 1) * 20)].reshape(batch_size, 4, 4)
)
# Setting mmaad to 10 such that the minimum succeeds in the first run.
mmaad = 10. * torch.ones(
batch_size, device=output.device, dtype=output.dtype)
for i in range(batch_size):
for j in range(self._num_components):
cur_angular_deviation = angular_loss_single_sample(
target[i], param_m[i, j, :, 3])
maad[i] += cur_angular_deviation * weights[i, j]
mmaad[i] = torch.min(mmaad[i], cur_angular_deviation)
maad = torch.mean(maad)
mmaad = torch.mean(mmaad)
stats = {
"maad": float(maad),
"mmaad": float(mmaad)
}
stats.update(mode_stats)
return stats
| 5,574 | 41.234848 | 83 | py |
deep_bingham | deep_bingham-master/modules/bingham_fixed_dispersion.py | import torch
from modules.gram_schmidt import gram_schmidt_batched
from modules.bingham_loss import batched_logprob
from modules.maad import maad_bingham
from modules.quaternion_matrix import quaternion_matrix
class BinghamFixedDispersionLoss(object):
"""
Class for calculating bingham loss assuming a fixed Z.
Parameters:
bd_z (list): Values of parameter matrix Z of size 3 (the bingham is four
dimensional but the last parameter is assumed to be 0). All must be
negative and in ascending order.
orthogonalization (str): Orthogonalization method to use. Can be
"gram_schmidt" for usage of the classical gram-schmidt method.
"modified_gram_schmidt" for a more robust variant, or
"quaternion_matrix" for usage of a orthogonal matrix representation
of an output quaternion.
"""
def __init__(self, bd_z, orthogonalization="gram_schmidt"):
self.name = "bingham_fixed_z"
self.bd_z = bd_z
self.orthogonalization = orthogonalization
def __call__(self, target, output):
"""
Calculates the bingham fixed dispersion log likelihood loss
on a batch of target-output values.
Inputs:
target: Target values at which the likelihood is evaluated
of shape (N, 4)
output: Output values from which M is computed, shape
(N, 16) if orthogonalization is "gram_schmidt" and (N, 4) if it
is "quaternion_matrix".
Result:
loss: The loss of the current batch.
log_likelihood: Average log likelihood.
"""
        if not isinstance(self.bd_z, torch.Tensor):
            bd_z = torch.tensor([
                [self.bd_z[0], 0, 0, 0],
                [0, self.bd_z[1], 0, 0],
                [0, 0, self.bd_z[2], 0],
                [0, 0, 0, 0]
            ], device=output.device, dtype=output.dtype)
        else:
            bd_z = self.bd_z
log_likelihood = 0.0
bd_m = self._output_to_m(output)
for i in range(output.shape[0]):
log_likelihood \
+= self._bingham_loss_fixed_dispersion_single_sample(
target[i], bd_m[i], bd_z)
loss = -log_likelihood
return loss, log_likelihood / output.shape[0]
def statistics(self, target, output, epoch):
""" Reports some additional loss statistics.
Arguments:
target (torch.Tensor): Ground-truth shaped as loss input.
output (torch.Tensor): Network output.
epoch (int): Current epoch. Currently unused.
Returns:
stats (dict): Bingham parameters and angular deviation.
"""
        stats = {
            "maad": float(maad_bingham(
                target, output, self.orthogonalization))
        }
return stats
@staticmethod
def _bingham_loss_fixed_dispersion_single_sample(target, bd_m, bd_z):
"""
Calculates the bingham likelihood loss on
a single sample.
Parameters:
target: Target value at which the likelihood is
evaluated
bd_m: Bingham distribution location and axes parameter of shape
(1, 4, 4)
bd_z: Z parameter matrix of shape (1, 4, 4)
"""
target = target.reshape(1, 4)
loss = torch.mm(torch.mm(torch.mm(torch.mm(
target, bd_m), bd_z), torch.t(bd_m)), torch.t(target))
return loss
def _output_to_m(self, output):
""" Creates orthogonal matrix from output.
Parameters:
output (torch.Tensor): Output values from which M is extracted,
shape (batch_size, 16) for gram-schmidt orthogonalization
and (batch_size, 4) for quaternion_matrix orthogonalization.
"""
batch_size = output.shape[0]
if self.orthogonalization == "gram_schmidt":
reshaped_output = output.reshape(batch_size, 4, 4)
bd_m = gram_schmidt_batched(reshaped_output)
elif self.orthogonalization == "modified_gram_schmidt":
reshaped_output = output.reshape(batch_size, 4, 4)
bd_m = gram_schmidt_batched(reshaped_output, modified=True)
elif self.orthogonalization == "quaternion_matrix":
#bd_m = quaternion_matrix(output)
raise NotImplementedError
else:
raise ValueError("Invalid orthogonalization type.")
return bd_m
| 4,440 | 36.008333 | 80 | py |
deep_bingham | deep_bingham-master/modules/network.py | import torch.nn as nn
from torchvision import models
def get_model(name, pretrained, num_channels, num_classes):
"""
Method that returns a torchvision model given a model
name, pretrained (or not), number of channels,
and number of outputs
Inputs:
name - string corresponding to model name
pretrained- Boolean for whether a pretrained
model is requested
num_channels- int number of channels
num_classes- number of outputs of the network
"""
function = getattr(models, name)
model = function(pretrained=pretrained)
if "resnet" in name:
if num_channels == 1:
model = ResNet18Grayscale(models.resnet.BasicBlock,
[2, 2, 2, 2],
num_classes)
else:
model.fc = nn.Linear(512, num_classes)
    else:
        # Replace the final linear layer of the classifier head so the
        # network produces num_classes outputs.
        in_features = list(model.classifier.children())[-1].in_features
        model.classifier.add_module('6', nn.Linear(in_features, num_classes))
return model
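# Illustrative usage (hyperparameters made up): a single-channel ResNet-18
# with four outputs, e.g. for a quaternion head, and a standard RGB variant.
#
#   model_gray = get_model('resnet18', pretrained=True, num_channels=1,
#                          num_classes=4)
#   model_rgb = get_model('resnet18', pretrained=True, num_channels=3,
#                         num_classes=4)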
class ResNet18Grayscale(models.resnet.ResNet):
"""
A class that inherits the torchvision model
Resnet and makes it compatible with grayscale
images.
"""
def __init__(self, block, layers, num_classes):
super(ResNet18Grayscale, self).__init__(block, layers, num_classes)
self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.fc = nn.Linear(512, num_classes)
| 1,587 | 32.083333 | 80 | py |
deep_bingham | deep_bingham-master/modules/von_mises.py | """Implementation of von Mises loss function
Code based on:
https://github.com/sergeyprokudin/deep_direct_stat/blob/master/utils/losses.py
"""
import numpy as np
import torch
import math
import sys
from scipy.interpolate import Rbf
import utils
from utils import generate_coordinates
from modules.maad import maad_biternion
from modules.vm_operations import *
class VonMisesLoss(object):
"""
Computes the von Mises log likelihood loss on a batch of target-output
values.
"""
def __init__(self):
self._bessel_taylor_coefs = torch.tensor(
[1.00000000e+00, 2.50000000e-01, 1.56250000e-02,
4.34027778e-04, 6.78168403e-06])
def __call__(self, target, output):
"""
Calculates the von mises likelihood loss on a batch of target-output
values.
parameters:
target: target values at which the likelihood is evaluated
of shape (n, 1, 6)
output: output values from which kappa and biterion representation
of the angles are extracted, shape (n, 1, 9)
returns:
neg_log_likelihood_loss: the negative sum of the log-likelihood of
each sample.
log_likelihood: the average log likelihood.
"""
log_likelihood = 0
data_type = output.type()
for i in range(output.shape[0]):
angles, kappas = output_to_angles_and_kappas(output[i])
x = math.radians(target[i][0])
y = math.radians(target[i][1])
z = math.radians(target[i][2])
pan_target = torch.tensor([math.cos(x), math.sin(x)])
tilt_target = torch.tensor([math.cos(y), math.sin(y)])
roll_target = torch.tensor([math.cos(z), math.sin(z)])
if output.is_cuda:
device = output.get_device()
pan_target = pan_target.to(device)
tilt_target = tilt_target.to(device)
roll_target = roll_target.to(device)
log1 = self._von_mises_log_likelihood_single_angle(
pan_target, angles[:2], kappas[0:1])
log2 = self._von_mises_log_likelihood_single_angle(
tilt_target, angles[2:4], kappas[1:2])
log3 = self._von_mises_log_likelihood_single_angle(
roll_target, angles[4:], kappas[2:])
log_likelihood += log1 + log2 + log3
loss = -log_likelihood
return loss, log_likelihood / output.shape[0]
def _von_mises_log_likelihood_single_angle(self, y_true, mu_pred,
kappa_pred):
r"""
Compute log-likelihood given data samples and predicted von-mises model
parameters
Parameters:
y_true: true values of an angle in biternion (cos, sin)
representation.
mu_pred: predicted mean values of an angle in biternion (cos, sin)
representation.
kappa_pred: predicted kappa (inverse variance) values of an angle
Returns:
log_likelihood: the von Mises log likelihood.
"""
cosine_dist = torch.sum(torch.mul(y_true, mu_pred)).reshape([-1, 1])
if kappa_pred.is_cuda:
device = kappa_pred.get_device()
cosine_dist = cosine_dist.to(device)
        norm_const = self._log_bessel_approx_dds(kappa_pred) \
            + torch.log(torch.tensor(2. * math.pi))
log_likelihood = (kappa_pred * cosine_dist) - norm_const
return log_likelihood.reshape([-1, 1])
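    # For reference: for unit biternions the dot product above equals
    # cos(theta - mu), so the returned value is the exact von Mises
    # log-density log p(theta | mu, kappa)
    #   = kappa * cos(theta - mu) - log(2 * pi * I0(kappa)),
    # with log I0 approximated by _log_bessel_approx_dds below.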
def _log_bessel_approx_dds(self, kappa):
        kappa = kappa.reshape([-1, 1])
def _log_bessel_approx_taylor(cls, x):
num_coef = cls._bessel_taylor_coefs.shape[0]
arg = torch.arange(0, num_coef, 1) * 2
deg = arg.reshape([1, -1])
n_rows = x.shape[0]
x_tiled = x.repeat([1, num_coef])
deg_tiled = deg.repeat([n_rows, 1]).float()
coef_tiled = cls._bessel_taylor_coefs[0:num_coef].reshape(
1, num_coef).repeat([n_rows, 1])
if x.is_cuda:
device = x.get_device()
x_tiled = x_tiled.to(device)
deg_tiled = deg_tiled.to(device)
coef_tiled = coef_tiled.to(device)
val = torch.log(
torch.sum(torch.pow(x_tiled, deg_tiled) * coef_tiled, 1))
return val.reshape([-1, 1])
def _log_bessel_approx_large(x):
return x - 0.5 * torch.log(2 * np.pi * x)
if kappa[0] > 5:
return _log_bessel_approx_large(kappa)
else:
return _log_bessel_approx_taylor(self, kappa)
def statistics(self, target, output, epoch=None):
param_kappas = output_to_kappas(output)
stats = {"maad" : float(maad_biternion(target, output)),
"kappa_0": float(param_kappas[:, 0].mean()),
"kappa_1": float(param_kappas[:, 1].mean()),
"kappa_2": float(param_kappas[:, 2].mean())}
return stats
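# --- Editor's sanity check (not part of the original module) -----------------
# A minimal sketch verifying the truncated-Taylor Bessel approximation used in
# the normalization constant against scipy's exact I_0. It assumes scipy is
# available (the repository already depends on it elsewhere).
def _demo_bessel_approximation():
    import scipy.special
    vm_loss = VonMisesLoss()
    kappa = torch.tensor([2.0])
    approx = float(vm_loss._log_bessel_approx_dds(kappa))
    exact = float(np.log(scipy.special.iv(0, 2.0)))
    # The 5-term Taylor expansion is accurate to roughly 1e-4 for kappa <= 5.
    assert abs(approx - exact) < 1e-3, (approx, exact)
if __name__ == "__main__":
    _demo_bessel_approximation()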
| 5,116 | 33.574324 | 79 | py |
deep_bingham | deep_bingham-master/modules/gram_schmidt.py | import torch
def gram_schmidt(input_mat, reverse=False, modified=False):
""" Carries out the Gram-Schmidt orthogonalization of a matrix.
Arguments:
input_mat (torch.Tensor): A quadratic matrix that will be turned into an
orthogonal matrix.
        reverse (bool): Starts the Gram-Schmidt method from the last column
            if set to True.
        modified (bool): Uses the numerically more stable modified
            Gram-Schmidt method if set to True.
"""
mat_size = input_mat.shape[0]
Q = torch.zeros(mat_size, mat_size,
device=input_mat.device, dtype=input_mat.dtype)
if modified:
if reverse:
outer_iterator = range(mat_size - 1, -1, -1)
def inner_iterator(k): return range(k, -1, -1)
else:
outer_iterator = range(mat_size)
def inner_iterator(k): return range(k+1, mat_size)
# This implementation mostly follows the description from
# https://www.math.uci.edu/~ttrogdon/105A/html/Lecture23.html
# The more complex form is due to pytorch not allowing for inplace
# operations of variables needed for gradient computation.
v = input_mat
for j in outer_iterator:
Q[:, j] = v[:, j] / torch.norm(v[:, j])
v_old = v
v = torch.zeros(mat_size, mat_size,
device=input_mat.device, dtype=input_mat.dtype)
for i in inner_iterator(j):
v[:, i] = v_old[:, i] \
- (torch.dot(Q[:, j].clone(), v_old[:, i])
* Q[:, j].clone())
elif not modified:
if reverse:
outer_iterator = range(mat_size - 1, -1, -1)
def inner_iterator(k): return range(mat_size - 1, k, -1)
else:
outer_iterator = range(mat_size)
def inner_iterator(k): return range(k)
for j in outer_iterator:
v = input_mat[:, j]
for i in inner_iterator(j):
p = torch.dot(Q[:, i].clone(), v) * Q[:, i].clone()
v = v - p
Q[:, j] = v / torch.norm(v)
return Q
def gram_schmidt_batched(input_mat, reverse=False, modified=False):
""" Carries out the Gram-Schmidt orthogonalization of a matrix on an
entire batch.
Arguments:
input_mat (torch.Tensor): A tensor containing quadratic matrices each of
which will be orthogonalized of shape (batch_size, m, m).
        reverse (bool): Starts the Gram-Schmidt method from the last column
            if set to True.
        modified (bool): Uses the numerically more stable modified
            Gram-Schmidt method if set to True.
Returns:
Q (torch.Tensor): A batch of orthogonal matrices of same shape as
input_mat.
"""
batch_size = input_mat.shape[0]
mat_size = input_mat.shape[1]
Q = torch.zeros(batch_size, mat_size, mat_size,
device=input_mat.device, dtype=input_mat.dtype)
if modified:
#TODO implement batched version
for i in range(input_mat.shape[0]):
q = gram_schmidt(input_mat[i], reverse, modified)
Q[i] = q
elif not modified:
if reverse:
raise NotImplementedError
else:
outer_iterator = range(mat_size)
def inner_iterator(k): return range(k)
for j in outer_iterator:
v = input_mat[:, :, j].view(batch_size, mat_size, 1)
for i in inner_iterator(j):
q_squeezed = Q[:, :, i].view(batch_size, 1, mat_size).clone()
dot_products = torch.bmm(q_squeezed, v)
p = dot_products.repeat((1, mat_size, 1)) \
* Q[:, :, i].unsqueeze(2).clone()
v = v - p
Q[:, :, j] = v.squeeze() / torch.norm(v, dim=1).repeat(1, mat_size)
return Q
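# --- Editor's usage sketch (not part of the original module) -----------------
# Quick orthogonality check for both the single-matrix and the batched
# routines; only torch (imported above) is needed.
def _demo_gram_schmidt():
    torch.manual_seed(0)
    mat = torch.randn(4, 4, dtype=torch.float64)
    q_single = gram_schmidt(mat)
    eye = torch.eye(4, dtype=torch.float64)
    assert torch.allclose(q_single.t() @ q_single, eye, atol=1e-8)
    batch = torch.randn(3, 4, 4, dtype=torch.float64)
    q_batch = gram_schmidt_batched(batch)
    for i in range(batch.shape[0]):
        assert torch.allclose(q_batch[i].t() @ q_batch[i], eye, atol=1e-8)
if __name__ == "__main__":
    _demo_gram_schmidt()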
| 3,837 | 35.207547 | 80 | py |
deep_bingham | deep_bingham-master/modules/mse.py | import torch
import torch.nn as nn
from modules.maad import maad_mse
class MSELoss(object):
"""
Class for the MSE loss function
"""
def __init__(self):
self.loss = nn.MSELoss(reduction='sum')
def __call__(self, target, output):
"""
Calculates the MSE loss on a batch of target-output values.
Target value is the true unit quaternion pose. Output is the predicted
quaternion after normalization.
Arguments:
target (torch.Tensor): Target values at which the loss is evaluated of shape (N, 4)
output (torch.Tensor): Output values of shape (N, 4)
Returns:
loss: The loss of the current batch
log_likelihood: 0. This loss function does not calculate a log likelihood so 0
is returned.
"""
return self.loss(target, output), torch.Tensor([0])
def statistics(self, target, output, cur_epoch=None):
""" Reports loss statistics.
Arguments:
target (torch.Tensor): Ground-truth shaped as loss input.
output (torch.Tensor): Network output.
cur_epoch (int): Current epoch. Currently unused.
Returns:
stats (dict): angular deviation.
"""
return {"maad": maad_mse(target, output.detach())}
| 1,325 | 32.15 | 95 | py |
deep_bingham | deep_bingham-master/modules/bingham_loss.py | """Implementation of the Bingham loss function"""
from __future__ import print_function
import dill
import os
import bingham_distribution as ms
import numpy as np
import torch
from scipy.interpolate import Rbf
import utils
from modules.maad import maad_bingham
from modules.gram_schmidt import gram_schmidt, gram_schmidt_batched
from modules.quaternion_matrix import quaternion_matrix
from utils import generate_coordinates, vec_to_bingham_z_many
def batched_logprob(target, mu, sigma):
""" Mean of log probability of targets given mu and sigmas of a Gaussian
distribution """
target = target.reshape(mu.shape)
dist = torch.distributions.normal.Normal(mu, sigma)
return torch.mean(dist.log_prob(target))
def batched_norm(target, output):
""" Mean of norm error between target and output matrices """
target = target.reshape(output.shape)
diff = target - output
loss = torch.mean(torch.norm(diff, dim=-1))
return loss
class BinghamLoss(object):
"""
Calculates the bingham log likelihood loss on a batch of target-output
values.
Arguments:
lookup_table_file (str): Path to the location of the lookup table.
        interpolation_kernel (str): The kernel to use for RBF interpolation.
Can be "multiquadric" (default) or "gaussian".
orthogonalization (str): Orthogonalization method to use. Can be
"gram_schmidt" for usage of the classical gram-schmidt method.
"modified_gram_schmidt" for a more robust variant, or
"quaternion_matrix" for usage of a orthogonal matrix representation
of an output quaternion.
Inputs:
target (torch.Tensor): Target values at which the likelihood is
evaluated of shape (N, 4)
output (torch.Tensor): Output values from which M and Z are extracted of
shape (N, 19) if orthogonalization is "gram_schmidt" and shape (N,7)
if it is "quaternion_matrix"
Result:
loss: The loss of the current batch.
log_likelihood: Average log likelihood.
"""
def __init__(self, lookup_table_file,
interpolation_kernel="multiquadric",
orthogonalization="gram_schmidt"):
self.orthogonalization = orthogonalization
_, _, nc_lookup_table, coords \
= utils.load_lookup_table(lookup_table_file)
print("Bingham Interpolation Kernel: " + interpolation_kernel)
self.interp_options = {
"interp_data": torch.from_numpy(nc_lookup_table),
"interp_coords": torch.from_numpy(coords)
}
rbf_file = os.path.splitext(lookup_table_file)[0] + ".rbf"
if os.path.exists(rbf_file):
with open(rbf_file, 'rb') as file:
self.rbf = dill.load(file)
else:
x, y, z = generate_coordinates(
self.interp_options["interp_coords"])
# Limit found empirically.
assert len(x) < 71000, "Lookup table too large."
print("Creating the interpolator... (this usually takes a while)")
self.rbf = Rbf(x, y, z, torch.log(
self.interp_options["interp_data"]
).numpy().ravel().squeeze())
with open(rbf_file, 'wb') as file:
dill.dump(self.rbf, file)
def __call__(self, target, output):
M, Z = self._output_to_m_z(output)
log_likelihood = torch.sum(
self._log_bingham_loss(
target, M, Z.squeeze(0),
self.rbf))
loss = -log_likelihood
return loss, log_likelihood / target.shape[0]
def statistics(self, target, output, epoch):
""" Reports some additional loss statistics.
Arguments:
target (torch.Tensor): Ground-truth shaped as loss input.
output (torch.Tensor): Network output.
epoch (int): Current epoch. Currently unused.
Returns:
stats (dict): Bingham parameters and angular deviation.
"""
bd_z = torch.mean(vec_to_bingham_z_many(output[:, :3]).squeeze(0), 0)
cur_maad = maad_bingham(target, output[:, 3:], self.orthogonalization)
stats = {
"z_0": float(bd_z[0]),
"z_1": float(bd_z[1]),
"z_2": float(bd_z[2]),
"maad": float(cur_maad)
}
return stats
@staticmethod
def _log_bingham_loss(target, M, Z, rbf=None):
r"""Log Bingham likelihood loss.
The Bingham distribution is parametrized as
            f(x) = exp(x^T M Z M^T x) / N(Z)
with x being defined on the hypershere, i.e. ||x||=1.
Note: This has been developed using CPU-only storage of Tensors and may
require adaptation when used with GPU.
Parameters:
target: Target values at which the likelihood is evaluated of shape
(N, 4).
M: Bingham distribution location and axes parameter of shape
(N,4,4). M is expected to be an orthonormal matrix.
Z: Tensor representing the Z parameter matrix of shape (N, 3).
The parameters are expected to be negative and given in an
ascending order.
rbf: RBF object
Returns:
log likelihood: log value of the pdf for each of the target samples.
"""
assert target.dim() == 2 and target.shape[1] == 4, \
"Wrong dimensionality of target tensor."
assert M.dim() == 3 and M.shape[1:3] == (4, 4), \
"Wrong dimensionality of location parameter matrix M."
assert Z.dim() == 2 and Z.shape[1] == 3, \
"Wrong dimensionality of location parameter matrix Z."
assert Z.shape[0] == M.shape[0] and Z.shape[0] == target.shape[0], \
"Number of samples does not agree with number of parameters."
if target.is_cuda:
device = target.get_device()
else:
device = "cpu"
# Adds missing 0 to vectors Z and turns them into diagonal matrices.
z_padded = torch.cat(
(Z, torch.zeros((Z.shape[0], 1), device=device, dtype=M.dtype)),
dim=1)
z_as_matrices = torch.diag_embed(z_padded)
norm_const = BinghamInterpolationRBF.apply(Z, rbf)
likelihoods = (torch.bmm(torch.bmm(torch.bmm(torch.bmm(
target.unsqueeze(1),
M),
z_as_matrices),
M.transpose(1, 2)),
target.unsqueeze(2))
).squeeze() - norm_const
return likelihoods
def _output_to_m_z(self, output):
""" Creates orthogonal matrix from output.
        Parameters:
            output (torch.Tensor): Output values from which M and Z are
                extracted, shape (N, 19) for gram-schmidt orthogonalization
                and (N, 7) for quaternion_matrix orthogonalization.
"""
bd_z = utils.vec_to_bingham_z_many(output[:, :3])
bd_m = vec_to_bingham_m(output[:, 3:], self.orthogonalization)
return bd_m, bd_z
class BinghamInterpolationRBF(torch.autograd.Function):
r"""Computes the Bingham interpolation constant and its derivatives.
Input:
Z: Tensor representing the Z parameters of shape (N, 3).
Returns:
        norm_const: Bingham normalization constants evaluated for each row of
            Z parameters.
"""
@staticmethod
def forward(ctx, Z, rbfi):
norm_const = np.zeros(Z.shape[0])
ctx.save_for_backward(Z)
ctx.constant = rbfi
v = Z.detach().cpu().numpy()
for idx in range(Z.shape[0]):
norm_const[idx] = rbfi(v[idx][0], v[idx][1], v[idx][2])
tensor_type = Z.type()
if Z.is_cuda:
device = Z.get_device()
result = torch.tensor(norm_const, device=device).type(tensor_type)
else:
result = torch.tensor(norm_const).type(tensor_type)
return result
@staticmethod
def _compute_derivatives(rbfi, Z):
"""
        Computes the gradient of the interpolated log normalization constant
        with respect to Z via finite differences.
        Parameters:
            rbfi: an RBF interpolation object
            Z: an array of three Bingham dispersion parameters
        Returns:
            a torch tensor holding the gradient with respect to Z
"""
delta = 0.0001
x = rbfi(Z[0], Z[1], Z[2])
finite_diff_x = (rbfi(Z[0] + delta, Z[1], Z[2]) - x) / delta
finite_diff_y = (rbfi(Z[0], Z[1] + delta, Z[2]) - x) / delta
finite_diff_z = (rbfi(Z[0], Z[1], Z[2] + delta) - x) / delta
return torch.tensor([finite_diff_x, finite_diff_y, finite_diff_z])
@staticmethod
def backward(ctx, grad_output):
if not ctx.needs_input_grad[0]:
return None
Z = ctx.saved_tensors[0]
rbfi = ctx.constant
grad_Z = torch.zeros(Z.shape[0], 3)
v = Z.detach().cpu().numpy()
for idx in range(grad_output.shape[0]):
grad_Z[idx] = \
grad_output[idx] \
* BinghamInterpolationRBF._compute_derivatives(rbfi, v[idx])
        tensor_type = grad_output.type()
        if grad_output.is_cuda:
            device = grad_output.get_device()
            result = grad_Z.to(device).type(tensor_type)
        else:
            result = grad_Z.type(tensor_type)
return result, None
def vec_to_bingham_m(output, orthogonalization):
""" Creates orthogonal matrix from output.
This operates on an entire batch.
Parameters:
output (torch.Tensor): Output values from which M is extracted,
shape (batch_size, 16) for gram-schmidt orthogonalization
and (batch_size, 4) for quaternion_matrix orthogonalization.
        orthogonalization (str): Orthogonalization method to use. Can be
            "gram_schmidt" for usage of the classical gram-schmidt method,
            "modified_gram_schmidt" for a more robust variant, or
            "quaternion_matrix" for usage of an orthogonal matrix
            representation of an output quaternion (currently applied sample
            by sample rather than batched).
"""
batch_size = output.shape[0]
if orthogonalization == "gram_schmidt":
reshaped_output = output.reshape(batch_size, 4, 4)
bd_m = gram_schmidt_batched(reshaped_output)
elif orthogonalization == "modified_gram_schmidt":
reshaped_output = output.reshape(batch_size, 4, 4)
bd_m = gram_schmidt_batched(reshaped_output, modified=True)
elif orthogonalization == "quaternion_matrix":
#TODO batched version
bd_m = torch.zeros(output.shape[0], 4, 4).to(device=output.device, dtype=output.dtype)
for i in range(output.shape[0]):
            bd_m[i] = quaternion_matrix(output[i])
else:
raise ValueError("Invalid orthogonalization type.")
return bd_m
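# --- Editor's usage sketch (not part of the original module) -----------------
# Shows how a raw 19-dimensional network output decomposes into the Bingham
# parameters: the first 3 entries map to a sorted, negative Z and the
# remaining 16 are orthogonalized into M. Only torch and the helpers above
# are needed; no lookup table is required for this check.
def _demo_bingham_parameters():
    torch.manual_seed(0)
    raw = torch.randn(2, 19, dtype=torch.float64)
    bd_m = vec_to_bingham_m(raw[:, 3:], "gram_schmidt")
    bd_z = vec_to_bingham_z_many(raw[:, :3]).squeeze(0)
    eye = torch.eye(4, dtype=torch.float64)
    for i in range(raw.shape[0]):
        assert torch.allclose(bd_m[i].t() @ bd_m[i], eye, atol=1e-8)
        z_row = bd_z[i]
        assert bool((z_row < 0).all()) and bool((z_row[:-1] <= z_row[1:]).all())
if __name__ == "__main__":
    _demo_bingham_parameters()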
| 11,124 | 34.205696 | 94 | py |
deep_bingham | deep_bingham-master/modules/cosine.py | from modules.maad import output_to_angles, maad_cosine
from utils import radians
import torch
class CosineLoss():
"""
Class for calculating Cosine Loss assuming biternion representation of pose.
"""
def __init__(self):
self.stats = 0
def __call__(self, target, output):
"""
Calculates the cosine loss on a batch of target-output values.
Arguments:
target: Target values at which loss is evaluated of shape (N, 3)
            output: Output values of shape (N, 6) predicted by the network
                prior to normalization of each sin/cos pair.
Result:
loss: The loss of the current batch
            log_likelihood: 0. This loss function does not calculate a log
                likelihood, so 0 is returned.
"""
loss = 0
for i in range(output.shape[0]):
loss += self._cosine_single_sample(target[i], output[i])
return loss, torch.Tensor([0])
def statistics(self, target, output, cur_epoch):
stats = {"maad": float(maad_cosine(target, output))}
self.stats = stats
return stats
def _cosine_single_sample(self, target, output):
"""
Calculates cosine loss for a single sample.
Arguments:
target: Target value at which loss is evaluated of shape (1, 3)
            output: Output value of shape (1, 6) predicted by the network
prior to normalization of each sin/cos pair.
Returns:
loss: The loss of a single sample.
"""
radian_target = radians(target)
radian_target_cos = torch.cos(radian_target)
radian_target_sin = torch.sin(radian_target)
target_biternion = []
for i in range(3):
target_biternion.append(radian_target_cos[i])
target_biternion.append(radian_target_sin[i])
target = torch.tensor(target_biternion)
if output.is_cuda:
device = output.get_device()
target = target.to(device)
angles = output_to_angles(output)
return 3 - torch.dot(angles, target)
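# --- Editor's usage sketch (not part of the original module) -----------------
# Sanity check: a prediction that matches the target exactly yields zero loss,
# since the dot product of matching unit (cos, sin) pairs is 3. This assumes
# output_to_angles (imported above) normalizes each (cos, sin) pair, which the
# loss construction implies.
def _demo_cosine_loss():
    target = torch.tensor([[30., -10., 45.]])  # Euler angles in degrees
    rad = radians(target[0])
    output = torch.stack([torch.cos(rad[0]), torch.sin(rad[0]),
                          torch.cos(rad[1]), torch.sin(rad[1]),
                          torch.cos(rad[2]), torch.sin(rad[2])]).unsqueeze(0)
    loss, _ = CosineLoss()(target, output)
    assert abs(float(loss)) < 1e-5
if __name__ == "__main__":
    _demo_cosine_loss()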
| 2,160 | 33.301587 | 86 | py |
deep_bingham | deep_bingham-master/modules/quaternion_matrix.py | import torch
def quaternion_matrix(quat):
""" Computes an orthogonal matrix from a quaternion.
We use the representation from the NeurIPS 2018 paper "Bayesian Pose
Graph Optimization via Bingham Distributions and Tempred Geodesic MCMC" by
Birdal et al. There, the presentation is given above eq. (6). In practice
any similar scheme will do.
Parameters:
quat (torch.tensor): Tensor of shape 4 representing a quaternion.
"""
# This cumbersome way is necessary because copy constructors seem not to
# preserve gradients.
indices = torch.tensor([
[0, 1, 2, 3],
[1, 0, 3, 2],
[2, 3, 0, 1],
[3, 2, 1, 0]
], device=quat.device)
sign_mask = torch.tensor([
[1, -1, -1, 1],
[1, 1, 1, 1],
[1, -1, 1, -1],
[1, 1, -1, -1]
], device=quat.device, dtype=quat.dtype)
quat_normalized = quat / torch.norm(quat)
quat_mat = torch.take(quat_normalized, indices)
quat_mat = sign_mask * quat_mat
return quat_mat
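# --- Editor's sanity check (not part of the original module) -----------------
# The constructed matrix should be orthogonal for any nonzero quaternion, and
# the identity quaternion [1, 0, 0, 0] should map to the identity matrix.
def _demo_quaternion_matrix():
    identity = quaternion_matrix(torch.tensor([1., 0., 0., 0.]))
    assert torch.allclose(identity, torch.eye(4))
    torch.manual_seed(0)
    quat_mat = quaternion_matrix(torch.randn(4, dtype=torch.float64))
    eye = torch.eye(4, dtype=torch.float64)
    assert torch.allclose(quat_mat @ quat_mat.t(), eye, atol=1e-10)
if __name__ == "__main__":
    _demo_quaternion_matrix()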
| 1,042 | 27.189189 | 78 | py |
deep_bingham | deep_bingham-master/training/trainer.py | import time
import torch
from modules import maad
from utils import AverageMeter
class Trainer(object):
""" Trainer for Bingham Orientation Uncertainty estimation.
Arguments:
device (torch.device): The device on which the training will happen.
"""
def __init__(self, device, floating_point_type="float"):
self._device = device
self._floating_point_type = floating_point_type
@staticmethod
def adjust_learning_rate(optimizer):
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] / 2
def train_epoch(self, train_loader, model, loss_function,
optimizer, epoch, writer_train, writer_val, val_loader):
"""
Method that trains the model for one epoch on the training set and
reports losses to Tensorboard using the writer_train
train_loader: A DataLoader that contains the shuffled training set
model: The model we are training
loss_function: Loss function object
optimizer: The optimizer we are using
        epoch: integer epoch number
writer_train: A Tensorboard summary writer for reporting the average
loss while training.
writer_val: A Tensorboard summary writer for reporting the average
loss during validation.
val_loader: A DataLoader that contains the shuffled validation set
"""
losses = AverageMeter()
model.train()
if self._floating_point_type == "double":
model = model.double()
        is_sequential = hasattr(model, 'is_sequential')
timings_start = time.time()
for i, data in enumerate(train_loader):
if i % 20 == 0:
if i > 0 and i % 100 == 0:
print("Elapsed time: {}".format(
str(time.time()-timings_start)))
timings_start = time.time()
if is_sequential:
model.reset_state(batch=data['image'].shape[0],
device=self._device)
self.validate(self._device, val_loader, model,
loss_function, writer_val, i, epoch,
len(train_loader), 0.1)
# switch to train mode
model.train()
if self._floating_point_type == "double":
target_var = data["pose"].double().to(self._device)
input_var = data["image"].double().to(self._device)
else:
target_var = data["pose"].float().to(self._device)
input_var = data["image"].float().to(self._device)
if torch.sum(torch.isnan(target_var)) > 0:
continue
# compute output
if is_sequential:
model.reset_state(batch=data['image'].shape[0],
device=self._device)
model.to(self._device)
output = model(input_var)
if loss_function.__class__.__name__ == "MSELoss":
# norm over the last dimension (i.e. orientations)
norms \
= torch.norm(output, dim=-1, keepdim=True).to(self._device)
output = output / norms
if loss_function.__class__.__name__ == "BinghamMixtureLoss":
loss, log_likelihood = loss_function(target_var, output, epoch)
else:
loss, log_likelihood = loss_function(target_var, output)
# compute gradient and do optimization step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if self._floating_point_type == "double":
loss = loss.double() / data["image"].shape[0]
else:
loss = loss.float() / data["image"].shape[0]
losses.update(loss.item(), data["image"].size(0))
            if (i + len(train_loader) * epoch) % 1000 == 0:
Trainer.adjust_learning_rate(optimizer)
writer_train.add_scalar('data/loss', loss,
i + len(train_loader) * epoch)
writer_train.add_scalar('data/log_likelihood', log_likelihood,
i + len(train_loader) * epoch)
cur_iter = epoch * len(train_loader) + i
stats = loss_function.statistics(target_var, output, epoch)
Trainer.report_stats(writer_train, stats, cur_iter)
print("Epoch: [{0}][{1}/{2}]\t Loss {loss.last_val:.4f} "
"({loss.avg:.4f})\t".format(
epoch, i, len(train_loader), loss=losses))
def validate(self, device, val_loader, model, loss_function, writer,
index=None, cur_epoch=None, epoch_length=None, eval_fraction=1):
"""
Method that validates the model on the validation set and reports losses
to Tensorboard using the writer
device: A string that states whether we are using GPU ("cuda:0") or cpu
model: The model we are training
loss_function: Loss function object
writer: A Tensorboard summary writer for reporting the average loss
during validation.
cur_epoch: integer epoch number representing the training epoch we are
currently on.
index: Refers to the batch number we are on within the training set
epoch_length: The number of batches in an epoch
val_loader: A DataLoader that contains the shuffled validation set
        eval_fraction: Fraction of the validation set to evaluate.
"""
# switch to evaluate mode
model.eval()
losses = AverageMeter()
log_likelihoods = AverageMeter()
maads = AverageMeter()
averaged_stats = AverageMeter()
val_load_iter = iter(val_loader)
for i in range(int(len(val_loader) * eval_fraction)):
            data = next(val_load_iter)
if self._floating_point_type == "double":
target_var = data["pose"].double().to(device)
input_var = data["image"].double().to(device)
else:
target_var = data["pose"].float().to(device)
input_var = data["image"].float().to(device)
if torch.sum(torch.isnan(target_var)) > 0:
continue
# compute output
output = model(input_var)
if loss_function.__class__.__name__ == "MSELoss":
# norm over the last dimension (ie. orientations)
norms = torch.norm(output, dim=-1, keepdim=True).to(device)
output = output / norms
if loss_function.__class__.__name__ == "BinghamMixtureLoss":
loss, log_likelihood = loss_function(target_var, output, cur_epoch)
else:
loss, log_likelihood = loss_function(target_var, output)
if self._floating_point_type == "double":
loss = loss.double() / data["image"].shape[0]
else:
loss = loss.float() / data["image"].shape[0]
# measure accuracy and record loss
losses.update(loss.item(), data["image"].size(0))
log_likelihoods.update(log_likelihood.item(), data["image"].size(0))
# TODO: Unify reporting to the style below.
stats = loss_function.statistics(target_var, output, cur_epoch)
averaged_stats.update(stats, data["image"].size(0))
if index is not None:
cur_iter = cur_epoch * epoch_length + index
writer.add_scalar('data/loss', losses.avg, cur_iter)
writer.add_scalar('data/log_likelihood', log_likelihoods.avg,
cur_iter)
Trainer.report_stats(writer, averaged_stats.avg, cur_iter)
print('Test:[{0}][{1}/{2}]\tLoss {loss.last_val:.4f} '
'({loss.avg:.4f})\t'.format(
cur_epoch, index, epoch_length, loss=losses))
@staticmethod
def report_stats(writer, stats, cur_iter):
for key in stats:
writer.add_scalar(
'data/' + key, stats[key], cur_iter)
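# --- Editor's usage sketch (not part of the original module) -----------------
# adjust_learning_rate simply halves the learning rate of every parameter
# group; inside train_epoch it is triggered every 1000 iterations.
def _demo_adjust_learning_rate():
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    Trainer.adjust_learning_rate(optimizer)
    assert abs(optimizer.param_groups[0]["lr"] - 0.05) < 1e-12
if __name__ == "__main__":
    _demo_adjust_learning_rate()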
| 8,449 | 39.430622 | 83 | py |
deep_bingham | deep_bingham-master/utils/utils.py | """ Utilities for learning pipeline."""
from __future__ import print_function
import copy
import dill
import hashlib
import itertools
import bingham_distribution as ms
import math
import numpy as np
import os
import scipy
import scipy.integrate as integrate
import scipy.special
import sys
import torch
from pathos.multiprocessing import ProcessingPool as Pool
def convert_euler_to_quaternion(roll, yaw, pitch):
"""Converts roll, yaw, pitch to a quaternion.
"""
# roll (z), yaw (y), pitch (x)
cy = math.cos(math.radians(roll) * 0.5)
sy = math.sin(math.radians(roll) * 0.5)
cp = math.cos(math.radians(yaw) * 0.5)
sp = math.sin(math.radians(yaw) * 0.5)
cr = math.cos(math.radians(pitch) * 0.5)
sr = math.sin(math.radians(pitch) * 0.5)
w = cy * cp * cr + sy * sp * sr
x = cy * cp * sr - sy * sp * cr
y = sy * cp * sr + cy * sp * cr
z = sy * cp * cr - cy * sp * sr
quat = np.array([w, x, y, z])
quat = quat / np.linalg.norm(quat)
return quat
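# --- Editor's usage sketch (not part of the original module) -----------------
# The zero rotation maps to the identity quaternion, and the result is always
# normalized to unit length. Call _demo_convert_euler_to_quaternion() to run
# the check.
def _demo_convert_euler_to_quaternion():
    assert np.allclose(convert_euler_to_quaternion(0., 0., 0.),
                       [1., 0., 0., 0.])
    quat = convert_euler_to_quaternion(10., 20., 30.)
    assert abs(np.linalg.norm(quat) - 1.) < 1e-12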
def radians(degree_tensor):
"""
Method to convert a torch tensor of angles in degree format to radians.
Arguments:
degree_tensor (torch.Tensor): Tensor consisting of angles in degree format.
Returns:
radian_tensor (torch.Tensor): Tensor consisting of angles in radian format.
"""
radian_tensor = degree_tensor/180 * math.pi
return radian_tensor
def generate_coordinates(coords):
"""
A function that returns all possible triples of coords
Parameters:
        coords: a 1-D torch tensor of coordinates (Tensor.repeat semantics
            are used below, so a numpy array will not work here)
Returns:
x: the first coordinate of possible triples
y: the second coordinate of possible triples
        z: the third coordinate of possible triples
"""
x = coords.reshape(-1, 1).repeat(1, len(coords) * len(coords)).flatten()
y = coords.reshape(-1, 1).repeat(1, len(coords)).flatten().repeat(len(coords))
z = coords.reshape(-1, 1).flatten().repeat(len(coords)*len(coords))
return x, y, z
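# --- Editor's usage sketch (not part of the original module) -----------------
# For a coordinate tensor of length n, the three returned tensors enumerate
# all n**3 triples, matching how the lookup-table interpolation code uses the
# function. Call _demo_generate_coordinates() to run the check.
def _demo_generate_coordinates():
    coords = torch.tensor([0., 1.])
    x, y, z = generate_coordinates(coords)
    assert len(x) == len(y) == len(z) == 8
    triples = sorted(zip(x.tolist(), y.tolist(), z.tolist()))
    assert triples == sorted(
        [(a, b, c) for a in (0., 1.) for b in (0., 1.) for c in (0., 1.)])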
def ensure_dir_exists(path):
""" Checks if a directory exists and creates it otherwise. """
if not os.path.exists(path):
os.makedirs(path)
def load_lookup_table(path):
"""
Loads lookup table from dill serialized file.
    Returns a table specific tuple. For the Bingham case, the tuple contains:
table_type (str):
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
coords (numpy.ndarray): Coordinates at which lookup table was evaluated.
For the von Mises case, it contains:
options (dict): The options used to generate the lookup table.
res_tensor (numpy.ndarray): The actual lookup table data.
"""
assert os.path.exists(path), "Lookup table file not found."
with open(path, "rb") as dillfile:
return dill.load(dillfile)
def eaad_von_mises(kappas, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
kappas: Von Mises kappa parameters for roll, pitch, yaw.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2.0 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-2, "epsabs": 1e-2}
param_mu = np.array([0., 0., 0.]) # radians
quat_mu = convert_euler_to_quaternion(
math.degrees(param_mu[0]), math.degrees(param_mu[1]),
math.degrees(param_mu[2])
)
param_kappa = kappas
direct_norm_const = 8.0 * (np.pi ** 3) \
* scipy.special.iv(0, param_kappa[0]) \
* scipy.special.iv(0, param_kappa[1]) \
* scipy.special.iv(0, param_kappa[2])
def integrand_aad(phi1, phi2, phi3):
return np.exp(param_kappa[0] * np.cos(phi1)) \
* np.exp(param_kappa[1] * np.cos(phi2)) \
* np.exp(param_kappa[2] * np.cos(phi3)) \
* aad(quat_mu,
convert_euler_to_quaternion(
math.degrees(phi1), math.degrees(phi2),
math.degrees(phi3)
))
eaad_int = integrate.tplquad(
integrand_aad,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: 2. * np.pi, # phi2
lambda x, y: 0.0, lambda x, y: 2. * np.pi, # phi1
**integral_options
)
return eaad_int[0]/direct_norm_const
def eaad_bingham(bingham_z, integral_options=None):
""" Expected Absolute Angular Deviation of Bingham Random Vector
Arguments:
bingham_z: Bingham dispersion parameter in the format expected by the
manstats BinghamDistribution class.
integral_options: Options to pass on to the scipy integrator for
computing the eaad and the bingham normalization constant.
"""
def aad(quat_a, quat_b):
# acos_val = np.arccos(np.dot(quat_a, quat_b))
# diff_ang = 2 * np.min([acos_val, np.pi - acos_val])
acos_val = np.arccos(np.abs(np.dot(quat_a, quat_b)))
diff_ang = 2 * acos_val
return diff_ang
if integral_options is None:
integral_options = {"epsrel": 1e-4, "epsabs": 1e-4}
bd = ms.BinghamDistribution(
np.eye(4), bingham_z,
{"norm_const_mode": "numerical",
"norm_const_options": integral_options}
)
def integrand_transformed(x):
# To avoid unnecessary divisions, this term does not contain the
# normalization constant. At the end, the result of the integration is
# divided by it.
return aad(x, bd.mode) \
* np.exp(np.dot(x, np.dot(np.diag(bingham_z), x)))
def integrand(phi1, phi2, phi3):
sp1 = np.sin(phi1)
sp2 = np.sin(phi2)
return integrand_transformed(np.array([
sp1 * sp2 * np.sin(phi3),
sp1 * sp2 * np.cos(phi3),
sp1 * np.cos(phi2),
np.cos(phi1)
])) * (sp1 ** 2.) * sp2
eaad_int = integrate.tplquad(
integrand,
0.0, 2.0 * np.pi, # phi3
lambda x: 0.0, lambda x: np.pi, # phi2
lambda x, y: 0.0, lambda x, y: np.pi, # phi1
**integral_options
)
return eaad_int[0] / bd.norm_const
def build_bd_lookup_table(table_type, options, path=None):
"""
Builds a lookup table for interpolating the bingham normalization
constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
table_type: Type of lookup table used. May be 'uniform' or 'nonuniform'
options: Dict cotaining type specific options.
If type is "uniform" this dict must contain:
"bounds" = Tuple (lower_bound, upper_bound) representing bounds.
"num_points" = Number of points per dimension.
If type is "nonuniform" this dict must contain a key "coords" which
is a numpy arrays representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
    hash_obj.update(table_type.encode("utf-8"))
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_type, serialized_options, res_table, coords) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
        hash_obj.update(serialized_type.encode("utf-8"))
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
elif table_type == "uniform":
# Number of points per axis.
(lbound, rbound) = options["bounds"]
num_points = options["num_points"]
assert num_points > 1, \
"Grid must have more than one point per dimension."
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = np.linspace(lbound, rbound, num_points)
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
elif table_type == "nonuniform":
nc_options = {"epsrel": 1e-3, "epsabs": 1e-7}
coords = options["coords"]
res_table = _compute_bd_lookup_table(coords, nc_options)
with open(path, "wb") as dillfile:
dill.dump((table_type, options, res_table, coords), dillfile)
else:
sys.exit("Unknown lookup table type")
return res_table
def build_vm_lookup_table(options, path=None):
"""
    Builds a lookup table for interpolating the von Mises normalization
    constant. If a lookup table with the given options already exists, it is
loaded and returned instead of building a new one.
Arguments:
options: Dict cotaining table options. It must contain a key "coords"
which is a numpy arrays representing the coordinates at which the
interpolation is evaluated.
path: absolute path for the lookup table (optional). The default is to
create a hash based on the options and to use this for constructing
a file name and placing the file in the precomputed folder.
"""
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(options))
config_hash = hash_obj.hexdigest()
if not path:
path = os.path.dirname(__file__) \
+ "/../precomputed/lookup_{}.dill".format(config_hash)
# Load existing table or create new one.
if os.path.exists(path):
with open(path, "rb") as dillfile:
(serialized_options, res_table) \
= dill.load(dillfile)
hash_obj = hashlib.sha256()
hash_obj.update(dill.dumps(serialized_options))
file_config_hash = hash_obj.hexdigest()
assert file_config_hash == config_hash, \
"Serialized lookup table does not match given type & options."
else:
coords = options["coords"]
res_table = _compute_vm_lookup_table(coords)
with open(path, "wb") as dillfile:
dill.dump((options, res_table), dillfile)
return res_table
def _compute_bd_lookup_table(coords, nc_options):
num_points = len(coords)
pool = Pool()
def nc_wrapper(idx):
pt_idx = point_indices[idx]
# Indexing pt_idx in the order 2,1,0 vs. 0,1,2 has no impact
# on the result as the Bingham normalization constant is agnostic to it.
# However, the numpy integration that is used to compute it, combines
# numerical 2d and 1d integration which is why the order matters for the
# actual computation time.
#
# TODO: Make pymanstats choose best order automatically.
norm_const = ms.BinghamDistribution.normalization_constant(
np.array(
[coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]], 0.]),
"numerical", nc_options)
print("Computing NC for Z=[{}, {}, {}, 0.0]: {}".format(
coords[pt_idx[2]], coords[pt_idx[1]], coords[pt_idx[0]],
norm_const))
return norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
class AverageMeter(object):
"""Computes and stores the averages over a numbers or dicts of numbers.
For the dict, this class assumes that no new keys are added during
the computation.
"""
def __init__(self):
self.last_val = 0
self.avg = 0
self.count = 0
def update(self, val, n=1):
self.last_val = val
n = float(n)
if type(val) == dict:
if self.count == 0:
self.avg = copy.deepcopy(val)
else:
for key in val:
self.avg[key] *= self.count / (self.count + n)
self.avg[key] += val[key] * n / (self.count + n)
else:
self.avg *= self.count / (self.count + n)
self.avg += val * n / (self.count + n)
self.count += n
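# --- Editor's usage sketch (not part of the original module) -----------------
# AverageMeter keeps running weighted averages and works on plain numbers as
# well as on dicts of numbers. Call _demo_average_meter() to run the check.
def _demo_average_meter():
    meter = AverageMeter()
    meter.update(1.0)
    meter.update(3.0)
    assert abs(meter.avg - 2.0) < 1e-12
    dict_meter = AverageMeter()
    dict_meter.update({"maad": 1.0}, n=2)
    dict_meter.update({"maad": 4.0}, n=1)
    # weighted average: (1.0 * 2 + 4.0 * 1) / 3 = 2.0
    assert abs(dict_meter.avg["maad"] - 2.0) < 1e-12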
def _compute_vm_lookup_table(coords):
num_points = len(coords)
pool = Pool()
def nc_wrapper(idx):
cur_pt_idx = point_indices[idx]
log_norm_const = np.log(8.0) + (3. * np.log(np.pi)) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[0]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[1]])) \
+ np.log(scipy.special.iv(0, coords[cur_pt_idx[2]]))
print("Computing NC for kappas=[{}, {}, {}]: {}".format(
coords[cur_pt_idx[2]], coords[cur_pt_idx[1]], coords[cur_pt_idx[0]],
log_norm_const))
return log_norm_const
point_indices = list(itertools.combinations_with_replacement(
range(0, num_points), 3))
results = pool.map(nc_wrapper, range(len(point_indices)))
res_tensor = -np.ones((num_points, num_points, num_points))
for idx_pos, pt_idx in enumerate(point_indices):
res_tensor[pt_idx[0], pt_idx[1], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[0], pt_idx[2], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[0], pt_idx[2]] = results[idx_pos]
res_tensor[pt_idx[1], pt_idx[2], pt_idx[0]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[0], pt_idx[1]] = results[idx_pos]
res_tensor[pt_idx[2], pt_idx[1], pt_idx[0]] = results[idx_pos]
return res_tensor
def vec_to_bingham_z_many(y):
z = -torch.exp(y).cumsum(1)[:, [2, 1, 0]].unsqueeze(0)
return z
def vec_to_bingham_z(y):
z = -torch.exp(y).cumsum(0)[[2, 1, 0]].unsqueeze(0)
if not all(z[0][:-1] <= z[0][1:]):
print(z)
return z
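# --- Editor's usage sketch (not part of the original module) -----------------
# The unconstrained network output is mapped through exp/cumsum so that the
# resulting Bingham Z is always negative and sorted in ascending order; a
# zero vector yields [-3, -2, -1].
def _demo_vec_to_bingham_z():
    z = vec_to_bingham_z(torch.zeros(3))
    assert torch.allclose(z, torch.tensor([[-3., -2., -1.]]))
if __name__ == "__main__":
    _demo_vec_to_bingham_z()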
| 15,063 | 33.629885 | 83 | py |
deep_bingham | deep_bingham-master/utils/evaluation.py | import torch
from modules import maad
from utils import AverageMeter, eaad_bingham, eaad_von_mises
import numpy as np
def run_evaluation(model, dataset, loss_function, device, floating_point_type="float"):
model.eval()
losses = AverageMeter()
log_likelihoods = AverageMeter()
maads = AverageMeter()
averaged_stats = AverageMeter()
eaads = AverageMeter()
min_eaads = AverageMeter()
min_maads = AverageMeter()
val_load_iter = iter(dataset)
eval_fraction = 0.1
for i in range(int(len(dataset)*eval_fraction)):
        data = next(val_load_iter)
if floating_point_type == "double":
target_var = data["pose"].double().to(device)
input_var = data["image"].double().to(device)
else:
target_var = data["pose"].float().to(device)
input_var = data["image"].float().to(device)
if torch.sum(torch.isnan(target_var)) > 0:
continue
# compute output
output = model(input_var)
if loss_function.__class__.__name__ == "MSELoss":
# norm over the last dimension (ie. orientations)
norms = torch.norm(output, dim=-1, keepdim=True).to(device)
output = output / norms
if loss_function.__class__.__name__ == "BinghamMixtureLoss":
loss, log_likelihood = loss_function(target_var, output, 49)
else:
loss, log_likelihood = loss_function(target_var, output)
if floating_point_type == "double":
loss = loss.double() / data["image"].shape[0]
else:
loss = loss.float() / data["image"].shape[0]
# measure accuracy and record loss
losses.update(loss.item(), data["image"].size(0))
log_likelihoods.update(log_likelihood.item(), data["image"].size(0))
if loss_function.__class__.__name__ == "VonMisesLoss":
angular_deviation = maad(loss_function, target_var, output, None)
maads.update(angular_deviation)
min_maads.update(angular_deviation)
eaad, min_eaad = kappas_to_eaad(output)
eaads.update(eaad, data["image"].size(0))
min_eaads.update(min_eaad, data["image"].size(0))
else:
stats = loss_function.statistics(target_var, output, 31)
averaged_stats.update(stats, data["image"].size(0))
maads.update(stats["maad"])
if loss_function.__class__.__name__ == "BinghamMixtureLoss":
min_maads.update(stats["mmaad"])
else:
min_maads.update(stats["maad"])
if "Bingham" in loss_function.__class__.__name__:
eaad, min_eaad = bingham_z_to_eaad(
stats, loss_function
)
eaads.update(eaad, data["image"].size(0))
min_eaads.update(min_eaad, data["image"].size(0))
if "Bingham" or "VonMises" in loss_function.__class__.__name__:
print("Loss: {}, Log Likelihood: {}, MAAD: {}, Min MAAD: {}, EAAD: {}, Min EAAD: {}".format(
losses.avg, log_likelihoods.avg, maads.avg, min_maads.avg, eaads.avg, min_eaads.avg))
else:
print("Loss: {}, Log Likelhood: {}, MAAD: {}".format(losses.avg, log_likelihoods.avg, maads.avg))
def kappas_to_eaad(output):
kappas = torch.mean(output[:, :3], 0).detach().cpu().numpy()
eaad = eaad_von_mises(kappas)
return eaad, eaad
def bingham_z_to_eaad(stats, loss_function):
eaads = []
if loss_function.__class__.__name__ == "BinghamLoss":
z_0, z_1, z_2 = stats["z_0"], stats["z_1"], stats["z_2"]
bingham_z = np.array([z_0, z_1, z_2, 0])
eaad = eaad_bingham(bingham_z)
eaads.append(eaad)
elif loss_function.__class__.__name__ == "BinghamMixtureLoss":
for j in range(loss_function._num_components):
bingham_z = [stats["mode_" + str(j) + "_z_{}".format(i)] for i in range(3)]
bingham_z.append(0)
bingham_z = np.array(bingham_z)
eaad = eaad_bingham(bingham_z)
eaads.append(eaad)
return sum(eaads)/len(eaads), min(eaads)
| 4,120 | 40.21 | 105 | py |
deep_bingham | deep_bingham-master/data_loaders/t_less_dataset.py | from .utils import *
from torch.utils.data import Dataset, random_split, Subset
import yaml
import os
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
from PIL import Image
import numpy as np
from skimage import io
import torch
import quaternion
import cv2
import h5py
torch.manual_seed(0)
def make_hdf5_file(config, image_transform):
dataset_path = config["dataset_path"]
if config["dirs"]:
dirs = config["dirs"]
tless_full = TLessSplit(
TLessFullDataset(dataset_path, dirs, image_transform), shuffle=config.get("shuffle", True))
train = tless_full.train
test = tless_full.test
else:
train_dirs = config["train_dirs"]
test_dirs = config["test_dirs"]
train = TLessFullDataset(dataset_path, train_dirs, image_transform)
test = TLessFullDataset(dataset_path, test_dirs, image_transform)
file_dataset = config["hdf5"]
img_shape = train[0]["image"].shape
label_shape = train[0]["pose"].shape[-1]
f = h5py.File("datasets/{}".format(file_dataset), "w")
train_img = f.create_dataset("train_img", (
len(train), img_shape[0], img_shape[1], img_shape[2]))
train_label = f.create_dataset("train_label", (len(train), label_shape))
test_img = f.create_dataset("test_img", (
len(test), img_shape[0], img_shape[1], img_shape[2]))
test_label = f.create_dataset("test_label", (len(test), label_shape))
print("Making HDF5")
for i in range(len(train)):
f["train_img"][i, :, :, :] = train[i]["image"]
f["train_label"][i, :] = train[i]["pose"]
for i in range(len(test)):
f["test_img"][i, :, :, :] = test[i]["image"]
f["test_label"][i, :] = test[i]["pose"]
class TLessTrainTest():
"""
Stores a training and test set for the TLess Dataset
Parameters:
config_file: a yaml file or dictionary that contains data loading
information ex. See configs/upna_train.yaml. The dataset_path stores
the locsation of the originial downloaded dataset. The
preprocess_path is where the processed images and poses will be
stored.
image_transforms: A list of of composed pytorch transforms to be applied
to a PIL image
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
                config = yaml.load(fp, Loader=Loader)
file_dataset = config["hdf5"]
if not os.path.isfile("datasets/{}".format(file_dataset)):
make_hdf5_file(config_file, image_transform)
f = h5py.File("datasets/{}".format(file_dataset), 'r')
biterion = config["biterion"]
blur = config["blur"]
self.train = TLessHDF5(f.get('train_img'), f.get('train_label'),
f.get("train_bb"),
biterion, blur)
self.test = TLessHDF5(f.get('test_img'), f.get('test_label'),
f.get("test_bb"), biterion, blur)
class TLessHDF5(Dataset):
"""
Loads TLess dataset from a HDF5 dataset and applies transformations to
biterion or quaternion form and adds noise to the labels.
biterion: format of the pose. if true, biterion. if false, quaternion.
"""
def __init__(self, images, labels, bb, biterion, blur):
self.images = images
self.labels = labels
self.bb = bb
self.biterion = biterion
self.blur = blur
def __getitem__(self, idx):
image = self.images[idx, :, :, :]
pose = self.labels[idx, :]
if self.blur:
size = 10
kernel = np.ones((size, size), np.float32) / size ** 2
blurred_img = cv2.filter2D(image, -1, kernel)
image = blurred_img
if self.biterion:
convert_to_rad = quaternion_to_euler(pose[0], pose[1], pose[2],
pose[3])
sample = {'image': torch.from_numpy(image),
'pose': torch.Tensor([math.degrees(convert_to_rad[0]),
math.degrees(convert_to_rad[1]),
math.degrees(convert_to_rad[2])])}
else:
sample = {'image': torch.from_numpy(image),
'pose': torch.Tensor(pose)}
return sample
def __len__(self):
return self.images.shape[0]
class TLessSplit(object):
def __init__(self, dataset, shuffle=True):
train_size = int(len(dataset) * 0.75)
if shuffle:
self.train, self.test = random_split(dataset, [train_size, len(
dataset) - train_size])
else:
self.train = Subset(dataset, list(range(train_size)))
self.test = Subset(dataset, list(range(train_size, len(dataset))))
class TLessFullDataset(Dataset):
def __init__(self, path, dirs, image_transform):
self.subdatasets = []
self.size = [0]
self.image_transform = image_transform
for i in range(len(dirs)):
self.subdatasets.append(
TLessSingleDataset(path, dirs[i], self.image_transform))
self.size.append(len(self.subdatasets[i]) + self.size[-1])
def __getitem__(self, idx):
data_bin = 0
if not type(idx) == int:
idx = idx.item()
for i in range(1, len(self.size)):
if self.size[i] > idx >= self.size[i - 1]:
data_bin = i - 1
new_index = idx - self.size[data_bin]
return self.subdatasets[data_bin][new_index]
def __len__(self):
return self.size[-1]
class TLessSingleDataset(Dataset):
def __init__(self, path, direc, image_transform):
self.dir_to_gt = {}
self.full_path = os.path.join(path, direc)
with open(self.full_path + "/gt.yml") as fp:
self.dir_to_gt = yaml.load(fp, Loader=Loader)
self.size = len(self.dir_to_gt.keys())
self.image_transform = image_transform
def __getitem__(self, index):
name_img = str(index).zfill(4)
img_path = os.path.join(self.full_path, "rgb",
"{}.png".format(name_img))
bb = np.array(self.dir_to_gt[index][0]["obj_bb"])
image = Image.fromarray(
io.imread(img_path)[int(bb[1]): int(bb[1] + bb[3]),
int(bb[0]): int(bb[0] + bb[2]), :])
pose = np.array(self.dir_to_gt[index][0]["cam_R_m2c"]).reshape(3, 3)
if self.image_transform:
image = self.image_transform(image).numpy()
pose = rotation_matrix_to_quaternion(pose)
assert (sum(np.array(self.dir_to_gt[index][0]["obj_bb"])) != 0)
return {"image": image, "pose": torch.Tensor(pose)}
def __len__(self):
return self.size
def rotation_matrix_to_quaternion(rot_mat):
quat = quaternion.as_float_array(quaternion.from_rotation_matrix(rot_mat))
return quat
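# --- Editor's sanity check (not part of the original module) -----------------
# The identity rotation maps to the unit quaternion [1, 0, 0, 0] (up to sign).
# This assumes the numpy-quaternion package imported above.
def _demo_rotation_matrix_to_quaternion():
    quat = rotation_matrix_to_quaternion(np.eye(3))
    assert np.allclose(np.abs(quat), [1., 0., 0., 0.])
if __name__ == "__main__":
    _demo_rotation_matrix_to_quaternion()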
| 7,101 | 34.688442 | 103 | py |
deep_bingham | deep_bingham-master/data_loaders/upna_dataset.py | import os
import torch
from PIL import Image
from skimage import io
from torch.utils.data import Dataset
import h5py
from .upna_preprocess import *
from .utils import *
from bingham_distribution import BinghamDistribution
def make_hdf5_file(config, image_transform):
dataset_path = config["preprocess_path"]
csv_train = dataset_path + "/train/input.csv"
csv_test = dataset_path + "/test/input.csv"
biterion = config["biterion"]
if os.path.isfile(csv_train) and os.path.isfile(csv_test):
test_frame = pd.read_csv(csv_test)
train_frame = pd.read_csv(csv_train)
else:
preprocess = UpnaHeadPoseDataPreprocess(config)
test_frame = preprocess.frame_test
train_frame = preprocess.frame_train
train = UpnaHeadPoseSplitSet(dataset_path + "/train",
train_frame, image_transform)
test = UpnaHeadPoseSplitSet(dataset_path + "/test",
test_frame, image_transform)
img_shape = train[0]["image"].shape
label_shape = train[0]["pose"].shape[-1]
f = h5py.File(dataset_path + "/dataset.hdf5", "w")
f.create_dataset("train_img", (
len(train), img_shape[0], img_shape[1], img_shape[2]))
f.create_dataset("train_label", (len(train), label_shape))
f.create_dataset("test_img", (
len(test), img_shape[0], img_shape[1], img_shape[2]))
f.create_dataset("test_label", (len(test), label_shape))
for i, data in enumerate(train):
f["train_img"][i, :, :, :] = train[i]["image"]
f["train_label"][i, :] = train[i]["pose"]
print("train", i)
for i, data in enumerate(test):
f["test_img"][i, :, :, :] = test[i]["image"]
f["test_label"][i, :] = test[i]["pose"]
print("test", i)
class UpnaHeadPoseTrainTest():
"""
Stores a training and test set for the UPNA Head Pose Dataset
Parameters:
config_file: a yaml file or dictionary that contains data loading
            information, e.g. see configs/upna_train.yaml. The dataset_path
            stores the location of the original downloaded dataset. The
preprocess_path is where the processed images and poses will be stored.
image_transforms: A list of of composed pytorch transforms to be applied
to a PIL image
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
                config = yaml.safe_load(fp)
if not os.path.isfile(config["preprocess_path"] + "/dataset.hdf5"):
make_hdf5_file(config_file, image_transform)
f = h5py.File(config["preprocess_path"] + "/dataset.hdf5", 'r')
noise = config["euler_noise"]
quat_noise = config["quat_noise"]
biterion = config["biterion"]
self.train = UpnaHDF5(f.get('train_img'), f.get('train_label'),
biterion, noise, quat_noise)
self.test = UpnaHDF5(f.get('test_img'), f.get('test_label'), biterion,
noise, quat_noise)
class UpnaHDF5(Dataset):
"""
Loads UPNA dataset from a HDF5 dataset and applies transformations to
biterion or quaternion form and adds noise to the labels.
biterion: format of the pose. if true, biterion. if false, quaternion.
euler_noise: the standard deviation of the Gaussian distribution that we
sample noise from
quat_noise: the Z of a bingham distribution that we sample noise from
"""
def __init__(self, images, labels, biterion, euler_noise, quat_noise):
self.images = images
self.labels = labels
self.biterion = biterion
if euler_noise:
s = np.random.normal(0, euler_noise, 3 * len(self.labels))
self.euler_noise = []
for i in range(len(self.labels)):
self.euler_noise.append([s[i * 3], s[i * 3 + 1], s[i * 3 + 2]])
else:
self.euler_noise = None
if quat_noise:
quat_noise = [float(quat_noise[0]), float(quat_noise[1]),
float(quat_noise[2]), 0]
bd = BinghamDistribution(np.identity(4), np.array(quat_noise))
samples = bd.random_samples(len(labels))
perm = [3, 0, 1, 2]
re_samples = samples[:, perm]
self.quat_noise = quaternion.as_quat_array(re_samples)
else:
self.quat_noise = []
def __getitem__(self, idx):
image = torch.from_numpy(self.images[idx, :, :, :]).float()
if self.euler_noise:
pose = np.array([self.labels[idx][0] + self.euler_noise[idx][0],
self.labels[idx][1] + self.euler_noise[idx][1],
self.labels[idx][2] + self.euler_noise[idx][2]])
else:
pose = self.labels[idx, :]
if len(self.quat_noise) != 0:
w, x, y, z = convert_euler_to_quaternion(pose[0], pose[1], pose[2])
quat_pose = quaternion.quaternion(w, x, y, z)
res = quaternion.as_float_array(quat_pose * self.quat_noise[idx])
roll, pitch, yaw = quaternion_to_euler(res[0], res[1], res[2],
res[3])
pose = np.array(
[math.degrees(roll), math.degrees(pitch), math.degrees(yaw)])
if self.biterion:
sample = {'image': image,
'pose': torch.from_numpy(pose)}
else:
sample = {'image': image,
'pose': convert_euler_to_quaternion(pose[0],
pose[1],
pose[2])}
return sample
def __len__(self):
return self.images.shape[0]
class UpnaHeadPoseSplitSet(Dataset):
def __init__(self, dataset_path, frame, image_transform):
"""
Stores a training or test set for the UPNA Head Pose Dataset
Parameters:
dataset_path: the location of where processed images and poses will be stored.
            frame: the csv frame that stores the poses
image_transforms: A list of of composed pytorch transforms to be applied to a PIL image
"""
self.frame = frame
self.image_transform = image_transform
self.dataset_path = dataset_path
def __len__(self):
return len(self.frame)
def __getitem__(self, idx):
name = self.frame.iloc[idx, 0]
frame_index = idx
img_name = os.path.join(self.dataset_path, name)
image = Image.fromarray(io.imread(img_name))
        head_pose = self.frame.iloc[frame_index, 1:4].to_numpy()
head_pose = head_pose.astype('float').reshape(-1, 3)[0]
if self.image_transform:
image = self.image_transform(image)
sample = {'image': image,
'pose': head_pose}
return sample
# TODO: GET RID OF THIS- REDUNDANT. except for the images field. need to incorporate that elsewhere...
class UpnaHeadPoseDataset(Dataset):
"""
Stores a test set for the UPNA Head Pose Dataset
Parameters:
config_file: a yaml file or dictionary that contains data loading
        information, e.g. see configs/upna_train.yaml. The dataset_path stores
        the location of the original downloaded dataset. The
preprocess_path is where the processed images and poses will be stored.
image_transforms: (optional) A list of of composed pytorch transforms to
be applied to a PIL image
images: (optional) Can provide a list of image names and a dataset will
be constructed with those images
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
                config = yaml.safe_load(fp)
self.dataset_path = config["preprocess_path"] + "/test"
self.csv_path = self.dataset_path + "/input.csv"
self.user = config["user"]
self.video = config["video"]
if os.path.isfile(self.csv_path):
self.frame = pd.read_csv(self.csv_path)
else:
self.frame = UpnaHeadPoseDataPreprocess(config_file).frame_test
self.image_transform = image_transform
self.images = self._generate_file_names()
def __len__(self):
return len(self.images)
def _generate_file_names(self):
"""
        Generates a list of frame file names from the user number (self.user,
        e.g. "07") and the video number (self.video, e.g. "03").
Returns:
names: a list of file names.
"""
names = []
for i in range(1, 300):
string_name = "User_{}/user_{}_video_{}_frame{}.jpg".format(
self.user, self.user, self.video, i)
names.append(string_name)
return names
def __getitem__(self, idx):
name = self.images[idx]
frame_index = get_frame_index(name, self.frame)
img_name = os.path.join(self.dataset_path, name)
image = Image.fromarray(io.imread(img_name))
        head_pose = self.frame.iloc[frame_index, 1:4].to_numpy()
head_pose = head_pose.astype('float').reshape(-1, 3)[0]
if self.image_transform:
image = self.image_transform(image)
sample = {'image': image,
'pose': torch.from_numpy(
convert_euler_to_quaternion(head_pose[0], head_pose[1],
head_pose[2]))}
return sample
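# --- Editor's usage sketch (not part of the original module) -----------------
# The quaternion-noise path above relies on convert_euler_to_quaternion and
# quaternion_to_euler (star-imported from .utils) being inverse to each other,
# with the latter returning radians. A minimal round-trip check under that
# assumption:
def _demo_euler_quaternion_round_trip():
    w, x, y, z = convert_euler_to_quaternion(10., 20., 30.)
    roll, pitch, yaw = quaternion_to_euler(w, x, y, z)
    print([math.degrees(roll), math.degrees(pitch), math.degrees(yaw)],
          "should approximately equal [10., 20., 30.]")
if __name__ == "__main__":
    _demo_euler_quaternion_round_trip()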
| 9,853 | 36.9 | 102 | py |
deep_bingham | deep_bingham-master/data_loaders/idiap_dataset.py | """
Data loading methods from matlab file from:
https://github.com/lucasb-eyer/BiternionNet
"""
import os
import h5py
import yaml
import torch
from PIL import Image
from skimage import io
from torch.utils.data import Dataset
from .utils import *
from bingham_distribution import BinghamDistribution
class IDIAPTrainTest(object):
"""
Stores a training and test set for the IDIAP Head Pose Dataset
Parameters:
config_file: a yaml file or dictionary that contains data loading
            information, e.g. see configs/upna_train.yaml. The dataset_path
            stores the location of the original downloaded dataset. The
preprocess_path is where the processed images and poses will be
stored.
image_transforms: A list of of composed pytorch transforms to be applied to a PIL image
"""
def __init__(self, config_file, image_transform=None):
if type(config_file) == dict:
config = config_file
else:
with open(config_file) as fp:
                config = yaml.safe_load(fp)
self.dataset_path = config["dataset_path"]
mat_file = self.dataset_path + "/or_label_full.mat"
self.image_transform = image_transform
pose_file = h5py.File(mat_file)
train_table = load("train", pose_file)
test_table = load("test", pose_file)
euler_noise = config["euler_noise"]
biterion = config["biterion"]
quat_noise = config["quat_noise"]
self.train = IDIAPSplitSet(train_table, image_transform,
self.dataset_path + "/train", euler_noise,
quat_noise,
biterion)
self.test = IDIAPSplitSet(test_table, image_transform,
self.dataset_path + "/test", euler_noise,
quat_noise, biterion)
def matlab_string(obj):
"""
Return a string parsed from a matlab file
"""
return ''.join(chr(c) for c in obj[:, 0])
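# --- Editor's usage sketch (not part of the original module) -----------------
# or_label_full.mat stores strings as column vectors of character codes; this
# mimics such an object with a plain numpy array. Call _demo_matlab_string()
# to run the check.
def _demo_matlab_string():
    fake_ref = np.array([[72], [101], [108], [108], [111]])
    assert matlab_string(fake_ref) == "Hello"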
def matlab_strings(mat, ref):
"""
Returns an array of strings parsed from matlab file
"""
return [matlab_string(mat[r]) for r in ref[:, 0]]
def matlab_array(mat, ref, dtype):
"""
Parses the relevant information (ref) with type
dtype from a matlab file (mat) and returns
a numpy array.
    Parameters:
mat: matlab file containing pose information
ref: the column of the file of interest
dtype: data type of data
"""
N = len(ref)
arr = np.empty(N, dtype=dtype)
for i in range(N):
arr[i] = mat[ref[i, 0]][0, 0]
return arr
def load(traintest, mat_full):
"""
Loads train or test data from mat lab file containing both train
and test data and returns the relevant information in numpy arrays
Parameters:
traintest: a string that denotes "train" or "test"
mat_full: the matlab file containing pose information
Returns:
pan: a numpy array containing pan angles from the dataset
tilt: a numpy array containing tilt angles from the dataset
roll: a numpy array containing roll angles from the dataset
names: a numpy array containing image names from the dataset
"""
container = mat_full['or_label_' + traintest]
pan = matlab_array(mat_full, container['pan'], np.float32)
tilt = matlab_array(mat_full, container['tilt'], np.float32)
roll = matlab_array(mat_full, container['roll'], np.float32)
names = matlab_strings(mat_full, container['name'])
return pan, tilt, roll, names
class IDIAPSplitSet(Dataset):
"""
Stores a training or test set for the UPNA Head Pose Dataset
Parameters:
dataset_path: the location of where processed images and poses will
be stored.
image_transforms: A list of of composed pytorch transforms to be
applied to a PIL image
euler_noise: the standard deviation of the Gaussian distribution
that we sample noise from
quat_noise: the Z of the bingham distribution that we sample noise
from
"""
def __init__(self, table, image_transform, dataset_path, euler_noise,
quat_noise, biterion):
self.image_transform = image_transform
self.pan, self.tilt, self.roll, self.names = table
self.dataset_path = dataset_path
if euler_noise:
s = np.random.normal(0, euler_noise, 3 * len(self.names))
self.euler_noise = []
for i in range(len(self.names)):
self.euler_noise.append([s[i * 3], s[i * 3 + 1], s[i * 3 + 2]])
else:
self.euler_noise = None
if quat_noise:
bd = BinghamDistribution(np.eye(4), np.array(quat_noise))
self.quat_noise = quaternion.as_quat_array(
bd.random_samples(len(self.pan)))
else:
self.quat_noise = []
self.biterion = biterion
def __len__(self):
return len(self.names)
def __getitem__(self, idx):
pan = self.pan[idx]
tilt = self.tilt[idx]
roll = self.roll[idx]
name = self.names[idx]
img_name = os.path.join(self.dataset_path, name)
image = Image.fromarray(io.imread(img_name))
if self.image_transform:
image = self.image_transform(image)
if self.euler_noise:
pan = math.degrees(pan) + self.euler_noise[idx][0]
tilt = math.degrees(tilt) + self.euler_noise[idx][1]
roll = math.degrees(roll) + self.euler_noise[idx][2]
else:
pan = math.degrees(pan)
tilt = math.degrees(tilt)
roll = math.degrees(roll)
if len(self.quat_noise) != 0:
w, x, y, z = convert_euler_to_quaternion(pan, tilt, roll)
quat_pose = quaternion.quaternion(w, x, y, z)
res = quaternion.as_float_array(quat_pose * self.quat_noise[idx])
euler_res = quaternion_to_euler(res[0], res[1], res[2], res[3])
pan = math.degrees(euler_res[0])
tilt = math.degrees(euler_res[1])
roll = math.degrees(euler_res[2])
if self.biterion:
sample = {'image': image,
'pose': torch.Tensor([pan, tilt, roll])}
else:
sample = {'image': image,
'pose': torch.from_numpy(
convert_euler_to_quaternion(pan,
tilt,
roll))}
return sample
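# --- Hypothetical usage sketch (not part of the original file) ---
# Pulls one sample out of a constructed split; training code would normally
# wrap the split in a torch DataLoader.
def _demo_idiap_split(split_set):
    #split_set: an IDIAPSplitSet instance, e.g. the .train attribute built
    #by the dataset class above.
    sample = split_set[0]
    #with biterion=True the pose is a tensor of (pan, tilt, roll) in degrees;
    #otherwise it is a quaternion (w, x, y, z).
    print(sample['image'], sample['pose'])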
| 6,671 | 34.679144 | 95 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/main.py | import argparse
import time
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score
from dataloader import DatasetLoader, collate_fn
from primary_pred_module import primModel
from ancillary_pred_module import anclModel
from self_correction_module import selfcorrModel
parser = argparse.ArgumentParser()
#dataset
parser.add_argument('--dataset', type=str, default='salad', help='dataset', choices=['breakfast', 'salad'])
parser.add_argument('--feature_type', type=str, default='fisher', help='feature type, for salad, only have first three choices.', choices=['fisher', 'gt', 'I3D', 'fisher_label', 'I3D_label', 'fisher_label_cat', 'I3D_label_cat'])
parser.add_argument('--n_classes', type=int, default=19, help='action classes, corresponding to dataset', choices=[48, 19])
parser.add_argument('--observation', type=str, default='obs-0.3', help='portion of observed video', choices=['obs-0.2', 'obs-0.3'])
parser.add_argument('--prediction', type=float, default=0.1, help='portion of predicted video', choices=[0.1, 0.2, 0.3, 0.5])
parser.add_argument('--fps', type=int, default=30, help='fps of video, corresponding to dataset', choices=[15, 30])
#video preprocessing
parser.add_argument('--len_S_list', nargs='+', type=int, default=[5, 10, 15], help='S to be divided into how many clips')
parser.add_argument('--len_R', type=int, default=5, help='R to be divided into how many clips')
parser.add_argument('--startpoints_R', nargs='+', type=float, default=[5, 10, 15], help='startpoints of R (how many seconds before current time point')
#model hypermeters
parser.add_argument('--conv_dim_NLB', type=int, default=128, help='out_channel dimension of the convolution layer in NLB')
parser.add_argument('--linear_dim', type=int, default=1024, help='dimension of the linear layer in CB.')
parser.add_argument('--dropout_NLB', type=float, default=0.3, help='dropout rate of the dropout layer in NLB')
parser.add_argument('--dropout_CB', type=float, default=0.3, help='dropout rate of the dropout layer in CB')
parser.add_argument('--dropout_TAB', type=float, default=0.3, help='dropout rate of the dropout layer in TAB')
parser.add_argument('--hidden_dim_LSTM', type=int, default=512, help='hidden layer of LSTM (decoder of dense prediction)')
parser.add_argument('--max_len', type=int, default=25, help='maximum times of LSTM recurrence (should be long enough that no video has more clips to predict than this number, breakfast is 24, salad is 25.)')
parser.add_argument('--light', type=bool, default=True, help='whether to use light version model (refer to block.py for details)')
#self correction module
parser.add_argument('--self_correction_method', type=str, default='auto', help='which method to use in self correction module', choices=['no', 'linear', 'auto'])
parser.add_argument('--alpha', nargs=2, type=float, default=[30, 0.5], help='start and end value of alpha in self correction module (start>end), only needed when self correction module method is "linear"')
#other
parser.add_argument('--model', type=str, default='/model', help='path to save model')
parser.add_argument('--batch', type=int, default=2, help='batch size (salad is 2, breakfast is 16)')
args = parser.parse_args()
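# Example invocation (hypothetical paths/choices; flags are defined above):
# python main.py --dataset salad --feature_type fisher --n_classes 19 \
#                --observation obs-0.3 --prediction 0.1 --fps 30 --batch 2 \
#                --self_correction_method auto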
datapath = args.dataset + '/features/' #change to your datapath
modelpath = args.dataset + args.model #change to your modelpath (path to save trained models)
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
if args.dataset == 'breakfast':
if args.feature_type == 'gt' or args.feature_type == 'fisher_label' or args.feature_type == 'I3D_label':
video_feat_dim = args.n_classes
elif args.feature_type == 'fisher':
video_feat_dim = 64
elif args.feature_type == 'I3D':
video_feat_dim = 400
elif args.feature_type == 'fisher_label_cat':
video_feat_dim = 64 + args.n_classes
elif args.feature_type == 'I3D_label_cat':
video_feat_dim = 400 + args.n_classes
else: #args.dataset == 'salad'
if args.feature_type == 'gt':
video_feat_dim = args.n_classes
elif args.feature_type == 'fisher':
video_feat_dim = 64
elif args.feature_type == 'I3D':
video_feat_dim = 2048
def mycrossentropy(prob, gt):
loss = 0
prob = F.softmax(prob, 1)
for i in range(len(prob)):
loss -= torch.sum(gt[i]*torch.log(prob[i]))
return loss
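# --- Sanity-check sketch (illustrative helper, never called) ---
# mycrossentropy is a cross-entropy against *soft* targets: it softmaxes the
# logits and weights -log(prob) by the given target distribution. With
# one-hot targets it should agree with nn.CrossEntropyLoss(reduction='sum').
def _check_soft_ce():
    logits = torch.randn(4, 19)                 #4 steps, 19 classes
    hard = torch.randint(0, 19, (4,))
    soft = F.one_hot(hard, 19).float()
    a = mycrossentropy(logits, soft)
    b = nn.CrossEntropyLoss(reduction='sum')(logits, hard)
    assert torch.allclose(a, b, atol=1e-5)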
def main():
alpha = args.alpha[0]
end = args.alpha[1]
    full = 8 #number of videos in the full set
    total = 40 #number of videos in the whole training set (full set plus weak set)
light = 'light' if args.light else 'heavy'
anci = anclModel(args, video_feat_dim).to(device)
prim = primModel(args, video_feat_dim).to(device)
loss_fn = nn.CrossEntropyLoss(reduction='sum')
loss_mse = nn.MSELoss(reduction='sum')
if not args.self_correction_method == 'auto':
#step1: train ancillary model using full set
anci.train()
optimizer1 = optim.Adam(anci.parameters(), lr=0.001, betas=(0.99, 0.9999))
scheduler1 = optim.lr_scheduler.MultiStepLR(optimizer1, milestones=[5, 15])
print('-------Start training ancillary model-------')
for e in range(20):
s = time.time()
            fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full'), batch_size=args.batch, shuffle=True,
                                 collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer1.zero_grad()
label, prob, curr_action, pred_action_durations, _ = anci(S, R, wl)
loss += loss_fn(curr_action, wl)
for i in range(prob.shape[0]):
loss += loss_mse(pred_action_durations[i][:len(dl[i])-1], dl[i][:-1])
                    if len(fl[i]) > 0:
loss += loss_fn(prob[i][:len(fl[i])], fl[i])
loss.backward()
optimizer1.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
scheduler1.step()
            print('step1 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/full, time.time()-s, acc))
#step2: train primary model using full set and weak set with ancillary model fixed
optimizer2 = optim.Adam(prim.parameters(), lr=0.001, betas=(0.99, 0.9999))
scheduler2 = optim.lr_scheduler.MultiStepLR(optimizer2, milestones=[3, 15])
anci.eval()
prim.train()
print('-------Start training primary model-------')
for e in range(25):
s = time.time()
            fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full'), batch_size=args.batch, shuffle=True,
                                 collate_fn=collate_fn)
            weakset = DataLoader(dataset=DatasetLoader(args, datapath, 'weak'), batch_size=args.batch*2, shuffle=True,
                                 collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer2.zero_grad()
label, prob, curr_action, pred_action_durations, _ = prim(S, R)
loss += loss_fn(curr_action, wl)
for i in range(prob.shape[0]):
loss += loss_mse(pred_action_durations[i][:len(dl[i])-1], dl[i][:-1])
                    if len(fl[i]) > 0:
loss += loss_fn(prob[i][:len(fl[i])], fl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
for S, R, _, wl, _ in weakset:
loss = 0
optimizer2.zero_grad()
_, prob_p, curr_action, pred_action_durations_p, attention_p = prim(S, R)
with torch.no_grad():
_, prob_a, _, pred_action_durations_a, attention_a = anci(S, R, wl)
if args.self_correction_method == 'no':
sfl = prob_a
sfad = pred_action_durations_a
else:
corr = selfcorrModel(args, alpha)
sfl, sfad = corr(prob_p, prob_a, pred_action_durations_p, pred_action_durations_a)
loss += loss_fn(curr_action, wl)
loss += loss_mse(pred_action_durations_p, sfad)
for i in range(args.max_len):
loss += torch.norm(attention_a[i]-attention_p[i], p=2)
for i in range(prob_p.shape[0]):
loss += mycrossentropy(prob_p[i], sfl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
scheduler2.step()
alpha = max(alpha*0.95, end)
            print('step2 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/total, time.time()-s, acc))
#step3: test
prim.eval()
with torch.no_grad():
            testset = DataLoader(dataset=DatasetLoader(args, datapath, 'test'), batch_size=args.batch, shuffle=False,
                                 collate_fn=collate_fn)
total_acc = 0
n = 0
for S, R, fl, wl, dl in testset:
label, _, curr_action, pred_action_durations, _ = prim(S, R)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
print('frame-wise accuracy on test set is %.4f' % acc)
else:
#step1: train ancillary model using half full set
anci.train()
optimizer1 = optim.Adam(anci.parameters(), lr=0.001, betas=(0.99, 0.9999))
scheduler1 = optim.lr_scheduler.MultiStepLR(optimizer1, milestones=[5])
print('-------Start training ancillary model-------')
for e in range(15):
s = time.time()
            fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full', half=True), batch_size=int(args.batch/2), shuffle=True,
                                 collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer1.zero_grad()
label, prob, curr_action, pred_action_durations, _ = anci(S, R, wl)
loss += loss_fn(curr_action, wl)
for i in range(prob.shape[0]):
loss += loss_mse(pred_action_durations[i][:len(dl[i])-1], dl[i][:-1])
if len(fl[i]) > 0:
loss += loss_fn(prob[i][:len(fl[i])], fl[i])
loss.backward()
optimizer1.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
scheduler1.step()
            print('step1 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/int(full/2), time.time()-s, acc))
#step2: train primary model and self-correction model using full set with ancillary model fixed
corr = selfcorrModel(args, alpha).to(device)
params = [{'params':prim.parameters()}, {'params':corr.parameters()}]
optimizer2 = optim.Adam(params, lr=0.001, betas=(0.99, 0.9999))
scheduler2 = optim.lr_scheduler.MultiStepLR(optimizer2, milestones=[3])
anci.eval()
prim.train()
corr.train()
print('-------Start training primary model and self-correction model-------')
for e in range(20):
s = time.time()
            fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full'), batch_size=args.batch, shuffle=True,
                                 collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer2.zero_grad()
label, prob_p, curr_action, pred_action_durations_p, attention_p = prim(S, R)
with torch.no_grad():
_, prob_a, _, pred_action_durations_a, attention_a = anci(S, R, wl)
sfl, sfad = corr(prob_p, prob_a, pred_action_durations_p, pred_action_durations_a)
loss += loss_fn(curr_action, wl)
for i in range(args.max_len):
loss += torch.norm(attention_a[i]-attention_p[i], p=2)
for i in range(prob_p.shape[0]):
loss += loss_mse(pred_action_durations_p[i][:len(dl[i])-1], dl[i][:-1])
loss += loss_mse(sfad[i][:len(dl[i])-1], dl[i][:-1])
                    if len(fl[i]) > 0:
loss += loss_fn(prob_p[i][:len(fl[i])], fl[i])
loss += loss_fn(sfl[i][:len(fl[i])], fl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations_p[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations_p[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
scheduler2.step()
            print('step2 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/full, time.time()-s, acc))
#step3: fine-tune primary model using full set and weak set and self-correction model using full set with ancillary model fixed
print('-------Start fine-tuning primary model and self-correction model-------')
for e in range(20):
s = time.time()
            fullset = DataLoader(dataset=DatasetLoader(args, datapath, 'full'), batch_size=args.batch, shuffle=True,
                                 collate_fn=collate_fn)
            weakset = DataLoader(dataset=DatasetLoader(args, datapath, 'weak'), batch_size=args.batch*2, shuffle=True,
                                 collate_fn=collate_fn)
total_loss = []
total_acc = 0
n = 0
for S, R, fl, wl, dl in fullset:
loss = 0
optimizer2.zero_grad()
label, prob_p, curr_action, pred_action_durations_p, attention_p = prim(S, R)
with torch.no_grad():
_, prob_a, _, pred_action_durations_a, attention_a = anci(S, R, wl)
sfl, sfad = corr(prob_p, prob_a, pred_action_durations_p, pred_action_durations_a)
loss += loss_fn(curr_action, wl)
for i in range(args.max_len):
loss += torch.norm(attention_a[i]-attention_p[i], p=2)
for i in range(prob_p.shape[0]):
loss += loss_mse(pred_action_durations_p[i][:len(dl[i])-1], dl[i][:-1])
loss += loss_mse(sfad[i][:len(dl[i])-1], dl[i][:-1])
                    if len(fl[i]) > 0:
loss += loss_fn(prob_p[i][:len(fl[i])], fl[i])
loss += loss_fn(sfl[i][:len(fl[i])], fl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations_p[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations_p[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
for S, R, _, wl, _ in weakset:
loss = 0
optimizer2.zero_grad()
_, prob_p, curr_action, pred_action_durations_p, attention_p = prim(S, R)
with torch.no_grad():
_, prob_a, _, pred_action_durations_a, attention_a = anci(S, R, wl)
sfl, sfad = corr(prob_p, prob_a, pred_action_durations_p, pred_action_durations_a)
loss += loss_fn(curr_action, wl)
loss += loss_mse(pred_action_durations_p, sfad)
for i in range(args.max_len):
loss += torch.norm(attention_a[i]-attention_p[i], p=2)
for i in range(prob_p.shape[0]):
loss += mycrossentropy(prob_p[i], sfl[i])
loss.backward()
optimizer2.step()
total_loss.append(loss)
            print('step3 epoch %d: average loss is %.4f, total time %.2f seconds, acc %.4f' % (e+1, sum(total_loss)/total, time.time()-s, acc))
#step4: test
prim.eval()
with torch.no_grad():
            testset = DataLoader(dataset=DatasetLoader(args, datapath, 'test'), batch_size=4, shuffle=False,
                                 collate_fn=collate_fn)
total_acc = 0
n = 0
            for S, R, fl, wl, dl in testset: #collate_fn returns five values
label, _, curr_action, pred_action_durations, _ = prim(S, R)
label = torch.stack(label).cpu().numpy().T #batch_size*max_len
for i in range(len(fl)):
gt_frame = [int(wl[i])] * int(dl[i][0] * dl[i][-1])
pred_frame = [int(torch.argmax(curr_action[i]))] * int(pred_action_durations[i][0] * dl[i][-1])
for j in range(1, len(dl[i])-1):
gt_frame.extend([int(fl[i][j-1])] * int(dl[i][j] * dl[i][-1]))
pred_frame.extend([int(label[i][j-1])] * int(pred_action_durations[i][j] * dl[i][-1]))
min_len = min(len(gt_frame), len(pred_frame))
if min_len > 0:
n += 1
total_acc += accuracy_score(gt_frame[:min_len], pred_frame[:min_len])
acc = total_acc/n if n > 0 else 0
print('frame-wise accuracy on test set is %.4f' % acc)
torch.save(anci, os.path.join(modelpath, 'fullset_%d_%s_%s_pred-%f_%s_%s_anci' % (full, args.feature_type, args.observation, args.prediction, light, args.self_correction_method)))
torch.save(prim, os.path.join(modelpath, 'fullset_%d_%s_%s_pred-%f_%s_%s_prim' % (full, args.feature_type, args.observation, args.prediction, light, args.self_correction_method)))
if args.self_correction_method != 'no':
torch.save(corr, os.path.join(modelpath, 'fullset_%d_%s_%s_pred-%f_%s_%s_corr' % (full, args.feature_type, args.observation, args.prediction, light, args.self_correction_method)))
print('Done!')
if __name__ == "__main__":
main()
| 23,720 | 51.596452 | 228 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/data_preprocessing.py | import os.path
import pickle
import numpy as np
import torch
def read_mapping_dict(mapping_file):
file_ptr = open(mapping_file, 'r')
actions = file_ptr.read().split('\n')[:-1]
actions_dict = dict()
for a in actions:
actions_dict[a.split()[1]] = int(a.split()[0])
return actions_dict
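# --- Illustrative format note (assumed mapping.txt layout) ---
# Each line is "<index> <action_name>"; e.g. a file containing
#   0 SIL
#   1 cut_tomato
# would make read_mapping_dict return {'SIL': 0, 'cut_tomato': 1}.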
def get_label_bounds(data_labels):
labels_uniq = []
labels_uniq_loc = []
for kki in range(len(data_labels)):
sequence_labels, sequence_durations = get_label_length_seq(data_labels[kki])
labels_uniq.append(sequence_labels)
labels_uniq_loc.append(sequence_durations)
return labels_uniq, labels_uniq_loc
def get_label_length_seq(content):
label_seq = []
length_seq = []
start = 0
length_seq.append(0)
for i in range(len(content)):
if content[i] != content[start]:
label_seq.append(content[start])
length_seq.append(i)
start = i
label_seq.append(content[start])
length_seq.append(len(content))
return label_seq, length_seq
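# Worked example for get_label_length_seq (illustrative, never called):
# it is a run-length encoding of the frame-wise labels.
def _check_label_length_seq():
    labels, bounds = get_label_length_seq([3, 3, 7, 7, 7, 2])
    #labels: one entry per run of equal frame labels
    #bounds: start frame of each run, plus the total number of frames
    assert labels == [3, 7, 2]
    assert bounds == [0, 2, 5, 6]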
class DataClass:
def __init__(self, args, path, mode='full', half=False):
self.path = path
self.GT_folder = os.path.join(self.path, 'groundTruth/')
self.mapping = os.path.join(self.path, 'mapping.txt')
self.full_split = os.path.join(self.path, 'split/full.split3.bundle')
self.weak_split = os.path.join(self.path, 'split/weak.split3.bundle')
self.test_split = os.path.join(self.path, 'split/test.split.bundle')
self.obs = float(args.observation[-3:]) #observation portion
self.pred = args.prediction #prediction portion
self.fps = args.fps #video's fps
self.curr_label = dict()
self.future_labels = dict()
self.future_durations = dict()
actions_dict = read_mapping_dict(self.mapping)
if args.feature_type == 'gt' or args.feature_type == 'fisher' or args.feature_type == 'I3D':
if mode == 'full':
self.data_feat, data_labels = self.load_data_features(args, self.full_split, actions_dict, half)
elif mode == 'weak':
self.data_feat, data_labels = self.load_data_features(args, self.weak_split, actions_dict, half)
else:
self.data_feat, data_labels = self.load_data_features(args, self.test_split, actions_dict, half)
elif args.feature_type == 'fisher_label' or args.feature_type == 'I3D_label':
if mode == 'full':
self.data_feat, data_labels = self.load_seg_outs(args, self.full_split, actions_dict, mode, half)
elif mode == 'weak':
self.data_feat, data_labels = self.load_seg_outs(args, self.weak_split, actions_dict, mode, half)
else:
self.data_feat, data_labels = self.load_seg_outs(args, self.test_split, actions_dict, mode, half)
else:
if mode == 'full':
self.data_feat, data_labels = self.load_seg_outs_concat(args, self.full_split, actions_dict, mode, half)
elif mode == 'weak':
self.data_feat, data_labels = self.load_seg_outs_concat(args, self.weak_split, actions_dict, mode, half)
else:
self.data_feat, data_labels = self.load_seg_outs_concat(args, self.test_split, actions_dict, mode, half)
labels_uniq, labels_uniq_loc = get_label_bounds(data_labels)
counter_index = 0
for kki in range(0, len(data_labels)):
mi_labels = data_labels[kki] #a video's frame-wise label
video_len = len(mi_labels)
sequence_labels = labels_uniq[kki]
sequence_durations = labels_uniq_loc[kki]
current_stop = int(len(mi_labels) * self.obs) #the last frame of the observation part
pred_stop = int(len(mi_labels) * (self.obs + self.pred)) #the last frame of the prediction part
stop_index = 0
for ioi in range(len(sequence_durations) - 1):
if sequence_durations[ioi] <= current_stop:
stop_index = ioi
#the order of the last action in the observation part
list_future_labels = []
list_future_durations = [min(pred_stop, sequence_durations[stop_index+1]) - current_stop] #current action duration (within the prediction part)
val_curr_label = sequence_labels[stop_index]
if stop_index + 1 != len(sequence_labels):
for izi in range(stop_index + 1, len(sequence_labels)):
if sequence_durations[izi] <= pred_stop:
list_future_durations.append(min(pred_stop - sequence_durations[izi], sequence_durations[izi+1] - sequence_durations[izi]))
list_future_labels.append(sequence_labels[izi])
self.curr_label[str(counter_index)] = torch.tensor(val_curr_label).long() #current action
self.future_labels[str(counter_index)] = torch.Tensor(list_future_labels).long() #future actions
self.future_durations[str(counter_index)] = torch.cat((torch.Tensor(list_future_durations)/video_len, torch.Tensor([video_len]))) #future actions durations
counter_index = counter_index + 1
def load_data_features(self, args, split_load, actions_dict, half=False):
file_ptr = open(split_load, 'r')
if half:
content_all = file_ptr.read().split('\n')[:-1]
content_all = content_all[:int(len(content_all)/2)]
else:
content_all = file_ptr.read().split('\n')[:-1]
if args.dataset == 'breakfast':
            #note: str.strip removes any of these *characters* from both ends
            #(including the trailing 't' of '.txt'), hence the restored 't'
            content_all = [x.strip('./data/groundTruth/') + 't' for x in content_all]
data_all = []
label_all = []
for content in content_all:
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
curr_data = []
if args.feature_type == 'fisher':
if args.dataset == 'breakfast':
loc_curr_data = self.path + 'fisher/' + os.path.splitext(content)[0] + '.txt'
curr_data = np.loadtxt(loc_curr_data, dtype='float32')
                    curr_data = curr_data[:, 1:65] #n*64 (n is the number of frames)
else: #args.dataset == 'salad'
loc_curr_data = self.path + 'fisher/' + os.path.splitext(content)[0] + '-New.txt'
curr_data = np.loadtxt(loc_curr_data, dtype='float32')
curr_data = curr_data[:, 1:65] #n*64
elif args.feature_type == 'I3D':
if args.dataset == 'breakfast':
loc_curr_data = self.path + 'I3D/' + os.path.splitext(content)[0]
curr_data = np.loadtxt(loc_curr_data, dtype='float32') #n*400
else: #args.dataset == 'salad'
loc_curr_data = self.path + 'I3D/' + os.path.splitext(content)[0] + '.npy'
curr_data = np.load(loc_curr_data).T #n*2048
else: #args.feature_type == 'gt'
for iik in range(len(curr_gt)):
ind_label = actions_dict[curr_gt[iik]]
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[ind_label] = 1.0
curr_data.append(curr_data_vec)
curr_data = np.array(curr_data) #n*n_classes(one-hot)
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
return data_all, label_all
def load_seg_outs(self, args, split_load, actions_dict, mode, half=False):
file_ptr = open(split_load, 'r')
if half:
content_all = file_ptr.read().split('\n')[:-1]
content_all = content_all[:int(len(content_all)/2)]
else:
content_all = file_ptr.read().split('\n')[:-1]
content_all = [x.strip('./data/groundTruth/') + 't' for x in content_all]
data_all = []
label_all = []
if mode == 'full' or mode == 'weak':
for content in content_all:
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
curr_data = []
for iik in range(len(label_curr_video)):
ind_label = label_curr_video[iik]
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[ind_label] = 1.0
curr_data.append(curr_data_vec)
curr_data = np.array(curr_data)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
else:
if args.feature_type == 'fisher_label':
# current split for fisher vector based segmentation labels
segmentation_location = os.path.join(self.path, 'seg_fisher')
for content in content_all:
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
# read fisher based segmentation labels
file_ptr_fisher = open(segmentation_location + '/split1/' + args.observation + '/' + content, 'r')
fisher_seg_labels = file_ptr_fisher.read().split('\n')[:-1]
curr_data = []
for iik in range(len(fisher_seg_labels)):
ind_label = actions_dict[fisher_seg_labels[iik]]
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[ind_label] = 1.0
curr_data.append(curr_data_vec)
curr_data = np.array(curr_data)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
else:
counter = 0
# read segmentation labels based on i3d features
file_name = os.path.join(self.path, 'seg_I3D') + '/' + 'seg_ours_2_split1.pickle'
with open(file_name, 'rb') as handle:
segmentation_data = pickle.load(handle)
for content in content_all:
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
# read i3d based segmentation labels
i3d_seg_labels = segmentation_data[counter]
counter = counter + 1
curr_data = []
for iik in range(len(i3d_seg_labels)):
ind_label = i3d_seg_labels[iik]
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[ind_label] = 1.0
curr_data.append(curr_data_vec)
curr_data = np.array(curr_data)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
return data_all, label_all
def load_seg_outs_concat(self, args, split_load, actions_dict, mode, half=False):
file_ptr = open(split_load, 'r')
if half:
content_all = file_ptr.read().split('\n')[:-1]
content_all = content_all[:int(len(content_all)/2)]
else:
content_all = file_ptr.read().split('\n')[:-1]
content_all = [x.strip('./data/groundTruth/') + 't' for x in content_all]
data_all = []
label_all = []
if args.feature_type == 'fisher_label_cat':
# current split for fisher vector based segmentation labels
segmentation_location = os.path.join(self.path, 'seg_fisher')
for content in content_all:
#fisher feature
loc_curr_data = self.path+'fisher/' + os.path.splitext(content)[0] + '.txt'
curr_data = np.loadtxt(loc_curr_data, dtype='float32')
                curr_data = curr_data[:, 1:65] #n*64 (n is the number of frames)
#gt label
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
#one-hot feature
curr_data_feat = []
if mode == 'full' or mode == 'weak':
#gt one-hot label
for iik in range(len(curr_gt)):
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[actions_dict[curr_gt[iik]]] = 1.0
curr_data_feat.append(curr_data_vec)
else:
# read fisher based segmentation labels
file_ptr_fisher = open(segmentation_location + '/split1/' + args.observation + '/' + content, 'r')
fisher_seg_labels = file_ptr_fisher.read().split('\n')[:-1]
for iik in range(len(fisher_seg_labels)):
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[actions_dict[fisher_seg_labels[iik]]] = 1.0
curr_data_feat.append(curr_data_vec)
curr_data_feat = np.array(curr_data_feat) #n*n_classes
minlen = min(len(curr_data_feat), len(curr_data))
curr_data = np.concatenate((curr_data_feat[:minlen], curr_data[:minlen]), axis=1)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
else:
# read segmentation labels based on i3d features
file_name = os.path.join(self.path, 'seg_I3D') + '/' + 'seg_ours_2_split1.pickle'
with open(file_name, 'rb') as handle:
segmentation_data = pickle.load(handle)
counter = 0
for content in content_all:
#I3D feature
loc_curr_data = self.path + 'I3D/' + os.path.splitext(content)[0]
curr_data = np.loadtxt(loc_curr_data, dtype='float32') #n*400
#gt label
file_ptr = open(self.GT_folder + content, 'r')
curr_gt = file_ptr.read().split('\n')[:-1]
label_curr_video = []
for iik in range(len(curr_gt)):
label_curr_video.append(actions_dict[curr_gt[iik]])
#one-hot label
curr_data_feat = []
if mode == 'full' or mode == 'weak':
#gt one-hot label
for iik in range(len(curr_gt)):
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[actions_dict[curr_gt[iik]]] = 1.0
curr_data_feat.append(curr_data_vec)
else:
# read i3d based segmentation labels
i3d_seg_labels = segmentation_data[counter]
counter = counter + 1
for iik in range(len(i3d_seg_labels)):
curr_data_vec = np.zeros(args.n_classes)
curr_data_vec[i3d_seg_labels[iik]] = 1.0
curr_data_feat.append(curr_data_vec)
curr_data_feat = np.array(curr_data_feat) #n*n_classes
minlen = min(len(curr_data_feat), len(curr_data))
curr_data = np.concatenate((curr_data_feat[:minlen], curr_data[:minlen]), axis=1)
data_all.append(torch.tensor(curr_data, dtype=torch.float32))
label_all.append(label_curr_video)
return data_all, label_all
| 17,121 | 48.060172 | 167 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/ancillary_pred_module.py | '''
input: a video and its weak label
output: predicted frame-wise action
The ancillary prediction model outputs a frame-wise action prediction given a video and its weak label (the current action).
This model generates an initial prediction for the weak set, which aids the training of the primary model.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from blocks import TABlock
class anclModel(nn.Module):
def __init__(self, args, video_feat_dim):
super(anclModel, self).__init__()
self.n_classes = args.n_classes
self.hidden_size = args.hidden_dim_LSTM
self.num_TAB = len(args.startpoints_R)
self.linear_dim = args.linear_dim
self.max_len = args.max_len
self.fps = args.fps
self.device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
self.TABs = nn.ModuleList([TABlock(args, video_feat_dim) for _ in range(self.num_TAB)])
self.cls_layer = nn.ModuleList([nn.Sequential(nn.Linear(in_features=2*self.linear_dim, out_features=self.n_classes), nn.Softmax(dim=1)) for _ in range(self.num_TAB)])
self.cls_curr_duration = nn.Linear(in_features=self.num_TAB*self.linear_dim, out_features=1)
self.lstm_linear = nn.Linear(in_features=(2*self.num_TAB)*self.linear_dim + self.num_TAB*self.n_classes, out_features=self.linear_dim+1)
self.lstm = nn.LSTM(self.linear_dim+1, self.hidden_size, batch_first=True)
self.pred_class = nn.Linear(in_features=self.hidden_size, out_features=self.n_classes)
self.pred_duration = nn.Linear(in_features=self.hidden_size+self.linear_dim, out_features=1)
self.embed = nn.Embedding(self.n_classes, self.linear_dim)
self.attn = nn.Linear(in_features=self.hidden_size, out_features=self.linear_dim, bias=False)
def forward(self, S_list, R_list, weak_label):
Y = []
R_ppps = []
S_ppps = []
for i in range(len(R_list)):
S_ppp, R_ppp = self.TABs[i](S_list, R_list[i])
R_ppps.append(R_ppp)
S_ppps.append(S_ppp)
Y.append(self.cls_layer[i](torch.cat((S_ppp, R_ppp), 1)))
curr_action_duration = self.cls_curr_duration(torch.cat(R_ppps, 1)) #batch_size*1
pred_action_durations = [curr_action_duration]
lstm_input = torch.cat((self.embed(weak_label).view(-1, self.linear_dim).contiguous(), curr_action_duration), 1).unsqueeze(1) #batch_size*1*(linear_dim+1)
batch_size = lstm_input.size(0)
pred_class_labels = []
pred_class_probs = []
attentions = []
states = None
prev_hiddens = torch.zeros(batch_size, self.hidden_size).to(self.device)
for i in range(self.max_len):
hiddens, states = self.lstm(lstm_input, states)
hiddens = hiddens.squeeze(1) #batch_size*hidden_size
            outputs = self.pred_class(hiddens) #hiddens was already squeezed above
attention = F.softmax(torch.matmul(self.attn(hiddens).unsqueeze(1)/(self.linear_dim ** 0.5), torch.stack(S_ppps, 1).permute(0,2,1)), dim=-1) #batch_size*1*3
attention = torch.matmul(attention, torch.stack(S_ppps, 1)).view(batch_size, -1) #batch_size*linear_dim
duration = self.pred_duration(torch.cat((attention, prev_hiddens), 1)) #batch_size*1
attentions.append(attention)
predicted_class = outputs.max(1)[1] #batch_size
pred_class_prob = F.softmax(outputs, 1) #batch_size*n_classes
pred_class_labels.append(predicted_class)
pred_class_probs.append(pred_class_prob)
pred_action_durations.append(duration)
lstm_input = torch.cat((self.embed(predicted_class), duration), 1).unsqueeze(1) #batch_size*1*(linear_dim+1)
prev_hiddens = hiddens
curr_action = torch.sum(torch.stack(Y, 0), 0) #current action: batch*n_classes
pred_class_probs = torch.stack(pred_class_probs, 1) #batch*max_len*n_classes
pred_action_durations = torch.cat(pred_action_durations, 1) #batch_size*(max_len+1)
return pred_class_labels, pred_class_probs, curr_action, pred_action_durations, attentions
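# --- Hypothetical usage sketch (illustrative hyperparameters, never called) ---
# One forward pass on random features, mirroring the shapes that
# dataloader.collate_fn produces for a batch of 2 salad-style videos.
def _demo_ancl_shapes():
    import argparse
    args = argparse.Namespace(
        n_classes=19, hidden_dim_LSTM=512, linear_dim=1024, max_len=25,
        fps=30, len_R=5, len_S_list=[5, 10, 15], startpoints_R=[5, 10, 15],
        conv_dim_NLB=128, dropout_NLB=0.3, dropout_CB=0.3, dropout_TAB=0.3,
        light=True)
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    feat_dim = 64
    model = anclModel(args, feat_dim).to(device)
    S_list = [torch.randn(2, n, feat_dim, device=device) for n in args.len_S_list]
    R_list = [torch.randn(2, args.len_R, feat_dim, device=device) for _ in args.startpoints_R]
    wl = torch.zeros(2, dtype=torch.long, device=device)  #weak labels for a batch of 2
    labels, probs, curr, durs, attn = model(S_list, R_list, wl)
    print(probs.shape, curr.shape, durs.shape)  #(2, 25, 19) (2, 19) (2, 26)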
| 4,229 | 51.875 | 174 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/dataloader.py | import torch
import torch.utils.data as data
from data_preprocessing import DataClass
class DatasetLoader(data.Dataset):
def __init__(self, args, path, mode, half=False):
self.dataset = DataClass(args, path, mode, half)
self.obs = float(args.observation[-3:]) #observation portion
self.pred = args.prediction #prediction portion
self.fps = args.fps
self.len_R = args.len_R
self.startpoints_R = args.startpoints_R
self.len_S_list = args.len_S_list
self.args = args
self.mode = mode
self.features = self.dataset.data_feat #list, one element is the feature of one video (tensor)
self.curr_label = self.dataset.curr_label #dict, key is video index, value is its current action label
self.future_labels = self.dataset.future_labels #dict, key is video index, value is its future labels list, could be an empty list
self.future_durations = self.dataset.future_durations #dict, key is video index, value is its current and future action duration (within the prediction part)
def cut(self, feature, curr_label, future_labels, durations):
'''
        feature: tensor (n*dim), the feature of a video; n is the number of frames, dim is the per-frame feature dimension
        curr_label: torch.LongTensor, label of the current action
        future_labels: torch.LongTensor, zero or more future action labels
        durations: tensor of current/future action durations (within the prediction part) plus the video length
        Returns S_list, R_list, the ground-truth labels for the prediction part, the weak label, and the durations
'''
if (self.args.feature_type == 'fisher_label' or self.args.feature_type == 'I3D_label' or self.args.feature_type == 'fisher_label_cat' or self.args.feature_type == 'I3D_label_cat') and self.mode == 'test':
obs = feature
else:
obs = feature[:int(len(feature) * self.obs), :] #first obs (0.2 or 0.3) portion of videos as observation part
full_label = future_labels #ground truth for prediction part
        weak_label = curr_label #weak label: current action label
recent_snippets = [] #R_list
spanning_snippets = [] #S_list
for scale in self.len_S_list:
curr = []
a = len(obs)/scale
for i in range(scale):
curr.append(torch.max(obs[int(i*a):int((i+1)*a)], 0)[0].squeeze())
spanning_snippets.append(torch.stack(curr))
for sp in self.startpoints_R:
curr = []
recent = obs[int(max(0, len(obs)-sp*self.fps)):, :]
a = len(recent)/self.len_R
for i in range(self.len_R):
curr.append(torch.max(recent[int(i*a):int((i+1)*a)], 0)[0].squeeze())
recent_snippets.append(torch.stack(curr))
return (spanning_snippets, recent_snippets, full_label, weak_label, durations)
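    # Worked example (illustrative): with obs=0.3 and a 1000-frame video,
    # `obs` keeps frames 0..299. For scale 5 in len_S_list, a = 300/5 = 60,
    # so spanning snippet i max-pools frames [60*i, 60*(i+1)). For a recent
    # startpoint of 10 s at 30 fps, `recent` is the last 300 observed
    # frames, split into len_R = 5 chunks of 60 frames each.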
def __getitem__(self, index):
return self.cut(self.features[index], self.curr_label[str(index)], self.future_labels[str(index)], self.future_durations[str(index)]) #a tuple
def __len__(self):
return len(self.features)
def collate_fn(data):
device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
    S_list = [[] for _ in range(3)] #one bucket per spanning scale; assumes len(args.len_S_list) == 3
    R_list = [[] for _ in range(3)] #one bucket per recent startpoint; assumes len(args.startpoints_R) == 3
fl = []
wl = []
dl = []
for d in data:
curr_s = d[0] #List: len_S_i*feat_dim
curr_r = d[1] #List: len_R*feat_dim
for i in range(len(curr_s)):
S_list[i].append(curr_s[i])
for i in range(len(curr_r)):
R_list[i].append(curr_r[i])
fl.append(d[2].to(device)) #List: each element is a tensor of future action labels
wl.append(d[3])
dl.append(d[4].to(device)) #List: each element is a tensor of current and future action durations
S_list = [torch.stack(s).to(device) for s in S_list] #List: each element is batch*len_S_i*feat_dim
R_list = [torch.stack(r).to(device) for r in R_list] #List: each element is batch*len_R*feat_dim
wl = torch.stack(wl, 0).to(device) #batch
    return S_list, R_list, fl, wl, dl
| 4,177 | 48.152941 | 212 | py
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/self_correction_module.py | '''
input: outputs from ancillary module and primary module of weak set
output: full label of weak set
Self-correction module refines predictions generated by the ancillary model and the current primary model for the weak set.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class selfcorrModel(nn.Module):
def __init__(self, args, alpha):
super(selfcorrModel, self).__init__()
self.method = args.self_correction_method
self.alpha = alpha
self.comb1 = nn.Sequential(
nn.Linear(in_features=2,out_features=1),
nn.ReLU())
self.comb2 = nn.Sequential(
nn.Linear(in_features=2,out_features=1),
nn.ReLU())
def forward(self, prim_pred, ancl_pred, prim_duration, ancl_duration):
#prim/ancl_pred: batch_size*max_len*n_classes
#prim/ancl_duration: batch_size*(max_len+1)
if self.method == 'linear':
self_corr_label = []
for i in range(prim_pred.shape[1]):
self_corr_label.append(F.softmax(torch.pow(prim_pred[:, i, :], 1/(self.alpha+1))*torch.pow(ancl_pred[:, i, :], self.alpha/(self.alpha+1)), 1))
self_corr_duration = torch.pow(prim_duration, 1/(self.alpha+1))*torch.pow(ancl_duration, self.alpha/(self.alpha+1)) if torch.min(ancl_duration) > 1e-3 and torch.min(prim_duration) > 1e-3 else ancl_duration
            #this avoids exploding gradients when an element is very small; after the first epochs the
            #durations should be large enough (at least bigger than 1) that this fallback is never taken.
return torch.stack(self_corr_label, 1), self_corr_duration #batch_size*max_len*n_classes, batch_size*(max_len+1)
else: #auto
self_corr_label = []
for i in range(prim_pred.shape[1]):
self_corr_label.append(F.softmax(self.comb1(torch.stack((prim_pred[:, i, :], ancl_pred[:, i, :]), 1).permute(0, 2, 1)).squeeze(), 1))
self_corr_duration = self.comb2(torch.stack((prim_duration, ancl_duration), 2)).squeeze()
return torch.stack(self_corr_label, 1), self_corr_duration #batch_size*max_len*n_classes, batch_size*(max_len+1)
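# --- Numeric sketch of the 'linear' rule (illustrative, never called) ---
# The fused label is a softmaxed geometric mean
# prim**(1/(alpha+1)) * ancl**(alpha/(alpha+1)): a large alpha leans on the
# ancillary prediction, while alpha -> 0 leans on the primary prediction.
def _demo_linear_fusion(alpha=30.0):
    prim = torch.tensor([0.7, 0.3])
    ancl = torch.tensor([0.2, 0.8])
    fused = torch.pow(prim, 1/(alpha+1)) * torch.pow(ancl, alpha/(alpha+1))
    print(F.softmax(fused, 0))  #close to softmax of ancl for large alpha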
| 2,235 | 53.536585 | 217 | py |
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/primary_pred_module.py | '''
input: a video
output: predicted frame-wise action
The primary prediction model generates a frame-wise prediction of actions given a video.
This is the main model that is subject to the training and is used at test time.
'''
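# Usage note (sketch): unlike anclModel, primModel's forward takes only
# (S_list, R_list) -- no weak label -- and returns the same five outputs:
# per-step class labels, class probabilities (batch*max_len*n_classes),
# current-action scores (batch*n_classes), predicted durations
# (batch*(max_len+1)), and the per-step attention vectors that the
# attention-matching loss in main.py compares against the ancillary model.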
import torch.nn as nn
from blocks import TABlock
import torch
import torch.nn.functional as F
class primModel(nn.Module):
def __init__(self, args, video_feat_dim):
super(primModel, self).__init__()
self.n_classes = args.n_classes
self.hidden_size = args.hidden_dim_LSTM
self.num_TAB = len(args.startpoints_R)
self.linear_dim = args.linear_dim
self.max_len = args.max_len
self.device = torch.device('cuda:0' if torch.cuda.is_available() else "cpu")
self.TABs = nn.ModuleList([TABlock(args, video_feat_dim) for _ in range(self.num_TAB)])
self.cls_layer = nn.ModuleList([nn.Sequential(nn.Linear(in_features=2*self.linear_dim,out_features=self.n_classes), nn.Softmax(dim=1)) for _ in range(self.num_TAB)])
self.cls_curr_duration = nn.Linear(in_features=self.num_TAB*self.linear_dim, out_features=1)
self.lstm_linear = nn.Linear(in_features=(2*self.num_TAB)*self.linear_dim + self.num_TAB*self.n_classes, out_features=self.linear_dim+1)
self.lstm = nn.LSTM(self.linear_dim+1, self.hidden_size, batch_first=True)
self.pred_class = nn.Linear(in_features=self.hidden_size, out_features=self.n_classes)
self.pred_duration = nn.Linear(in_features=self.hidden_size+self.linear_dim, out_features=1)
self.embed = nn.Embedding(self.n_classes, self.linear_dim)
self.attn = nn.Linear(in_features=self.hidden_size, out_features=self.linear_dim, bias=False)
def forward(self, S_list, R_list):
S_ppps = []
R_ppps = []
Y = []
for i in range(len(R_list)):
S_ppp, R_ppp = self.TABs[i](S_list, R_list[i])
S_ppps.append(S_ppp)
R_ppps.append(R_ppp)
Y.append(self.cls_layer[i](torch.cat((S_ppp, R_ppp), 1)))
lstm_input = torch.cat((Y+S_ppps+R_ppps), 1) #batch_size*(2*num_TAB*linear_dim+num_TAB*n_classes)
lstm_input = self.lstm_linear(lstm_input).unsqueeze(1) #batch_size*1*(linear_dim+1)
curr_action_duration = self.cls_curr_duration(torch.cat(R_ppps, 1)) #batch_size*1
pred_action_durations = [curr_action_duration]
batch_size = lstm_input.size(0)
pred_class_labels = []
pred_class_probs = []
attentions = []
states = None
prev_hiddens = torch.zeros(batch_size, self.hidden_size).to(self.device)
for i in range(self.max_len):
hiddens, states = self.lstm(lstm_input, states)
hiddens = hiddens.squeeze(1) #batch_size*hidden_size
outputs = self.pred_class(hiddens)
attention = F.softmax(torch.matmul(self.attn(hiddens).unsqueeze(1)/(self.linear_dim ** 0.5), torch.stack(S_ppps, 1).permute(0,2,1)), dim=-1) #batch_size*1*3
attention = torch.matmul(attention, torch.stack(S_ppps, 1)).view(batch_size, -1) #batch_size*linear_dim
attentions.append(attention)
duration = self.pred_duration(torch.cat((attention, prev_hiddens), 1)) #batch_size*1
predicted_class = outputs.max(1)[1] #batch_size
pred_class_prob = F.softmax(outputs, 1) #batch_size*n_classes
pred_class_labels.append(predicted_class)
pred_class_probs.append(pred_class_prob)
pred_action_durations.append(duration)
lstm_input = torch.cat((self.embed(predicted_class), duration), 1).unsqueeze(1) #batch_size*1*(linear_dim+1)
prev_hiddens = hiddens
curr_action = torch.sum(torch.stack(Y, 0), 0) #current action: batch_size*n_classes
pred_class_probs = torch.stack(pred_class_probs, 1) #batch_size*max_len*n_classes
pred_action_durations = torch.cat(pred_action_durations, 1) #batch_size*(max_len+1)
        return pred_class_labels, pred_class_probs, curr_action, pred_action_durations, attentions
| 4,186 | 51.3375 | 173 | py
WSLVideoDenseAnticipation | WSLVideoDenseAnticipation-main/blocks.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class NONLocalBlock(nn.Module):
#Non Local Block
def __init__(self, args, dim_1, dim_2, video_feat_dim):
super(NONLocalBlock, self).__init__()
self.dim_1 = dim_1
self.dim_2 = dim_2
self.video_feat_dim = video_feat_dim
self.latent_dim = args.conv_dim_NLB
self.dropout = args.dropout_NLB
        self.initnn = True  #initnn controls weight initialization of theta, phi, and g
        self.initnn2 = True  #initnn2 controls weight initialization of the final conv layer
self.theta = nn.Conv1d(in_channels=self.dim_2,
out_channels=self.latent_dim,
kernel_size=1, stride=1, padding=0)
if self.initnn:
nn.init.xavier_normal_(self.theta.weight)
nn.init.constant_(self.theta.bias, 0)
self.phi = nn.Conv1d(in_channels=self.dim_1,
out_channels=self.latent_dim,
kernel_size=1, stride=1, padding=0)
if self.initnn:
nn.init.xavier_normal_(self.phi.weight)
nn.init.constant_(self.phi.bias, 0)
self.g = nn.Conv1d(in_channels=self.dim_1,
out_channels=self.latent_dim,
kernel_size=1, stride=1, padding=0)
if self.initnn:
nn.init.xavier_normal_(self.g.weight)
nn.init.constant_(self.g.bias, 0)
self.final_layers = nn.Sequential(
nn.LayerNorm(torch.Size([self.latent_dim, self.video_feat_dim])),
nn.ReLU(),
nn.Conv1d(in_channels=self.latent_dim,
out_channels=self.dim_2,
kernel_size=1, stride=1, padding=0),
nn.Dropout(p=self.dropout),
)
if self.initnn2:
nn.init.xavier_normal_(self.final_layers[2].weight)
nn.init.constant_(self.final_layers[2].bias, 0)
def forward(self, input1, input2):
#input1: batch_size*dim_1*video_feat_dim
#input2: batch_size*dim_2*video_feat_dim
theta_x = self.theta(input2).permute(0, 2, 1) #batch_size*video_feat_dim*latent_dim
phi_x = self.phi(input1) #batch_size*latent_dim*video_feat_dim
theta_phi = torch.matmul(theta_x, phi_x) #batch_size*video_feat_dim*video_feat_dim
p_x = F.softmax(theta_phi, dim=-1) #batch_size*video_feat_dim*video_feat_dim
g_x = self.g(input1).permute(0, 2, 1) #batch_size*video_feat_dim*latent_dim
t_x = torch.matmul(p_x, g_x).permute(0, 2, 1).contiguous() #batch_size*latent_dim*video_feat_dim
W_t = self.final_layers(t_x) #batch_size*dim_2*video_feat_dim
z_x = W_t + input2 #batch_size*dim_2*video_feat_dim
return z_x
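# --- Shape sketch for NONLocalBlock (illustrative, never called) ---
# Attention runs over the feature axis: input1 (B, dim_1, D) attends into
# input2 (B, dim_2, D), and the residual keeps input2's shape at the output.
def _demo_nonlocal_shapes():
    import argparse
    args = argparse.Namespace(conv_dim_NLB=128, dropout_NLB=0.3)
    blk = NONLocalBlock(args, dim_1=10, dim_2=5, video_feat_dim=64)
    out = blk(torch.randn(2, 10, 64), torch.randn(2, 5, 64))
    print(out.shape)  #torch.Size([2, 5, 64])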
class CouplingBlock(nn.Module):
#Coupling Block
def __init__(self, args, dim_S, dim_R, video_feat_dim):
super(CouplingBlock, self).__init__()
self.dropout = args.dropout_CB
self.video_feat_dim = video_feat_dim
self.linear_dim = args.linear_dim
self.dim_S = dim_S
self.dim_R = dim_R
self.coupBlock1 = NONLocalBlock(args, self.dim_S, self.dim_S, video_feat_dim)
self.coupBlock2 = NONLocalBlock(args, self.dim_S, self.dim_R, video_feat_dim)
self.final_SR = nn.Sequential(
nn.Linear(in_features = (self.dim_S+2*self.dim_R)*self.video_feat_dim, out_features = self.linear_dim),
nn.ReLU(),
nn.Dropout(self.dropout))
self.final_RR = nn.Sequential(
nn.Linear(in_features = 2*self.dim_R*self.video_feat_dim, out_features = self.linear_dim),
nn.ReLU(),
nn.Dropout(self.dropout))
def forward(self, S, R):
#S: batch_size*dim_S*video_feat_dim
#R: batch_size*dim_R*video_feat_dim
batch_size = S.size(0)
S_p = F.relu(self.coupBlock1(S, S)) #batch_size*dim_S*video_feat_dim
R_p = F.relu(self.coupBlock2(S_p, R)) #batch_size*dim_R*video_feat_dim
R_pp = torch.cat((R_p, R), 1).view(batch_size, -1) #batch_size*(2*dim_R*video_feat_dim)
S_pp = torch.cat((S_p.view(batch_size, -1).contiguous(), R_pp), 1).view(batch_size, -1) #batch_size*(dim_S*video_feat_dim+2*dim_R*video_feat_dim)
S_pp = self.final_SR(S_pp) #batch_size*linear_dim
R_pp = self.final_RR(R_pp) #batch_size*linear_dim
return S_pp, R_pp
class CouplingBlock_light(nn.Module):
#Coupling Block
def __init__(self, args, dim_S, dim_R, video_feat_dim):
super(CouplingBlock_light, self).__init__()
self.dropout = args.dropout_CB
self.video_feat_dim = video_feat_dim
self.linear_dim = args.linear_dim
self.dim_S = dim_S
self.dim_R = dim_R
self.coupBlock = NONLocalBlock(args, self.dim_S, self.dim_R, video_feat_dim)
self.final_SR = nn.Sequential(
nn.Linear(in_features = (self.dim_S+2*self.dim_R)*self.video_feat_dim, out_features = self.linear_dim),
nn.ReLU(),
nn.Dropout(self.dropout))
self.final_RR = nn.Sequential(
nn.Linear(in_features = 2*self.dim_R*self.video_feat_dim, out_features = self.linear_dim),
nn.ReLU(),
nn.Dropout(self.dropout))
def forward(self, S, R):
#S: batch_size*dim_S*video_feat_dim
#R: batch_size*dim_R*video_feat_dim
batch_size = S.size(0)
R_p = F.relu(self.coupBlock(S, R)) #batch_size*dim_R*video_feat_dim
R_pp = torch.cat((R_p, R), 1).view(batch_size, -1) #batch_size*(2*dim_R*video_feat_dim)
S_pp = torch.cat((S.view(batch_size, -1).contiguous(), R_pp), 1).view(batch_size, -1) #batch_size*(dim_S*video_feat_dim+2*dim_R*video_feat_dim)
S_pp = self.final_SR(S_pp) #batch_size*linear_dim
R_pp = self.final_RR(R_pp) #batch_size*linear_dim
return S_pp, R_pp
class TABlock(nn.Module):
#Temporal Aggregation Block
def __init__(self, args, video_feat_dim):
super(TABlock, self).__init__()
self.linear_dim = args.linear_dim
self.video_feat_dim = video_feat_dim
self.len_R = args.len_R
self.len_S_list = args.len_S_list
self.S_num = len(self.len_S_list)
self.dropout = args.dropout_TAB
self.light = args.light
if self.light:
self.CBs = nn.ModuleList([CouplingBlock_light(args, len_S, self.len_R, video_feat_dim) for len_S in self.len_S_list])
else:
self.CBs = nn.ModuleList([CouplingBlock(args, len_S, self.len_R, video_feat_dim) for len_S in self.len_S_list])
# self.final_RRR = nn.Sequential(
# nn.Linear(in_features = self.S_num*self.linear_dim, out_features = self.linear_dim),
# nn.ReLU(),
# nn.Dropout(self.dropout))
self.final_RRR = nn.Linear(in_features = self.S_num*self.linear_dim, out_features = self.linear_dim)
def forward(self, S_list, R):
S_pps = []
R_pps = []
for i in range(len(S_list)):
S_pp, R_pp = self.CBs[i](S_list[i], R)
S_pps.append(S_pp)
R_pps.append(R_pp)
R_ppp = torch.cat(R_pps, 1) #batch_size*(3*linear_dim)
R_ppp = self.final_RRR(R_ppp) #batch_size*linear_dim
S_ppp = torch.stack(S_pps, 0) #3*batch_size*linear_dim
S_ppp = torch.max(S_ppp, 0)[0].view(-1, self.linear_dim) #batch_size*linear_dim
return S_ppp, R_ppp
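# --- Shape sketch for TABlock (illustrative hyperparameters, never called) ---
# One TAB fuses all spanning scales with a single recent-snippet tensor and
# returns a pooled spanning vector and a fused recent vector per sample.
def _demo_tab_shapes():
    import argparse
    args = argparse.Namespace(linear_dim=1024, len_R=5, len_S_list=[5, 10, 15],
                              dropout_TAB=0.3, dropout_CB=0.3, dropout_NLB=0.3,
                              conv_dim_NLB=128, light=True)
    feat_dim = 64
    tab = TABlock(args, feat_dim)
    S_list = [torch.randn(2, n, feat_dim) for n in args.len_S_list]
    R = torch.randn(2, args.len_R, feat_dim)
    S_ppp, R_ppp = tab(S_list, R)
    print(S_ppp.shape, R_ppp.shape)  #both torch.Size([2, 1024])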
| 7,659 | 41.555556 | 153 | py |
fmm2d | fmm2d-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# fmm2d documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 1 16:19:13 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx.ext.autodoc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.abspath('sphinxext'))
sys.path.insert(0,os.path.abspath('../../texext'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.6' # dylan, but I only have 1.3.6
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
# 'sphinx.ext.autosectionlabel', # needs v 1.4; can :ref: other files w/o this; removed 7/29/18
'texext',
# 'sphinxcontrib.bibtex',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'fmm2d'
copyright = u'2018-2019 The Simons Foundation, Inc. - All Rights Reserved'
author = u"Zydrunas Gimbutas, Leslie Greengard, Mike O'Neil, Manas Rachh, and Vladimir Rokhlin"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'classic'
#html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'collapsiblesidebar': 'true', 'sidebarwidth': '270px'}
#html_theme_options = {"codebgcolor":"rgb(240,240,240)"}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'fmm2ddoc'
# To fix location of equation numbering. Barnett tried 6/19/18
# see https://samnicholls.net/2016/06/15/how-to-sphinx-readthedocs/
def setup(app):
app.add_css_file('theme_overrides.css')
app.add_css_file('custom.css')
    # it doesn't fail if this file is not found in _static :(
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'fmm2d.tex', u'fmm2d Documentation',
u"Zydrunas Gimbutas \\and Leslie Greengard \\and Mike O'Neil \\and Manas Rachh \\and Vladimir Rokhlin", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'fmm2d', u'fmm2d Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'fmm2d', u'fmm2d Documentation',
author, 'fmm2d', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 10,222 | 32.299674 | 119 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/ssim.py | import torch
import torch.nn.functional as F
from math import exp
import numpy as np
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)]).double()
# gauss.requires_grad = True
return gauss/gauss.sum()
def create_window(window_size):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).double().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(1, 1, window_size, window_size).contiguous()
return window.cuda()
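# Sanity-check sketch (assumes a CUDA device, since create_window() places the
# window on the GPU): the window is a normalized 2-D Gaussian, so its entries
# sum to 1 and convolving with it computes a weighted local mean.
def _window_example():
    window = create_window(11)   # shape (1, 1, 11, 11), dtype float64
    return window.sum()          # ~= 1.0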
def ssim(img1, img2, window_size=11, size_average=True, full=False, val_range=None):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
else:
L = val_range
padd = 0
(_, channels, height, width) = img1.size()
real_size = min(window_size, height, width)
window = create_window(real_size)
ret_channels = []
cs_channels = []
for ch in range(channels): # loop over channels, then average
img1_ch = torch.unsqueeze(img1[:, ch, :, :], 1)
img2_ch = torch.unsqueeze(img2[:, ch, :, :], 1)
mu1 = F.conv2d(img1_ch, window, padding=padd)
mu2 = F.conv2d(img2_ch, window, padding=padd)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1_ch * img1_ch, window, padding=padd) - mu1_sq
sigma2_sq = F.conv2d(img2_ch * img2_ch, window, padding=padd) - mu2_sq
sigma12 = F.conv2d(img1_ch * img2_ch, window, padding=padd) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
cs_channels.append(cs)
ret_channels.append(ret)
cs_mean = torch.mean(torch.stack(cs_channels), dim=-1)
ret_mean = torch.mean(torch.stack(ret_channels), dim=-1)
if full:
return ret_mean, cs_mean
return ret_mean
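# Minimal usage sketch for ssim() (assumes a CUDA device, since the Gaussian
# window lives on the GPU, and double tensors to match the window's dtype):
def _ssim_example():
    img_a = torch.rand(2, 3, 64, 64, dtype=torch.float64).cuda()
    img_b = img_a.clone()
    return ssim(img_a, img_b, val_range=1)  # identical images -> ~1.0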
def msssim(img1, img2, window_size=11, size_average=True, val_range=None):
device = img1.device
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
levels = weights.size()[0]
mssim = []
mcs = []
for _ in range(levels):
sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
mssim.append(sim)
mcs.append(cs)
img1 = F.avg_pool2d(img1, (2, 2))
img2 = F.avg_pool2d(img2, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
# # Normalize (to avoid NaNs)
#
# mssim = (mssim + 1) / 2
# mcs = (mcs + 1) / 2
pow1 = mcs ** weights
pow2 = mssim ** weights
# output = torch.prod(pow1 * pow2)
# From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
    output = torch.prod(pow1[:-1]) * pow2[-1]  # prod of mcs terms, times the final mssim term
return output | 3,447 | 30.925926 | 118 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/dataloader.py | from torch.utils.data.dataset import Dataset
import torchvision.transforms as transforms
from os.path import join
from PIL import Image
class CelebDataSet(Dataset):
"""CelebA dataset
Parameters:
        data_path (str) -- CelebA dataset main directory (including '/Img' and '/Anno') path
state (str) -- dataset phase 'train' | 'val' | 'test'
    Center crop the aligned CelebA dataset to 178x178 to include the face area, then downsample to 128x128 (Step 3).
    In addition, for progressive training, the target image for each step is resized to 32x32 (Step 1) and 64x64 (Step 2).
"""
def __init__(self, data_path = './dataset/', state = 'train', data_augmentation=None):
self.main_path = data_path
self.state = state
self.data_augmentation = data_augmentation
self.img_path = join(self.main_path, 'CelebA/Img/img_align_celeba')
self.eval_partition_path = join(self.main_path, 'Anno/list_eval_partition.txt')
train_img_list = []
val_img_list = []
test_img_list = []
f = open(self.eval_partition_path, mode='r')
while True:
line = f.readline().split()
if not line: break
if line[1] == '0':
train_img_list.append(line)
elif line[1] =='1':
val_img_list.append(line)
else:
test_img_list.append(line)
f.close()
if state=='train':
train_img_list.sort()
self.image_list = train_img_list
elif state=='val':
val_img_list.sort()
self.image_list = val_img_list
else:
test_img_list.sort()
self.image_list = test_img_list
if state=='train' and self.data_augmentation:
self.pre_process = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.CenterCrop((178, 178)),
transforms.Resize((128, 128)),
transforms.RandomRotation(20, resample=Image.BILINEAR),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
])
else:
self.pre_process = transforms.Compose([
transforms.CenterCrop((178, 178)),
transforms.Resize((128,128)),
])
self.totensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
self._64x64_down_sampling = transforms.Resize((64, 64))
self._32x32_down_sampling = transforms.Resize((32, 32))
self._16x16_down_sampling = transforms.Resize((16,16))
def __getitem__(self, index):
image_path = join(self.img_path, self.image_list[index][0])
target_image = Image.open(image_path).convert('RGB')
target_image = self.pre_process(target_image)
x4_target_image = self._64x64_down_sampling(target_image)
x2_target_image = self._32x32_down_sampling(x4_target_image)
input_image = self._16x16_down_sampling(x2_target_image)
x2_target_image = self.totensor(x2_target_image)
x4_target_image = self.totensor(x4_target_image)
target_image = self.totensor(target_image)
input_image = self.totensor(input_image)
return x2_target_image, x4_target_image, target_image, input_image
def __len__(self):
return len(self.image_list)
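# Minimal usage sketch; the data path is illustrative and must contain the
# CelebA layout described in the class docstring ('/Img' and '/Anno').
def _celeba_example(data_path='./dataset/'):
    dataset = CelebDataSet(data_path=data_path, state='test')
    x2_target, x4_target, target, inp = dataset[0]
    # Targets for steps 1-3 (32x32, 64x64, 128x128) plus the 16x16 input.
    return inp.shape, x2_target.shape, x4_target.shape, target.shape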
| 3,841 | 39.87234 | 125 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/model.py | import torch
import torch.nn as nn
from torch.nn import functional as F
from math import sqrt
"""Original EqualConv2d code is at
https://github.com/rosinality/style-based-gan-pytorch/blob/master/model.py
"""
class EqualLR:
def __init__(self, name):
self.name = name
def compute_weight(self, module):
weight = getattr(module, self.name+'_orig')
fan_in = weight.data.size(1) * weight.data[0][0].numel()
return weight * sqrt(2/fan_in)
@staticmethod
def apply(module, name):
fn = EqualLR(name)
weight = getattr(module, name)
del module._parameters[name]
module.register_parameter(name + '_orig', nn.Parameter(weight.data))
module.register_forward_pre_hook(fn)
return fn
def __call__(self, module, input):
weight = self.compute_weight(module)
setattr(module, self.name, weight)
def equal_lr(module, name='weight'):
EqualLR.apply(module, name)
return module
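# Sketch of the equalized learning-rate trick: the raw parameter is stored as
# 'weight_orig', and 'weight' is recomputed as weight_orig * sqrt(2 / fan_in)
# by the pre-forward hook on every call.
def _equal_lr_example():
    conv = nn.Conv2d(8, 16, kernel_size=3)
    conv = equal_lr(conv)                    # registers the pre-forward hook
    out = conv(torch.randn(1, 8, 32, 32))    # hook sets conv.weight here
    return out.shape                         # torch.Size([1, 16, 30, 30])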
class EqualConv2d(nn.Module):
def __init__(self, *args, **kwargs):
super(EqualConv2d, self).__init__()
conv = nn.Conv2d(*args, **kwargs)
conv.weight.data.normal_()
conv.bias.data.zero_()
self.conv = equal_lr(conv)
def forward(self, input):
return self.conv(input)
class ResBlock(nn.Module):
def __init__(self, dim, kernel_size=3, padding=1, stride=1):
super(ResBlock, self).__init__()
self.conv = nn.Sequential(
EqualConv2d(dim, dim, kernel_size=3, padding=1, stride=1),
nn.BatchNorm2d(dim),
nn.ReLU(),
EqualConv2d(dim, dim, kernel_size=3, padding=1, stride=1),
nn.BatchNorm2d(dim),
nn.ReLU()
)
def forward(self, x):
return self.conv(x) + x
class ConvBlock(nn.Module):
def __init__(self, in_plane, out_plane, kernel_size=3, padding=1, stride=1):
super(ConvBlock, self).__init__()
self.conv = nn.Sequential(
EqualConv2d(in_plane, out_plane, kernel_size=3, padding=1, stride=1),
nn.LeakyReLU(0.2),
EqualConv2d(out_plane, out_plane, kernel_size=3, padding=1, stride=1),
nn.LeakyReLU(0.2))
def forward(self, x):
return self.conv(x)
class Generator(nn.Module):
def __init__(self, ):
super(Generator, self).__init__()
step1 = [nn.Conv2d(3, 512, kernel_size=3, stride=1, padding=1), nn.BatchNorm2d(512), nn.ReLU()]
step1 += [ResBlock(dim=512, kernel_size=3, stride=1, padding=1),
nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(256),
nn.ReLU()
]
step2 = [ResBlock(dim=256, kernel_size=3, stride=1, padding=1),
nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(128),
nn.ReLU()
]
step3 = [ResBlock(dim=128, kernel_size=3, stride=1, padding=1),
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(64),
nn.ReLU()]
self.to_rgb = nn.ModuleList([nn.Conv2d(256, 3, kernel_size=1, stride=1, padding=0),
nn.Conv2d(128, 3, kernel_size=1, stride=1, padding=0),
nn.Conv2d(64, 3, kernel_size=1, stride=1, padding=0)])
self.step1 = nn.Sequential(*step1)
self.step2 = nn.Sequential(*step2)
self.step3 = nn.Sequential(*step3)
#self.model = nn.Sequential(self.step1, self.step2, self.step3)
def forward(self, input, step=1, alpha=-1):
"""Progressive generator forward"""
if step == 1:
out = self.step1(input)
out = self.to_rgb[step-1](out)
elif step == 2:
if 0 <= alpha < 1:
prev = self.step1(input)
skip_rgb = F.interpolate(self.to_rgb[step-2](prev), scale_factor=2, mode='nearest')
out = self.step2(prev)
out = (1-alpha)*skip_rgb + alpha*self.to_rgb[step-1](out)
else:
out = self.step2(self.step1(input))
out = self.to_rgb[step-1](out)
else:
if 0 <= alpha < 1:
prev = self.step2(self.step1(input))
skip_rgb = F.interpolate(self.to_rgb[step-2](prev), scale_factor=2, mode='nearest')
out = self.step3(prev)
out = (1-alpha)*skip_rgb + alpha*self.to_rgb[step-1](out)
else:
out = self.step3(self.step2(self.step1(input)))
out = self.to_rgb[step-1](out)
return out
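# Usage sketch: the generator maps a 16x16 face to higher resolutions.
# step picks the output size (1 -> 32x32, 2 -> 64x64, 3 -> 128x128), and
# alpha in [0, 1) blends the newest block in during fade-in; values outside
# that range disable blending.
def _generator_example():
    g = Generator()
    lr_face = torch.randn(2, 3, 16, 16)
    sr_face = g(lr_face, step=3, alpha=1)
    return sr_face.shape                     # torch.Size([2, 3, 128, 128])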
class Discriminator(nn.Module):
"""Discriminator"""
def __init__(self,):
super(Discriminator, self).__init__()
self.from_rgb = nn.ModuleList([
nn.Conv2d(3, 256, kernel_size=1, stride=1, padding=0),
nn.Conv2d(3, 128, kernel_size=1, stride=1, padding=0),
nn.Conv2d(3, 64, kernel_size=1, stride=1, padding=0)])
step1 = [ConvBlock(256, 512, kernel_size=3, padding=1, stride=1), nn.AvgPool2d(kernel_size=2, stride=2)]
step2 = [ConvBlock(128, 256, kernel_size=3, padding=1, stride=1), nn.AvgPool2d(kernel_size=2, stride=2)]
step3 = [ConvBlock(64, 128, kernel_size=3, padding=1, stride=1), nn.AvgPool2d(kernel_size=2, stride=2)]
self.step1 = nn.Sequential(*step1)
self.step2 = nn.Sequential(*step2)
self.step3 = nn.Sequential(*step3)
#for last layer
self.equal_conv = EqualConv2d(513, 512, kernel_size=3, stride=1, padding=1)
self.linear = nn.Linear(512, 2048)
self.linear2 = nn.Linear(2048, 1)
def forward(self, input, step=1, alpha=-1):
"""Progressive discriminator forward
        Each step's output (generator output) is mixed with the previous generator output,
stacked from step1 to step3.
| step1 -----> step2 ------> step3 |
"""
if step == 1:#32x32
out = self.from_rgb[step-1](input)
out = self.step1(out)
if step ==2:#64x64
out = self.from_rgb[step-1](input)#128x64x64
out = self.step2(out) #256x32x32
if 0 <= alpha < 1:
skip_rgb = F.avg_pool2d(input, kernel_size=2,stride=2)#F.interpolate(input, size=(32, 32), mode='nearest') #3x32x32
skip_rgb = self.from_rgb[step-2](skip_rgb) #256x32x32
out = (1-alpha)*skip_rgb + alpha * out
out = self.step1(out) #256x16x16
elif step ==3:#128x128
out = self.from_rgb[step-1](input) #64x128x128
out = self.step3(out) #128x64x64
if 0 <= alpha < 1:
skip_rgb = F.avg_pool2d(input, kernel_size=2,stride=2) #F.interpolate(input, size=(64, 64), mode='nearest') #3x64x64
skip_rgb = self.from_rgb[step-2](skip_rgb) #128x64x64
out = (1-alpha)*skip_rgb + alpha * out #128x64x64
out = self.step2(out) #256x32x32
out = self.step1(out) #512x16x16
mean_std = input.std(0).mean()
mean_std = mean_std.expand(input.size(0), 1, 16, 16)
out = torch.cat([out, mean_std], dim=1)
out = self.equal_conv(out)
out = F.avg_pool2d(out, 16, stride=1)
out = out.view(input.size(0), -1)
out = self.linear(out)
out = self.linear2(out)
out = out.squeeze_(dim=1)
return out
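# Usage sketch mirroring the generator: the discriminator takes images at the
# resolution of the current step and returns one realness score per image.
# Batch size should be > 1 for the minibatch-stddev feature to be meaningful.
def _discriminator_example():
    d = Discriminator()
    imgs = torch.randn(4, 3, 128, 128)
    scores = d(imgs, step=3, alpha=1)
    return scores.shape                      # torch.Size([4])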
| 7,795 | 35.773585 | 132 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/demo.py | import torch
import argparse
from model import Generator
from PIL import Image
import torchvision.transforms as transforms
from torchvision import utils
if __name__ == '__main__':
parser = argparse.ArgumentParser('Demo of Progressive Face Super-Resolution')
parser.add_argument('--image-path', type=str)
parser.add_argument('--checkpoint-path', default='./checkpoints/generator_checkpoint_singleGPU.ckpt')
parser.add_argument('--output-path', type=str)
args = parser.parse_args()
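    # Example invocation (paths are illustrative):
    #   python demo.py --image-path ./inputs/face.jpg \
    #       --checkpoint-path ./checkpoints/generator_checkpoint_singleGPU.ckpt \
    #       --output-path ./outputs/face_sr.jpg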
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
generator = Generator().to(device)
generator.eval()
g_checkpoint = torch.load(args.checkpoint_path)
generator.load_state_dict(g_checkpoint['model_state_dict'], strict=False)
step = g_checkpoint['step']
alpha = g_checkpoint['alpha']
iteration = g_checkpoint['iteration']
        print('pre-trained model is loaded step:%d, alpha:%.2f, iteration:%d'%(step, alpha, iteration))
input_image = Image.open(args.image_path).convert('RGB')
#for aligned CelebA evaluation images
#input_image = transforms.CenterCrop((178, 178))(input_image)
_16x16_down_sampling = transforms.Resize((16,16))
_64x64_down_sampling = transforms.Resize((64, 64))
_32x32_down_sampling = transforms.Resize((32, 32))
totensor = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
        # Note: our network is trained on progressively downsampled images.
transformed_image = _16x16_down_sampling(_32x32_down_sampling(_64x64_down_sampling(input_image)))
transformed_image = totensor(transformed_image).unsqueeze(0).to(device)
output_image = generator(transformed_image, step, alpha)
utils.save_image(0.5*output_image+0.5, args.output_path)
| 2,016 | 41.020833 | 105 | py |
Progressive-Face-Super-Resolution | Progressive-Face-Super-Resolution-master/eval.py | import torch
from torch import optim, nn
import argparse
from dataloader import CelebDataSet
from torch.utils.data import DataLoader
from model import Generator
import os
from torch.autograd import Variable, grad
import sys
from torchvision import utils
from math import log10
from ssim import ssim, msssim
def test(dataloader, generator, MSE_Loss, step, alpha):
avg_psnr = 0
avg_ssim = 0
avg_msssim = 0
for i, (x2_target_image, x4_target_image, target_image, input_image) in enumerate(dataloader):
input_image = input_image.to(device)
if step==1:
target_image = x2_target_image.to(device)
elif step==2:
target_image = x4_target_image.to(device)
else:
target_image = target_image.to(device)
predicted_image = generator(input_image, step, alpha)
predicted_image = predicted_image.double()
target_image = target_image.double()
mse_loss = MSE_Loss(0.5*predicted_image+0.5, 0.5*target_image+0.5)
psnr = 10*log10(1./mse_loss.item())
avg_psnr += psnr
_ssim = ssim(0.5*predicted_image+0.5, 0.5*target_image+0.5)
avg_ssim += _ssim.item()
ms_ssim = msssim(0.5*predicted_image+0.5, 0.5*target_image+0.5)
avg_msssim += ms_ssim.item()
sys.stdout.write('\r [%d/%d] Test progress... PSNR: %6.4f'%(i, len(dataloader), psnr))
save_image = torch.cat([predicted_image, target_image], dim=0)
if args.local_rank==0:
utils.save_image(0.5*save_image+0.5, os.path.join(args.result_path, '%d_results.jpg'%i))
print('Test done, Average PSNR:%6.4f, Average SSIM:%6.4f, Average MS-SSIM:%6.4f '%(avg_psnr/len(dataloader),avg_ssim/len(dataloader), avg_msssim/len(dataloader)))
if __name__ == '__main__':
    parser = argparse.ArgumentParser('Implementation of Progressive Face Super-Resolution with Attention to Face Landmarks')
parser.add_argument('--batch-size', default=16, type=int)
parser.add_argument('--checkpoint-path', default='./checkpoints/', type=str)
parser.add_argument('--data-path', default='./dataset/', type=str)
parser.add_argument('--result-path', default='./result/', type=str)
parser.add_argument('--workers', default=4, type=int)
parser.add_argument('--local_rank', default=0, type=int, help='node rank for distributed training')
parser.add_argument('--distributed', action='store_true')
args = parser.parse_args()
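    # Example invocation (single-GPU; paths are illustrative):
    #   python eval.py --data-path ./dataset/ --result-path ./result/ \
    #       --checkpoint-path ./checkpoints/generator_checkpoint_singleGPU.ckpt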
if args.local_rank == 0:
if not os.path.exists(args.result_path):
os.mkdir(args.result_path)
print('===>make directory', args.result_path)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
args.gpu = 0
args.world_size = 1
dataset = CelebDataSet(data_path=args.data_path, state='test')
if args.distributed:
import apex.parallel as parallel
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
args.world_size = torch.distributed.get_world_size()
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset)
dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True, sampler=train_sampler)
else:
dataloader = DataLoader(dataset=dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
generator = Generator().to(device)
if args.distributed:
g_checkpoint = torch.load(args.checkpoint_path, map_location = lambda storage, loc: storage.cuda(args.local_rank))
generator = parallel.DistributedDataParallel(generator)
generator = parallel.convert_syncbn_model(generator)
else:
g_checkpoint = torch.load(args.checkpoint_path)
generator.load_state_dict(g_checkpoint['model_state_dict'], strict=False)
step = g_checkpoint['step']
alpha = g_checkpoint['alpha']
iteration = g_checkpoint['iteration']
    print('pre-trained model is loaded step:%d, alpha:%.2f, iteration:%d'%(step, alpha, iteration))
MSE_Loss = nn.MSELoss()
generator.eval()
test(dataloader, generator, MSE_Loss, step, alpha)
| 4,296 | 41.97 | 166 | py |
ExtendedBitPlaneCompression | ExtendedBitPlaneCompression-master/algoEvals/dataCollect.py | # Copyright (c) 2019 ETH Zurich, Lukas Cavigelli, Georg Rutishauser, Luca Benini
import torch
import numpy as np
import tensorboard
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import os
import glob
import csv
import sys
sys.path.append('./quantLab')
def getModel(modelName, epoch=None, returnPath=False):
import torchvision as tv
import quantlab.ImageNet.topology as topo
loss_func = torch.nn.CrossEntropyLoss()
model = None
if modelName == 'alexnet':
model = tv.models.alexnet(pretrained=True)
elif modelName == 'squeezenet':
model = tv.models.squeezenet1_1(pretrained=True)
elif modelName == 'resnet34':
model = tv.models.resnet34(pretrained=True)
elif modelName == 'vgg16':
model = tv.models.vgg16_bn(pretrained=True)
elif modelName == 'mobilenet2':
model = tv.models.mobilenet_v2(pretrained=True)
elif modelName == 'alexnet-cust':
path = './quantLab/ImageNet/log/exp00/'
model = topo.AlexNetBaseline(capacity=1)
if epoch == None: epoch = 54
tmp = torch.load(path + '/save/epoch%04d.ckpt' % epoch, map_location=torch.device('cpu'))
model.load_state_dict(tmp['net'])
elif modelName == 'mobilenetV2-cust':
path = './quantLab/ImageNet/log/exp05/'
model = topo.MobileNetV2Baseline(capacity=1, expansion=6)
if epoch == None: epoch = 200
tmp = torch.load(path + '/save/epoch%04d.ckpt' % epoch, map_location=torch.device('cpu'))
model.load_state_dict(tmp['net'])
assert(model != None)
if returnPath:
return model, loss_func, path
else:
return model, loss_func
def getTensorBoardData(path, traceName):
"""Reads values form Tensorboard log files."""
if not os.path.isfile(path):
pathOpts = glob.glob(path + '/events.out.tfevents.*')
assert(len(pathOpts) == 1)
path = pathOpts[0]
event_acc = EventAccumulator(path)
event_acc.Reload()
# Show all tags in the log file: print(event_acc.Tags())
trace = event_acc.Scalars(traceName)
values = [v.value for v in trace]
steps = [v.step for v in trace]
return steps, values
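# Usage sketch (the log directory and trace name are illustrative; valid
# trace names depend on what was logged and can be listed via
# event_acc.Tags() inside getTensorBoardData):
def _tensorboard_example(log_dir='./quantLab/ImageNet/log/exp00'):
    steps, values = getTensorBoardData(log_dir, 'test/accuracy')
    return list(zip(steps, values))[:5]  # first few (step, value) pairs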
def getFMs(model, loss_func, training=True, numBatches=1, batchSize=10, computeGrads=False, safetyFactor=0.75):
# CREATE DATASET LOADERS
import quantLab.quantlab.ImageNet.preprocess as pp
datasetTrain, datasetVal, _ = pp.load_datasets('./ilsvrc12/', augment=False)
if training:
dataset = datasetTrain
model.train()
else:
dataset = datasetVal
model.eval()
dataLoader = torch.utils.data.DataLoader(dataset, batch_size=batchSize, shuffle=False)
# SELECT MODULES
msReLU = list(filter(lambda m: type(m) == torch.nn.modules.ReLU or type(m) == torch.nn.modules.ReLU6, model.modules()))
msConv = list(filter(lambda m: type(m) == torch.nn.modules.Conv2d, model.modules()))
msBN = list(filter(lambda m: type(m) == torch.nn.modules.BatchNorm2d, model.modules()))
#register hooks to get intermediate outputs:
def setupFwdHooks(modules):
outputs = []
def hook(module, input, output):
outputs.append(output.detach().contiguous().clone())
for i, m in enumerate(modules):
m.register_forward_hook(hook)
return outputs
#register hooks to get gradient maps:
def setupGradHooks(modules):
grads = []
def hook(module, gradInput, gradOutput):
assert(len(gradInput) == 1)
grads.insert(0, gradInput[0].contiguous().clone())
for i, m in enumerate(modules):
m.register_backward_hook(hook)
return grads
outputsReLU = setupFwdHooks(msReLU)
outputsConv = setupFwdHooks(msConv)
outputsBN = setupFwdHooks(msBN)
gradsReLU = setupGradHooks(msReLU)
# PASS IMAGES THROUGH NETWORK
outputSetsMaxMulti, gradSetsMaxMulti = [], []
outputSets, gradSets = [outputsReLU, outputsConv, outputsBN], [gradsReLU]
dataIterator = iter(dataLoader)
for _ in range(numBatches):
for outputs in outputSets:
outputs.clear()
for grads in gradSets:
grads.clear()
(image, target) = next(dataIterator)
if training:
model.train()
outp = model(image)
if computeGrads:
loss = loss_func(outp, target)
loss.backward()
else:
model.eval()
outp = model(image)
tmp = [[outp.max().item() for outp in outputs]
for outputs in outputSets]
outputSetsMaxMulti.append(tmp)
if computeGrads:
tmp = [[grad.max().item() for grad in grads]
for grads in gradSets]
gradSetsMaxMulti.append(tmp)
outputSetsMax = [np.array([om2[i] for om2 in outputSetsMaxMulti]).max(axis=0) for i in range(len(outputSets))]
if computeGrads:
gradSetsMax = [np.array([om2[i] for om2 in gradSetsMaxMulti]).max(axis=0) for i in range(len(gradSets))]
# NORMALIZE
for outputs, outputsMax in zip(outputSets, outputSetsMax):
for op, opmax in zip(outputs,outputsMax):
op.mul_(safetyFactor/opmax)
if computeGrads:
for grads, gradsMax in zip(gradSets, gradSetsMax):
for op, opmax in zip(grads,gradsMax):
op.mul_(safetyFactor/opmax)
else:
gradsReLU = []
return outputsReLU, outputsConv, outputsBN, gradsReLU | 5,473 | 33.64557 | 123 | py |
blp | blp-master/utils.py | import torch
import logging
import models
def get_model(model, dim, rel_model, loss_fn, num_entities, num_relations,
encoder_name, regularizer):
if model == 'blp':
return models.BertEmbeddingsLP(dim, rel_model, loss_fn, num_relations,
encoder_name, regularizer)
elif model == 'bert-bow':
return models.BOW(rel_model, loss_fn, num_relations, regularizer,
encoder_name=encoder_name)
elif model == 'bert-dkrl':
return models.DKRL(dim, rel_model, loss_fn, num_relations, regularizer,
encoder_name=encoder_name)
elif model == 'glove-bow':
return models.BOW(rel_model, loss_fn, num_relations, regularizer,
embeddings='data/glove/glove.6B.300d.pt')
elif model == 'glove-dkrl':
return models.DKRL(dim, rel_model, loss_fn, num_relations, regularizer,
embeddings='data/glove/glove.6B.300d.pt')
elif model == 'transductive':
return models.TransductiveLinkPrediction(dim, rel_model, loss_fn,
num_entities, num_relations,
regularizer)
else:
        raise ValueError(f'Unknown model {model}')
def make_ent2idx(entities, max_ent_id):
"""Given a tensor with entity IDs, return a tensor indexed with
an entity ID, containing the position of the entity.
Empty positions are filled with -1.
Example:
> make_ent2idx(torch.tensor([4, 5, 0]))
tensor([ 2, -1, -1, -1, 0, 1])
"""
idx = torch.arange(entities.shape[0])
ent2idx = torch.empty(max_ent_id + 1, dtype=torch.long).fill_(-1)
ent2idx.scatter_(0, entities, idx)
return ent2idx
def get_triple_filters(triples, graph, num_ents, ent2idx):
"""Given a set of triples, filter candidate entities that are valid
substitutes of an entity in the triple at a given position (head or tail).
For a particular triple, this allows to compute rankings for an entity of
interest, against other entities in the graph that would actually be wrong
substitutes.
Results are returned as a mask array with a value of 1.0 for filtered
entities, and 0.0 otherwise.
Args:
triples: Bx3 tensor of type torch.long, where B is the batch size,
and each row contains a triple of the form (head, tail, rel)
graph: nx.MultiDiGraph containing all edges used to filter candidates
num_ents: int, number of candidate entities
ent2idx: tensor, contains at index ent_id the index of the column for
that entity in the output mask array
"""
num_triples = triples.shape[0]
heads_filter = torch.zeros((num_triples, num_ents), dtype=torch.bool)
tails_filter = torch.zeros_like(heads_filter)
triples = triples.tolist()
for i, (head, tail, rel) in enumerate(triples):
head_edges = graph.out_edges(head, data='weight')
for (h, t, r) in head_edges:
if r == rel and t != tail:
ent_idx = ent2idx[t]
if ent_idx != -1:
tails_filter[i, ent_idx] = True
tail_edges = graph.in_edges(tail, data='weight')
for (h, t, r) in tail_edges:
if r == rel and h != head:
ent_idx = ent2idx[h]
if ent_idx != -1:
heads_filter[i, ent_idx] = True
return heads_filter, tails_filter
def get_metrics(pred_scores: torch.Tensor,
true_idx: torch.Tensor,
k_values: torch.Tensor):
"""Calculates mean number of hits@k. Higher values are ranked first.
Args:
pred_scores: (B, N) tensor of prediction values where B is batch size
and N number of classes.
        true_idx: (B, 1) tensor with index of ground truth class
k_values: (1, k) tensor containing number of top-k results to be
considered as hits.
Returns:
reciprocals: (B, 1) tensor containing reciprocals of the ranks
hits: (B, k) tensor containing the number of hits for each value of k
"""
# Based on PyKEEN's implementation
true_scores = pred_scores.gather(dim=1, index=true_idx)
best_rank = (pred_scores > true_scores).sum(dim=1, keepdim=True) + 1
worst_rank = (pred_scores >= true_scores).sum(dim=1, keepdim=True)
average_rank = (best_rank + worst_rank).float() * 0.5
reciprocals = average_rank.reciprocal()
hits = average_rank <= k_values
return reciprocals, hits
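# Usage sketch: two queries scored over four candidates, with ground-truth
# indices 0 and 3, evaluated at k = 1 and k = 3.
def _get_metrics_example():
    scores = torch.tensor([[0.9, 0.1, 0.3, 0.5],
                           [0.2, 0.8, 0.1, 0.4]])
    true_idx = torch.tensor([[0], [3]])
    k_values = torch.tensor([[1, 3]])
    reciprocals, hits = get_metrics(scores, true_idx, k_values)
    # First query has rank 1 -> reciprocal 1.0; second has rank 2 -> 0.5.
    return reciprocals, hits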
def split_by_new_position(triples, mrr_values, new_entities):
"""Split MRR results by the position of new entity. Use to break down
results for a triple where a new entity is at the head and the tail,
at the head only, or the tail only.
    Since MRR is calculated by corrupting the head first, and then the tail,
the size of mrr_values should be twice the size of triples. The calculated
MRR is then the average of the two cases.
Args:
triples: Bx3 tensor containing (head, tail, rel).
mrr_values: 2B tensor, with first half containing MRR for corrupted
triples at the head position, and second half at the tail position.
new_entities: set, entities to be considered as new.
Returns:
mrr_by_position: tensor of 3 elements breaking down MRR by new entities
at both positions, at head, and tail.
mrr_pos_counts: tensor of 3 elements containing counts for each case.
"""
mrr_by_position = torch.zeros(3, device=mrr_values.device)
mrr_pos_counts = torch.zeros_like(mrr_by_position)
num_triples = triples.shape[0]
for i, (h, t, r) in enumerate(triples):
head, tail = h.item(), t.item()
mrr_val = (mrr_values[i] + mrr_values[i + num_triples]).item() / 2.0
if head in new_entities and tail in new_entities:
mrr_by_position[0] += mrr_val
mrr_pos_counts[0] += 1.0
elif head in new_entities:
mrr_by_position[1] += mrr_val
mrr_pos_counts[1] += 1.0
elif tail in new_entities:
mrr_by_position[2] += mrr_val
mrr_pos_counts[2] += 1.0
return mrr_by_position, mrr_pos_counts
def split_by_category(triples, mrr_values, rel_categories):
mrr_by_category = torch.zeros([2, 4], device=mrr_values.device)
mrr_cat_count = torch.zeros([1, 4], dtype=torch.float,
device=mrr_by_category.device)
num_triples = triples.shape[0]
for i, (h, t, r) in enumerate(triples):
rel_category = rel_categories[r]
mrr_val_head_pred = mrr_values[i]
mrr_by_category[0, rel_category] += mrr_val_head_pred
mrr_val_tail_pred = mrr_values[i + num_triples]
mrr_by_category[1, rel_category] += mrr_val_tail_pred
mrr_cat_count[0, rel_category] += 1
return mrr_by_category, mrr_cat_count
def get_logger():
"""Get a default logger that includes a timestamp."""
logger = logging.getLogger("")
logger.handlers = []
ch = logging.StreamHandler()
str_fmt = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
formatter = logging.Formatter(str_fmt, datefmt='%H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel('INFO')
return logger
| 7,375 | 39.306011 | 79 | py |
blp | blp-master/data.py | import os.path as osp
import torch
from torch.utils.data import Dataset
import transformers
import string
import nltk
from tqdm import tqdm
from nltk.corpus import stopwords
import logging
UNK = '[UNK]'
nltk.download('stopwords')
nltk.download('punkt')
STOP_WORDS = stopwords.words('english')
DROPPED = STOP_WORDS + list(string.punctuation)
CATEGORY_IDS = {'1-to-1': 0, '1-to-many': 1, 'many-to-1': 2, 'many-to-many': 3}
def file_to_ids(file_path):
"""Read one line per file and assign it an ID.
Args:
file_path: str, path of file to read
Returns: dict, mapping str to ID (int)
"""
str2id = dict()
with open(file_path) as file:
for i, line in enumerate(file):
str2id[line.strip()] = i
return str2id
def get_negative_sampling_indices(batch_size, num_negatives, repeats=1):
""""Obtain indices for negative sampling within a batch of entity pairs.
Indices are sampled from a reshaped array of indices. For example,
if there are 4 pairs (batch_size=4), the array of indices is
[[0, 1],
[2, 3],
[4, 5],
[6, 7]]
From this array, we corrupt either the first or second element of each row.
This yields one negative sample.
For example, if the positions with a dash are selected,
[[0, -],
[-, 3],
[4, -],
[-, 7]]
they are then replaced with a random index from a row other than the row
to which they belong:
[[0, 3],
[5, 3],
[4, 6],
[1, 7]]
The returned array has shape (batch_size, num_negatives, 2).
"""
num_ents = batch_size * 2
idx = torch.arange(num_ents).reshape(batch_size, 2)
# For each row, sample entities, assigning 0 probability to entities
# of the same row
zeros = torch.zeros(batch_size, 2)
head_weights = torch.ones(batch_size, num_ents, dtype=torch.float)
head_weights.scatter_(1, idx, zeros)
random_idx = head_weights.multinomial(num_negatives * repeats,
replacement=True)
random_idx = random_idx.t().flatten()
# Select randomly the first or the second column
row_selector = torch.arange(batch_size * num_negatives * repeats)
col_selector = torch.randint(0, 2, [batch_size * num_negatives * repeats])
# Fill the array of negative samples with the sampled random entities
# at the right positions
neg_idx = idx.repeat((num_negatives * repeats, 1))
neg_idx[row_selector, col_selector] = random_idx
neg_idx = neg_idx.reshape(-1, batch_size * repeats, 2)
neg_idx.transpose_(0, 1)
return neg_idx
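# Usage sketch: with a batch of 4 positive pairs and 2 negatives per pair,
# the returned indices point into the flattened batch of 8 entities.
def _negative_sampling_example():
    neg_idx = get_negative_sampling_indices(batch_size=4, num_negatives=2)
    # Shape (4, 2, 2): for each pair, 2 corrupted (head, tail) index pairs.
    return neg_idx.shape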
class GraphDataset(Dataset):
"""A Dataset storing the triples of a Knowledge Graph.
Args:
triples_file: str, path to the file containing triples. This is a
text file where each line contains a triple of the form
'subject predicate object'
write_maps_file: bool, if set to True, dictionaries mapping
entities and relations to IDs are saved to disk (for reuse with
other datasets).
"""
def __init__(self, triples_file, neg_samples=None, write_maps_file=False,
num_devices=1):
directory = osp.dirname(triples_file)
maps_path = osp.join(directory, 'maps.pt')
# Create or load maps from entity and relation strings to unique IDs
if not write_maps_file:
if not osp.exists(maps_path):
raise ValueError('Maps file not found.')
maps = torch.load(maps_path)
ent_ids, rel_ids = maps['ent_ids'], maps['rel_ids']
else:
ents_file = osp.join(directory, 'entities.txt')
rels_file = osp.join(directory, 'relations.txt')
ent_ids = file_to_ids(ents_file)
rel_ids = file_to_ids(rels_file)
entities = set()
relations = set()
# Read triples and store as ints in tensor
file = open(triples_file)
triples = []
for i, line in enumerate(file):
values = line.split()
# FB13 and WN11 have duplicate triples for classification,
# here we keep the correct triple
if len(values) > 3 and values[3] == '-1':
continue
head, rel, tail = line.split()[:3]
entities.update([head, tail])
relations.add(rel)
triples.append([ent_ids[head], ent_ids[tail], rel_ids[rel]])
self.triples = torch.tensor(triples, dtype=torch.long)
self.rel_categories = torch.zeros(len(rel_ids), dtype=torch.long)
rel_categories_file = osp.join(directory, 'relations-cat.txt')
self.has_rel_categories = False
if osp.exists(rel_categories_file):
with open(rel_categories_file) as f:
for line in f:
rel, cat = line.strip().split()
self.rel_categories[rel_ids[rel]] = CATEGORY_IDS[cat]
self.has_rel_categories = True
# Save maps for reuse
torch.save({'ent_ids': ent_ids, 'rel_ids': rel_ids}, maps_path)
self.num_ents = len(entities)
self.num_rels = len(relations)
self.entities = torch.tensor([ent_ids[ent] for ent in entities])
self.num_triples = self.triples.shape[0]
self.directory = directory
self.maps_path = maps_path
self.neg_samples = neg_samples
self.num_devices = num_devices
def __getitem__(self, index):
return self.triples[index]
def __len__(self):
return self.num_triples
def collate_fn(self, data_list):
"""Given a batch of triples, return it together with a batch of
corrupted triples where either the subject or object are replaced
by a random entity. Use as a collate_fn for a DataLoader.
"""
batch_size = len(data_list)
pos_pairs, rels = torch.stack(data_list).split(2, dim=1)
neg_idx = get_negative_sampling_indices(batch_size, self.neg_samples)
return pos_pairs, rels, neg_idx
class TextGraphDataset(GraphDataset):
"""A dataset storing a graph, and textual descriptions of its entities.
Args:
triples_file: str, path to the file containing triples. This is a
text file where each line contains a triple of the form
'subject predicate object'
max_len: int, maximum number of tokens to read per description.
neg_samples: int, number of negative samples to get per triple
tokenizer: transformers.PreTrainedTokenizer or GloVeTokenizer, used
to tokenize the text.
drop_stopwords: bool, if set to True, punctuation and stopwords are
dropped from entity descriptions.
write_maps_file: bool, if set to True, dictionaries mapping
entities and relations to IDs are saved to disk (for reuse with
other datasets).
drop_stopwords: bool
"""
def __init__(self, triples_file, neg_samples, max_len, tokenizer,
drop_stopwords, write_maps_file=False, use_cached_text=False,
num_devices=1):
super().__init__(triples_file, neg_samples, write_maps_file,
num_devices)
maps = torch.load(self.maps_path)
ent_ids = maps['ent_ids']
if max_len is None:
max_len = tokenizer.max_len
cached_text_path = osp.join(self.directory, 'text_data.pt')
need_to_load_text = True
if use_cached_text:
logger = logging.getLogger()
if osp.exists(cached_text_path):
self.text_data = torch.load(cached_text_path)
logger.info(f'Loaded cached text data for'
f' {self.text_data.shape[0]} entities,'
f' and maximum length {self.text_data.shape[1]}.')
need_to_load_text = False
else:
logger.info(f'Cached text data not found.')
if need_to_load_text:
self.text_data = torch.zeros((len(ent_ids), max_len + 1),
dtype=torch.long)
read_entities = set()
progress = tqdm(desc='Reading entity descriptions',
total=len(ent_ids), mininterval=5)
for text_file in ('entity2textlong.txt', 'entity2text.txt'):
file_path = osp.join(self.directory, text_file)
if not osp.exists(file_path):
continue
with open(file_path) as f:
for line in f:
values = line.strip().split('\t')
entity = values[0]
text = ' '.join(values[1:])
if entity not in ent_ids:
continue
if entity in read_entities:
continue
read_entities.add(entity)
ent_id = ent_ids[entity]
if drop_stopwords:
tokens = nltk.word_tokenize(text)
text = ' '.join([t for t in tokens if
t.lower() not in DROPPED])
text_tokens = tokenizer.encode(text,
max_length=max_len,
return_tensors='pt')
text_len = text_tokens.shape[1]
# Starting slice of row contains token IDs
self.text_data[ent_id, :text_len] = text_tokens
# Last cell contains sequence length
self.text_data[ent_id, -1] = text_len
progress.update()
progress.close()
if len(read_entities) != len(ent_ids):
raise ValueError(f'Read {len(read_entities):,} descriptions,'
f' but {len(ent_ids):,} were expected.')
if self.text_data[:, -1].min().item() < 1:
raise ValueError(f'Some entries in text_data contain'
f' length-0 descriptions.')
torch.save(self.text_data,
osp.join(self.directory, 'text_data.pt'))
def get_entity_description(self, ent_ids):
"""Get entity descriptions for a tensor of entity IDs."""
text_data = self.text_data[ent_ids]
text_end_idx = text_data.shape[-1] - 1
# Separate tokens from lengths
text_tok, text_len = text_data.split(text_end_idx, dim=-1)
max_batch_len = text_len.max()
# Truncate batch
text_tok = text_tok[..., :max_batch_len]
text_mask = (text_tok > 0).float()
return text_tok, text_mask, text_len
def collate_fn(self, data_list):
"""Given a batch of triples, return it in the form of
entity descriptions, and the relation types between them.
Use as a collate_fn for a DataLoader.
"""
batch_size = len(data_list) // self.num_devices
if batch_size <= 1:
            raise ValueError('collate_fn can only work with batch sizes'
' larger than 1.')
pos_pairs, rels = torch.stack(data_list).split(2, dim=1)
text_tok, text_mask, text_len = self.get_entity_description(pos_pairs)
neg_idx = get_negative_sampling_indices(batch_size, self.neg_samples,
repeats=self.num_devices)
return text_tok, text_mask, rels, neg_idx
class GloVeTokenizer:
def __init__(self, vocab_dict_file, uncased=True):
self.word2idx = torch.load(vocab_dict_file)
self.uncased = uncased
def encode(self, text, max_length, return_tensors):
if self.uncased:
text = text.lower()
tokens = nltk.word_tokenize(text)
encoded = [self.word2idx.get(t, self.word2idx[UNK]) for t in tokens]
encoded = [encoded[:max_length]]
if return_tensors:
encoded = torch.tensor(encoded)
return encoded
def batch_encode_plus(self, batch, max_length, **kwargs):
batch_tokens = []
for text in batch:
tokens = self.encode(text, max_length, return_tensors=False)[0]
if len(tokens) < max_length:
tokens += [0] * (max_length - len(tokens))
batch_tokens.append(tokens)
batch_tokens = torch.tensor(batch_tokens, dtype=torch.long)
batch_masks = (batch_tokens > 0).float()
tokenized_data = {'input_ids': batch_tokens,
'attention_mask': batch_masks}
return tokenized_data
def test_text_graph_dataset():
from torch.utils.data import DataLoader
tok = transformers.AlbertTokenizer.from_pretrained('albert-base-v2')
gtr = TextGraphDataset('data/wikifb15k237/train-triples.txt', max_len=32,
neg_samples=32, tokenizer=tok, drop_stopwords=False)
loader = DataLoader(gtr, batch_size=8, collate_fn=gtr.collate_fn)
data = next(iter(loader))
print('Done')
if __name__ == '__main__':
test_text_graph_dataset()
| 13,290 | 36.866097 | 79 | py |